Garbage-collect pagemove(); nothing uses it anymore (YAY!!!)

thorpej 2004-08-28 22:12:40 +00:00
parent 605050704a
commit 6c08646cb8
17 changed files with 32 additions and 487 deletions
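For context, pagemove() was the machine-dependent hook that remapped a range of kernel virtual pages from one address to another; its prototype, removed from uvm_extern.h at the end of this commit, was void pagemove(caddr_t, caddr_t, size_t). The following is a minimal, illustrative sketch of the pmap-based variant that several ports carried (condensed from the arm, hp700, m68k, and sparc copies deleted below; it assumes the standard pmap(9) interface and page-aligned arguments, and is not any single port's exact code):

	void
	pagemove(caddr_t from, caddr_t to, size_t size)
	{
		paddr_t pa;

		/* Callers were expected to pass a page-aligned size. */
		KASSERT((size & PGOFSET) == 0);

		while (size > 0) {
			/* Find the physical page currently backing "from"... */
			if (pmap_extract(pmap_kernel(), (vaddr_t)from, &pa) == FALSE)
				panic("pagemove");
			/* ...unmap it there and re-enter it at "to". */
			pmap_kremove((vaddr_t)from, PAGE_SIZE);
			pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
			from += PAGE_SIZE;
			to += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		pmap_update(pmap_kernel());
	}

Other ports (alpha, i386, amd64, mips, arm26, sh3, vax) instead copied the page-table entries directly and flushed or shot down the TLB, which is why every architecture carried its own copy and why removing the last caller lets all of them go.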

View File

@ -1,10 +1,10 @@
/* $NetBSD: stubs.c,v 1.5 2003/09/30 00:35:30 thorpej Exp $ */
/* $NetBSD: stubs.c,v 1.6 2004/08/28 22:12:40 thorpej Exp $ */
/*
* stubs.c -- functions I haven't written yet
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: stubs.c,v 1.5 2003/09/30 00:35:30 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: stubs.c,v 1.6 2004/08/28 22:12:40 thorpej Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@ -89,11 +89,3 @@ fuiword(base)
{
panic("fuiword not implemented");
}
void
pagemove(foo, bar, len)
caddr_t foo, bar;
size_t len;
{
panic("pagemove not implemented");
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.82 2004/01/05 23:51:19 nathanw Exp $ */
/* $NetBSD: vm_machdep.c,v 1.83 2004/08/28 22:12:40 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
@ -29,7 +29,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.82 2004/01/05 23:51:19 nathanw Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.83 2004/08/28 22:12:40 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -269,46 +269,6 @@ cpu_swapout(struct lwp *l)
fpusave_proc(l, 1);
}
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to have valid page table pages,
* and size must be a multiple of PAGE_SIZE.
*
* Note that since all kernel page table pages are pre-allocated
* and mapped in, we can use the Virtual Page Table.
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
long fidx, tidx;
ssize_t todo;
PMAP_TLB_SHOOTDOWN_CPUSET_DECL
if (size % PAGE_SIZE)
panic("pagemove");
todo = size; /* if testing > 0, need sign... */
while (todo > 0) {
fidx = VPT_INDEX(from);
tidx = VPT_INDEX(to);
VPT[tidx] = VPT[fidx];
VPT[fidx] = 0;
ALPHA_TBIS((vaddr_t)from);
ALPHA_TBIS((vaddr_t)to);
PMAP_TLB_SHOOTDOWN(pmap_kernel(), (vaddr_t)from, PG_ASM);
PMAP_TLB_SHOOTDOWN(pmap_kernel(), (vaddr_t)to, PG_ASM);
todo -= PAGE_SIZE;
from += PAGE_SIZE;
to += PAGE_SIZE;
}
PMAP_TLB_SHOOTNOW();
}
/*
* Map a user I/O request into kernel virtual address space.
* Note: the pages are already locked by uvm_vslock(), so we

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.7 2004/06/28 08:24:01 fvdl Exp $ */
/* $NetBSD: vm_machdep.c,v 1.8 2004/08/28 22:12:40 thorpej Exp $ */
/*-
* Copyright (c) 1982, 1986 The Regents of the University of California.
@ -80,7 +80,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.7 2004/06/28 08:24:01 fvdl Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.8 2004/08/28 22:12:40 thorpej Exp $");
#include "opt_user_ldt.h"
#include "opt_largepages.h"
@ -342,48 +342,6 @@ setredzone(struct lwp *l)
pmap_update(pmap_kernel());
}
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap.
*/
void
pagemove(from, to, size)
register caddr_t from, to;
size_t size;
{
register pt_entry_t *fpte, *tpte, ofpte, otpte;
int32_t cpumask = 0;
if (size & PAGE_MASK)
panic("pagemove");
fpte = kvtopte((vaddr_t)from);
tpte = kvtopte((vaddr_t)to);
#ifdef LARGEPAGES
/* XXX For now... */
if (*fpte & PG_PS)
panic("pagemove: fpte PG_PS");
if (*tpte & PG_PS)
panic("pagemove: tpte PG_PS");
#endif
while (size > 0) {
otpte = *tpte;
ofpte = *fpte;
*tpte++ = *fpte;
*fpte++ = 0;
if (otpte & PG_V)
pmap_tlb_shootdown(pmap_kernel(),
(vaddr_t)to, otpte, &cpumask);
if (ofpte & PG_V)
pmap_tlb_shootdown(pmap_kernel(),
(vaddr_t)from, ofpte, &cpumask);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
pmap_tlb_shootnow(cpumask);
}
/*
* Convert kernel VA to physical address
*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $ */
/* $NetBSD: vm_machdep.c,v 1.32 2004/08/28 22:12:40 thorpej Exp $ */
/*
* Copyright (c) 1994-1998 Mark Brinicombe.
@ -44,7 +44,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.31 2004/01/04 11:33:29 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.32 2004/08/28 22:12:40 thorpej Exp $");
#include "opt_armfpe.h"
#include "opt_pmap_debug.h"
@ -307,41 +307,6 @@ cpu_swapout(l)
#endif
}
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap,
* and size must be a multiple of PAGE_SIZE.
*/
void
pagemove(from, to, size)
caddr_t from, to;
size_t size;
{
paddr_t pa;
boolean_t rv;
if (size % PAGE_SIZE)
panic("pagemove: size=%08lx", (u_long) size);
while (size > 0) {
rv = pmap_extract(pmap_kernel(), (vaddr_t) from, &pa);
#ifdef DEBUG
if (rv == FALSE)
panic("pagemove 2");
if (pmap_extract(pmap_kernel(), (vaddr_t) to, NULL) == TRUE)
panic("pagemove 3");
#endif
pmap_kremove((vaddr_t) from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t) to, pa, VM_PROT_READ|VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
pmap_update(pmap_kernel());
}
/*
* Map a user I/O request into kernel virtual address space.
* Note: the pages are already locked by uvm_vslock(), so we

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.12 2004/07/24 18:59:06 chs Exp $ */
/* $NetBSD: vm_machdep.c,v 1.13 2004/08/28 22:12:40 thorpej Exp $ */
/* $OpenBSD: vm_machdep.c,v 1.25 2001/09/19 20:50:56 mickey Exp $ */
@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.12 2004/07/24 18:59:06 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.13 2004/08/28 22:12:40 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -96,32 +96,6 @@ cpu_coredump(struct lwp *l, struct vnode *vp, struct ucred *cred,
return error;
}
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap.
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
paddr_t pa;
boolean_t rv;
KASSERT(((vaddr_t)from & PGOFSET) == 0);
KASSERT(((vaddr_t)to & PGOFSET) == 0);
KASSERT((size & PGOFSET) == 0);
while (size > 0) {
rv = pmap_extract(pmap_kernel(), (vaddr_t)from, &pa);
KASSERT(rv);
KASSERT(!pmap_extract(pmap_kernel(), (vaddr_t)to, NULL));
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa,
VM_PROT_READ|VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
void
cpu_swapin(struct lwp *l)
{

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.116 2004/02/06 10:28:03 drochner Exp $ */
/* $NetBSD: vm_machdep.c,v 1.117 2004/08/28 22:12:40 thorpej Exp $ */
/*-
* Copyright (c) 1982, 1986 The Regents of the University of California.
@ -80,7 +80,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.116 2004/02/06 10:28:03 drochner Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.117 2004/08/28 22:12:40 thorpej Exp $");
#include "opt_user_ldt.h"
#include "opt_largepages.h"
@ -330,45 +330,6 @@ setredzone(struct lwp *l)
}
#endif
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap.
*/
void
pagemove(register caddr_t from, register caddr_t to, size_t size)
{
register pt_entry_t *fpte, *tpte, ofpte, otpte;
int32_t cpumask = 0;
if (size & PAGE_MASK)
panic("pagemove");
fpte = kvtopte((vaddr_t)from);
tpte = kvtopte((vaddr_t)to);
#ifdef LARGEPAGES
/* XXX For now... */
if (*fpte & PG_PS)
panic("pagemove: fpte PG_PS");
if (*tpte & PG_PS)
panic("pagemove: tpte PG_PS");
#endif
while (size > 0) {
otpte = *tpte;
ofpte = *fpte;
*tpte++ = *fpte;
*fpte++ = 0;
if (otpte & PG_V)
pmap_tlb_shootdown(pmap_kernel(),
(vaddr_t)to, otpte, &cpumask);
if (ofpte & PG_V)
pmap_tlb_shootdown(pmap_kernel(),
(vaddr_t)from, ofpte, &cpumask);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
pmap_tlb_shootnow(cpumask);
}
/*
* Convert kernel VA to physical address
*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.8 2004/08/28 22:06:28 thorpej Exp $ */
/* $NetBSD: vm_machdep.c,v 1.9 2004/08/28 22:12:41 thorpej Exp $ */
/*
* Copyright (c) 1982, 1986, 1990, 1993
@ -77,7 +77,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.8 2004/08/28 22:06:28 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.9 2004/08/28 22:12:41 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -256,38 +256,6 @@ cpu_coredump(struct lwp *l, struct vnode *vp, struct ucred *cred,
return 0;
}
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap,
* and size must be a multiple of PAGE_SIZE.
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
paddr_t pa;
boolean_t rv;
#ifdef DEBUG
if (size & PGOFSET)
panic("pagemove");
#endif
while (size > 0) {
rv = pmap_extract(pmap_kernel(), (vaddr_t)from, &pa);
#ifdef DEBUG
if (rv == FALSE)
panic("pagemove 2");
if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
panic("pagemove 3");
#endif
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
pmap_update(pmap_kernel());
}
/*
* Map a user I/O request into kernel virtual address space.
* Note: the pages are already locked by uvm_vslock(), so we

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.102 2004/02/28 16:02:03 simonb Exp $ */
/* $NetBSD: vm_machdep.c,v 1.103 2004/08/28 22:12:41 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -79,7 +79,7 @@
#include "opt_ddb.h"
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.102 2004/02/28 16:02:03 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.103 2004/08/28 22:12:41 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -308,39 +308,6 @@ cpu_coredump(struct lwp *l, struct vnode *vp, struct ucred *cred,
return error;
}
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap,
* and size must be a multiple of PAGE_SIZE.
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
pt_entry_t *fpte, *tpte;
paddr_t invalid;
if (size % PAGE_SIZE)
panic("pagemove");
fpte = kvtopte(from);
tpte = kvtopte(to);
#ifdef MIPS3_PLUS
if (CPUISMIPS3 &&
(mips_cache_indexof(from) != mips_cache_indexof(to)))
mips_dcache_wbinv_range((vaddr_t) from, size);
#endif
invalid = (MIPS_HAS_R4K_MMU) ? MIPS3_PG_NV | MIPS3_PG_G : MIPS1_PG_NV;
while (size > 0) {
tpte->pt_entry = fpte->pt_entry;
fpte->pt_entry = invalid;
MIPS_TBIS((vaddr_t)from);
MIPS_TBIS((vaddr_t)to);
fpte++; tpte++;
size -= PAGE_SIZE;
from += PAGE_SIZE;
to += PAGE_SIZE;
}
}
/*
* Map a user I/O request into kernel virtual address space.
*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.60 2004/01/23 04:12:39 simonb Exp $ */
/* $NetBSD: vm_machdep.c,v 1.61 2004/08/28 22:12:41 thorpej Exp $ */
/*-
* Copyright (c) 1982, 1986 The Regents of the University of California.
@ -78,7 +78,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.60 2004/01/23 04:12:39 simonb Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.61 2004/08/28 22:12:41 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -326,49 +326,6 @@ setredzone(pte, vaddr)
}
#endif
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the Sysmap,
* and size must be a multiple of PAGE_SIZE.
*/
void
pagemove(from, to, size)
register caddr_t from, to;
size_t size;
{
register pt_entry_t *fpte, *tpte, ofpte, otpte;
if (size % PAGE_SIZE)
panic("pagemove");
fpte = kvtopte((vaddr_t)from);
tpte = kvtopte((vaddr_t)to);
if (size <= PAGE_SIZE * 16) {
while (size > 0) {
otpte = *tpte;
ofpte = *fpte;
*tpte++ = *fpte;
*fpte++ = 0;
if (otpte & PG_V)
tlbflush_entry((vaddr_t) to);
if (ofpte & PG_V)
tlbflush_entry((vaddr_t) from);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
} else {
while (size > 0) {
*tpte++ = *fpte;
*fpte++ = 0;
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
tlbflush();
}
}
/*
* Convert kernel VA to physical address
*/

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.1 2003/08/19 10:55:00 ragge Exp $ */
/* $NetBSD: vm_machdep.c,v 1.2 2004/08/28 22:12:41 thorpej Exp $ */
/*
* Copyright (c) 2003 Anders Magnusson (ragge@ludd.luth.se).
* All rights reserved.
@ -35,12 +35,6 @@
#include <uvm/uvm_extern.h>
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
panic("pagemove");
}
int
cpu_coredump(struct lwp *p, struct vnode *vp,
struct ucred *cred, struct core *chdr)

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.60 2004/04/16 23:58:08 matt Exp $ */
/* $NetBSD: vm_machdep.c,v 1.61 2004/08/28 22:12:41 thorpej Exp $ */
/*
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.60 2004/04/16 23:58:08 matt Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.61 2004/08/28 22:12:41 thorpej Exp $");
#include "opt_altivec.h"
#include "opt_multiprocessor.h"
@ -198,25 +198,6 @@ cpu_swapin(struct lwp *l)
{
}
/*
* Move pages from one kernel virtual address to another.
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
paddr_t pa;
vaddr_t va;
for (va = (vaddr_t)from; size > 0; size -= PAGE_SIZE) {
(void) pmap_extract(pmap_kernel(), va, &pa);
pmap_kremove(va, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE);
va += PAGE_SIZE;
to += PAGE_SIZE;
}
pmap_update(pmap_kernel());
}
void
cpu_lwp_free(struct lwp *l, int proc)
{

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.43 2004/03/24 15:38:42 wiz Exp $ */
/* $NetBSD: vm_machdep.c,v 1.44 2004/08/28 22:12:41 thorpej Exp $ */
/*-
* Copyright (c) 2002 The NetBSD Foundation, Inc. All rights reserved.
@ -81,7 +81,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.43 2004/03/24 15:38:42 wiz Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.44 2004/08/28 22:12:41 thorpej Exp $");
#include "opt_kstack_debug.h"
@ -355,34 +355,6 @@ cpu_coredump(struct lwp *l, struct vnode *vp, struct ucred *cred,
return 0;
}
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in the pmap_kernel().
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
pt_entry_t *fpte, *tpte;
if ((size & PGOFSET) != 0)
panic("pagemove");
fpte = __pmap_kpte_lookup((vaddr_t)from);
tpte = __pmap_kpte_lookup((vaddr_t)to);
if (SH_HAS_VIRTUAL_ALIAS)
sh_dcache_wbinv_range((vaddr_t)from, size);
while (size > 0) {
*tpte++ = *fpte;
*fpte++ = 0;
sh_tlb_invalidate_addr(0, (vaddr_t)from);
sh_tlb_invalidate_addr(0, (vaddr_t)to);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
/*
* Map an IO request into kernel virtual address space. Requests fall into
* one of five categories:

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.8 2004/01/04 11:33:31 jdolecek Exp $ */
/* $NetBSD: vm_machdep.c,v 1.9 2004/08/28 22:12:41 thorpej Exp $ */
/*
* Copyright 2002 Wasabi Systems, Inc.
@ -62,7 +62,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.8 2004/01/04 11:33:31 jdolecek Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.9 2004/08/28 22:12:41 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -244,40 +244,6 @@ cpu_setfunc(struct lwp *l, void (*func)(void *), void *arg)
pcb->pcb_ctx.sf_r11 = (register_t)(intptr_t)arg;
}
/*
* Move pages from one kernel virtual address to another.
* Both addresses are assumed to reside in KSEG1,
* and size must be a multiple of NBPG.
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
paddr_t pa;
boolean_t rv;
#ifdef DEBUG
if (size & PGOFSET ||
(uintptr_t)from < SH5_KSEG1_BASE || (uintptr_t)to < SH5_KSEG1_BASE)
panic("pagemove");
#endif
while (size > 0) {
rv = pmap_extract(pmap_kernel(), (vaddr_t)from, &pa);
#ifdef DEBUG
if (rv == FALSE)
panic("pagemove 2");
if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
panic("pagemove 3");
#endif
pmap_kremove((vaddr_t)from, (vsize_t)PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
pmap_update(pmap_kernel());
}
/*
* Map a user I/O request into kernel virtual address space.
* Note: the pages are already locked by uvm_vslock(), so we

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.79 2004/05/02 11:22:07 pk Exp $ */
/* $NetBSD: vm_machdep.c,v 1.80 2004/08/28 22:12:41 thorpej Exp $ */
/*
* Copyright (c) 1996
@ -49,7 +49,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.79 2004/05/02 11:22:07 pk Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.80 2004/08/28 22:12:41 thorpej Exp $");
#include "opt_multiprocessor.h"
@ -71,31 +71,6 @@ __KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.79 2004/05/02 11:22:07 pk Exp $");
#include <sparc/sparc/cpuvar.h>
/*
* Move pages from one kernel virtual address to another.
*/
void
pagemove(from, to, size)
caddr_t from, to;
size_t size;
{
paddr_t pa;
if (size & PGOFSET || (int)from & PGOFSET || (int)to & PGOFSET)
panic("pagemove 1");
while (size > 0) {
if (pmap_extract(pmap_kernel(), (vaddr_t)from, &pa) == FALSE)
panic("pagemove 2");
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
pmap_update(pmap_kernel());
}
/*
* Map a user I/O request into kernel virtual address space.
* Note: the pages are already locked by uvm_vslock(), so we

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.55 2004/01/19 10:39:49 martin Exp $ */
/* $NetBSD: vm_machdep.c,v 1.56 2004/08/28 22:12:42 thorpej Exp $ */
/*
* Copyright (c) 1996-2002 Eduardo Horvath. All rights reserved.
@ -50,7 +50,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.55 2004/01/19 10:39:49 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.56 2004/08/28 22:12:42 thorpej Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@ -71,31 +71,6 @@ __KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.55 2004/01/19 10:39:49 martin Exp $
#include <sparc64/sparc64/cache.h>
/*
* Move pages from one kernel virtual address to another.
*/
void
pagemove(from, to, size)
register caddr_t from, to;
size_t size;
{
paddr_t pa;
if (size & PGOFSET || (long)from & PGOFSET || (long)to & PGOFSET)
panic("pagemove 1");
while (size > 0) {
if (pmap_extract(pmap_kernel(), (vaddr_t)from, &pa) == FALSE)
panic("pagemove 2");
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
pmap_update(pmap_kernel());
}
/*
* Map a user I/O request into kernel virtual address space.
* Note: the pages are already locked by uvm_vslock(), so we

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.83 2003/07/15 02:15:06 lukem Exp $ */
/* $NetBSD: vm_machdep.c,v 1.84 2004/08/28 22:12:42 thorpej Exp $ */
/*
* Copyright (c) 1994 Ludd, University of Lule}, Sweden.
@ -31,7 +31,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.83 2003/07/15 02:15:06 lukem Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.84 2004/08/28 22:12:42 thorpej Exp $");
#include "opt_compat_ultrix.h"
#include "opt_multiprocessor.h"
@ -65,25 +65,6 @@ __KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.83 2003/07/15 02:15:06 lukem Exp $"
#include "opt_cputype.h"
/*
* pagemove - moves pages at virtual address from to virtual address to,
* block moved of size size. Using fast insn bcopy for pte move.
*/
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
pt_entry_t *fpte, *tpte;
int stor;
fpte = kvtopte(from);
tpte = kvtopte(to);
stor = (size >> VAX_PGSHIFT) * sizeof(struct pte);
bcopy(fpte, tpte, stor);
bzero(fpte, stor);
mtpr(0, PR_TBIA);
}
#ifdef MULTIPROCESSOR
static void
procjmp(void *arg)

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_extern.h,v 1.92 2004/05/04 21:33:40 pk Exp $ */
/* $NetBSD: uvm_extern.h,v 1.93 2004/08/28 22:12:40 thorpej Exp $ */
/*
*
@ -531,7 +531,6 @@ MALLOC_DECLARE(M_VMPMAP);
/* vm_machdep.c */
void vmapbuf(struct buf *, vsize_t);
void vunmapbuf(struct buf *, vsize_t);
void pagemove(caddr_t, caddr_t, size_t);
#ifndef cpu_swapin
void cpu_swapin(struct lwp *);
#endif