use pmap_k* in pagemove() (i.e., for buffer cache pages)

and in vmapbuf() and vunmapbuf() (since there's no VAC on this platform).
This commit is contained in:
chs 2001-08-04 07:44:52 +00:00
parent ea127ad258
commit 6bc422a7ee
4 changed files with 26 additions and 58 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.58 2001/06/02 18:09:09 chs Exp $ */
/* $NetBSD: vm_machdep.c,v 1.59 2001/08/04 07:44:52 chs Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -166,11 +166,8 @@ pagemove(from, to, size)
if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
panic("pagemove 3");
#endif
pmap_remove(pmap_kernel(),
(vaddr_t)from, (vaddr_t)from + PAGE_SIZE);
pmap_enter(pmap_kernel(),
(vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE,
VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
@ -338,7 +335,7 @@ vmapbuf(bp, len)
struct buf *bp;
vm_size_t len;
{
struct pmap *upmap, *kpmap;
struct pmap *upmap;
vaddr_t uva; /* User VA (map from) */
vaddr_t kva; /* Kernel VA (new to) */
paddr_t pa; /* physical address */
@ -354,12 +351,10 @@ vmapbuf(bp, len)
bp->b_data = (caddr_t)(kva + off);
upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
kpmap = vm_map_pmap(phys_map);
do {
if (pmap_extract(upmap, uva, &pa) == FALSE)
panic("vmapbuf: null page frame");
pmap_enter(kpmap, kva, pa, VM_PROT_READ|VM_PROT_WRITE,
PMAP_WIRED);
pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
len -= PAGE_SIZE;
@ -385,10 +380,7 @@ vunmapbuf(bp, len)
off = (vaddr_t)bp->b_data - kva;
len = m68k_round_page(off + len);
/*
* pmap_remove() is unnecessary here, as kmem_free_wakeup()
* will do it for us.
*/
pmap_kremove(kva, len);
uvm_km_free_wakeup(phys_map, kva, len);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.36 2001/06/02 18:09:11 chs Exp $ */
/* $NetBSD: vm_machdep.c,v 1.37 2001/08/04 07:45:42 chs Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -167,11 +167,8 @@ pagemove(from, to, size)
if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
panic("pagemove 3");
#endif
pmap_remove(pmap_kernel(),
(vaddr_t)from, (vaddr_t)from + PAGE_SIZE);
pmap_enter(pmap_kernel(),
(vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE,
VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
@ -336,7 +333,7 @@ vmapbuf(bp, len)
struct buf *bp;
vsize_t len;
{
struct pmap *upmap, *kpmap;
struct pmap *upmap;
vm_offset_t uva; /* User VA (map from) */
vm_offset_t kva; /* Kernel VA (new to) */
vm_offset_t pa; /* physical address */
@ -352,12 +349,10 @@ vmapbuf(bp, len)
bp->b_data = (caddr_t)(kva + off);
upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
kpmap = vm_map_pmap(phys_map);
do {
if (pmap_extract(upmap, uva, &pa) == FALSE)
panic("vmapbuf: null page frame");
pmap_enter(kpmap, kva, pa, VM_PROT_READ|VM_PROT_WRITE,
PMAP_WIRED);
pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
len -= PAGE_SIZE;
@ -383,10 +378,7 @@ vunmapbuf(bp, len)
off = (vm_offset_t)bp->b_data - kva;
len = m68k_round_page(off + len);
/*
* pmap_remove() is unnecessary here, as kmem_free_wakeup()
* will do it for us.
*/
pmap_kremove(kva, len);
uvm_km_free_wakeup(phys_map, kva, len);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;

View File

@ -1,5 +1,5 @@
/* $NetBSD: vm_machdep.c,v 1.7 2001/06/02 18:09:14 chs Exp $ */
/* $NetBSD: vm_machdep.c,v 1.7 2001/06/02 18:09:14 chs Exp $ */
/* $NetBSD: vm_machdep.c,v 1.8 2001/08/04 07:46:22 chs Exp $ */
/* $NetBSD: vm_machdep.c,v 1.8 2001/08/04 07:46:22 chs Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -44,7 +44,7 @@
*/
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.7 2001/06/02 18:09:14 chs Exp $");
__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.8 2001/08/04 07:46:22 chs Exp $");
#include "opt_compat_hpux.h"
@ -236,11 +236,8 @@ pagemove(from, to, size)
if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
panic("pagemove 3");
#endif
pmap_remove(pmap_kernel(),
(vaddr_t)from, (vaddr_t)from + PAGE_SIZE);
pmap_enter(pmap_kernel(),
(vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE,
VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ | VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
@ -309,7 +306,7 @@ vmapbuf(bp, len)
struct buf *bp;
vsize_t len;
{
struct pmap *upmap, *kpmap;
struct pmap *upmap;
vaddr_t uva; /* User VA (map from) */
vaddr_t kva; /* Kernel VA (new to) */
paddr_t pa; /* physical address */
@ -325,12 +322,10 @@ vmapbuf(bp, len)
bp->b_data = (caddr_t)(kva + off);
upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
kpmap = vm_map_pmap(phys_map);
do {
if (pmap_extract(upmap, uva, &pa) == FALSE)
panic("vmapbuf: null page frame");
pmap_enter(kpmap, kva, pa, VM_PROT_READ|VM_PROT_WRITE,
PMAP_WIRED);
pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
len -= PAGE_SIZE;
@ -356,10 +351,7 @@ vunmapbuf(bp, len)
off = (vaddr_t)bp->b_data - kva;
len = m68k_round_page(off + len);
/*
* pmap_remove() is unnecessary here, as kmem_free_wakeup()
* will do it for us.
*/
pmap_kremove(kva, len);
uvm_km_free_wakeup(phys_map, kva, len);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;

View File

@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.50 2001/06/02 18:09:15 chs Exp $ */
/* $NetBSD: vm_machdep.c,v 1.51 2001/08/04 07:46:56 chs Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -223,11 +223,8 @@ pagemove(from, to, size)
if (pmap_extract(pmap_kernel(), (vaddr_t)to, NULL) == TRUE)
panic("pagemove 3");
#endif
pmap_remove(pmap_kernel(),
(vaddr_t)from, (vaddr_t)from + PAGE_SIZE);
pmap_enter(pmap_kernel(),
(vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE,
VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
pmap_kremove((vaddr_t)from, PAGE_SIZE);
pmap_kenter_pa((vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
@ -296,7 +293,7 @@ vmapbuf(bp, len)
struct buf *bp;
vsize_t len;
{
struct pmap *upmap, *kpmap;
struct pmap *upmap;
vaddr_t uva; /* User VA (map from) */
vaddr_t kva; /* Kernel VA (new to) */
paddr_t pa; /* physical address */
@ -312,12 +309,10 @@ vmapbuf(bp, len)
bp->b_data = (caddr_t)(kva + off);
upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
kpmap = vm_map_pmap(phys_map);
do {
if (pmap_extract(upmap, uva, &pa) == FALSE)
panic("vmapbuf: null page frame");
pmap_enter(kpmap, kva, pa, VM_PROT_READ|VM_PROT_WRITE,
PMAP_WIRED);
pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE);
uva += PAGE_SIZE;
kva += PAGE_SIZE;
len -= PAGE_SIZE;
@ -343,10 +338,7 @@ vunmapbuf(bp, len)
off = (vaddr_t)bp->b_data - kva;
len = m68k_round_page(off + len);
/*
* pmap_remove() is unnecessary here, as kmem_free_wakeup()
* will do it for us.
*/
pmap_kremove(kva, len);
uvm_km_free_wakeup(phys_map, kva, len);
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;