Undo changes which make the pmap_xxxx_page() functions re-entrant; just block
all interrupts which might cause re-entrancy. Allow virtual addresses which are
not managed by the MI VM system to participate in the PV system. Remove a few
frivolous TLB cache flushes.
commit 3f6f86d875
parent a71bce3f88
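All three functions touched here (bus_peek(), pmap_copy_page(), pmap_zero_page())
end up with the same shape: a single tmp_vpages_inuse counter, claimed and released
inside one splimp()/splx() section, with a panic() if the temporary pages are ever
found busy. Below is a minimal user-space C sketch of that discipline; the
splimp/splx/panic stubs and the with_tmp_vpages() helper are illustrative
stand-ins for the kernel primitives, not code from this commit.

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative stand-ins for the kernel primitives used in the diff.
 * A real splimp() raises the interrupt priority level and returns the
 * old one; splx() restores it; panic() halts the system.
 */
static int splimp(void) { return 0; }
static void splx(int s) { (void)s; }
static void panic(const char *msg)
{
	fprintf(stderr, "panic: %s\n", msg);
	abort();
}

static int tmp_vpages_inuse;	/* Temporary virtual pages are in use */

/*
 * Claim the temporary vpages, run one map/copy/unmap operation, and
 * release them.  Interrupts stay blocked for the whole section, so the
 * operation can never be re-entered and the old re-entrancy bookkeeping
 * (saving and restoring previous mappings) is unnecessary.
 */
static void with_tmp_vpages(void (*op)(void))
{
	int s;

	s = splimp();
	if (tmp_vpages_inuse)
		panic("tmp vpages are in use.");
	tmp_vpages_inuse++;

	op();	/* e.g. pmap_enter_kernel() / copypage() / pmap_remove_kernel() */

	--tmp_vpages_inuse;
	splx(s);
}

static void copy_op(void)
{
	/* The real code maps src R/O and dst R/W, then copies one page. */
}

int main(void)
{
	with_tmp_vpages(copy_op);
	return 0;
}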
--- bus_subr.c	1.6
+++ bus_subr.c	1.7
@@ -1,4 +1,4 @@
-/* $NetBSD: bus_subr.c,v 1.6 1997/05/28 04:27:00 jeremy Exp $ */
+/* $NetBSD: bus_subr.c,v 1.7 1997/05/30 07:02:14 jeremy Exp $ */
 
 /*-
  * Copyright (c) 1996 The NetBSD Foundation, Inc.
@@ -132,11 +132,9 @@ bus_print(args, name)
 
 label_t *nofault;
 
-/* This is defined in pmap.c */
+/* These are defined in pmap.c */
 extern vm_offset_t tmp_vpages[];
-extern u_char tmp_vpage0_inuse, tmp_vpage1_inuse;
-
-vm_offset_t pmap_extract_kernel(vm_offset_t);
+extern int tmp_vpages_inuse;
 
 static const int bustype_to_patype[4] = {
 	0,	/* OBMEM */
@@ -153,11 +151,12 @@ static const int bustype_to_patype[4] = {
  *	Try the access using peek_*
  *	Clean up temp. mapping
  */
-int bus_peek(bustype, paddr, sz)
+int
+bus_peek(bustype, paddr, sz)
 	int bustype, paddr, sz;
 {
 	int offset, rtn, s;
-	vm_offset_t va_page, oldpaddr;
+	vm_offset_t va_page;
 	caddr_t va;
 
 	/* XXX - Must fix for VME support... */
@@ -170,11 +169,9 @@ int bus_peek(bustype, paddr, sz)
 	paddr |= PMAP_NC;
 
 	s = splimp();
-	oldpaddr = 0;	/*XXXgcc*/
-	if (tmp_vpage1_inuse++) {
-		oldpaddr = pmap_extract_kernel(tmp_vpages[1]);
-	}
-	splx(s);
+	if (tmp_vpages_inuse)
+		panic("bus_peek: temporary vpages are in use.");
+	tmp_vpages_inuse++;
 
 	va_page = tmp_vpages[1];
 	va = (caddr_t) va_page + offset;
@@ -197,16 +194,11 @@ int bus_peek(bustype, paddr, sz)
 		rtn = -1;
 	}
 
-	s = splimp();
-	if (--tmp_vpage1_inuse) {
-		pmap_enter(pmap_kernel(), tmp_vpages[1], oldpaddr,
-		    VM_PROT_READ|VM_PROT_WRITE, TRUE);
-	} else {
-		pmap_remove(pmap_kernel(), va_page, va_page + NBPG);
-	}
+	pmap_remove(pmap_kernel(), va_page, va_page + NBPG);
+	--tmp_vpages_inuse;
 	splx(s);
 
-	return rtn;
+	return (rtn);
 }
--- pmap.c	1.23
+++ pmap.c	1.24
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.23 1997/05/28 04:28:52 jeremy Exp $ */
+/* $NetBSD: pmap.c,v 1.24 1997/05/30 07:02:15 jeremy Exp $ */
 
 /*-
  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@@ -265,8 +265,7 @@ static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
  */
 static boolean_t pv_initialized = FALSE, /* PV system has been initialized. */
 	bootstrap_alloc_enabled = FALSE; /*Safe to use pmap_bootstrap_alloc().*/
-u_char tmp_vpage0_inuse,	/* Temporary virtual page 0 is in use */
-	tmp_vpage1_inuse;	/* Temporary virtual page 1 is in use */
+int tmp_vpages_inuse;	/* Temporary virtual pages are in use */
 
 /*
  * XXX: For now, retain the traditional variables that were
@@ -284,8 +283,7 @@ vm_offset_t virtual_contig_end;
 vm_offset_t avail_next;
 
 /* These are used by pmap_copy_page(), etc. */
-vm_offset_t tmp_vpages[2];	/* Note: tmp_vpage[0] MUST be mapped R/O */
-				/*       tmp_vpage[1] MUST be mapped R/W */
+vm_offset_t tmp_vpages[2];
 
 /*
  * The 3/80 is the only member of the sun3x family that has non-contiguous
@@ -834,7 +832,6 @@ pmap_bootstrap(nextva)
 	virtual_avail += NBPG;
 	tmp_vpages[1] = virtual_avail;
 	virtual_avail += NBPG;
-	tmp_vpage0_inuse = tmp_vpage1_inuse = 0;
 
 	/** Initialize the PV system **/
 	pmap_init_pv();
@@ -2081,68 +2078,50 @@ pmap_enter_kernel(va, pa, prot)
 		/*
 		 * If the PTE is already mapped to an address and it differs
 		 * from the address requested, unlink it from the PV list.
-		 *
-		 * This only applies to mappings within virtual_avail
-		 * and VM_MAX_KERNEL_ADDRESS.  All others are not requests
-		 * from the VM system and should not be part of the PV system.
 		 */
-		if ((va >= virtual_avail) && (va < VM_MAX_KERNEL_ADDRESS)) {
-			old_pa = MMU_PTE_PA(*pte);
-			if (pa != old_pa) {
-				if (is_managed(old_pa)) {
-					/* XXX - Make this into a function call? */
-					pv = pa2pv(old_pa);
-					pv_idx = pv->pv_idx;
-					if (pv_idx == pte_idx) {
-						pv->pv_idx = pvebase[pte_idx].pve_next;
-					} else {
-						while (pvebase[pv_idx].pve_next != pte_idx)
-							pv_idx = pvebase[pv_idx].pve_next;
-						pvebase[pv_idx].pve_next =
-							pvebase[pte_idx].pve_next;
-					}
-					/* Save modified/reference bits */
-					pv->pv_flags |= (u_short) pte->attr.raw;
-				}
-				if (is_managed(pa))
-					insert = TRUE;
-				else
-					insert = FALSE;
-				/*
-				 * Clear out any old bits in the PTE.
-				 */
-				pte->attr.raw = MMU_DT_INVALID;
-			} else {
-				/*
-				 * Old PA and new PA are the same.  No need to relink
-				 * the mapping within the PV list.
-				 */
-				insert = FALSE;
-
-				/*
-				 * Save any mod/ref bits on the PTE.
-				 */
-				pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
-			}
+		old_pa = MMU_PTE_PA(*pte);
+		if (pa != old_pa) {
+			if (is_managed(old_pa)) {
+				/* XXX - Make this into a function call? */
+				pv = pa2pv(old_pa);
+				pv_idx = pv->pv_idx;
+				if (pv_idx == pte_idx) {
+					pv->pv_idx = pvebase[pte_idx].pve_next;
+				} else {
+					while (pvebase[pv_idx].pve_next != pte_idx)
+						pv_idx = pvebase[pv_idx].pve_next;
+					pvebase[pv_idx].pve_next =
+						pvebase[pte_idx].pve_next;
+				}
+				/* Save modified/reference bits */
+				pv->pv_flags |= (u_short) pte->attr.raw;
+			}
+			if (is_managed(pa))
+				insert = TRUE;
+			else
+				insert = FALSE;
+			/*
+			 * Clear out any old bits in the PTE.
+			 */
+			pte->attr.raw = MMU_DT_INVALID;
 		} else {
 			/*
-			 * If the VA lies below virtual_avail or beyond
-			 * VM_MAX_KERNEL_ADDRESS, it is not a request by the VM
-			 * system and hence does not need to be linked into the PV
-			 * system.
+			 * Old PA and new PA are the same.  No need to relink
+			 * the mapping within the PV list.
 			 */
 			insert = FALSE;
-			pte->attr.raw = MMU_DT_INVALID;
+
+			/*
+			 * Save any mod/ref bits on the PTE.
+			 */
+			pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
 		}
 	} else {
 		pte->attr.raw = MMU_DT_INVALID;
 		was_valid = FALSE;
-		if ((va >= virtual_avail) && (va < VM_MAX_KERNEL_ADDRESS)) {
-			if (is_managed(pa))
-				insert = TRUE;
-			else
-				insert = FALSE;
-		} else
+		if (is_managed(pa))
+			insert = TRUE;
+		else
 			insert = FALSE;
 	}
 
@@ -2454,42 +2433,36 @@ pmap_copy(pmap_a, pmap_b, dst, len, src)
  * Copy the contents of one physical page into another.
  *
  * This function makes use of two virtual pages allocated in pmap_bootstrap()
- * to map the two specified physical pages into the kernel address space.  It
- * then uses bcopy() to copy one into the other.
+ * to map the two specified physical pages into the kernel address space.
  *
  * Note: We could use the transparent translation registers to make the
  * mappings.  If we do so, be sure to disable interrupts before using them.
  */
 void
-pmap_copy_page(src, dst)
-	vm_offset_t src, dst;
+pmap_copy_page(srcpa, dstpa)
+	vm_offset_t srcpa, dstpa;
 {
+	vm_offset_t srcva, dstva;
 	int s;
-	vm_offset_t oldsrc, olddst;
+
+	srcva = tmp_vpages[0];
+	dstva = tmp_vpages[1];
 
 	s = splimp();
-	oldsrc = olddst = 0;	/*XXXgcc*/
-	if (tmp_vpage0_inuse++) {
-		oldsrc = pmap_extract_kernel(tmp_vpages[0]);
-	}
-	if (tmp_vpage1_inuse++) {
-		olddst = pmap_extract_kernel(tmp_vpages[1]);
-	}
-	splx(s);
+	if (tmp_vpages_inuse++)
+		panic("pmap_copy_page: temporary vpages are in use.");
 
 	/* Map pages as non-cacheable to avoid cache polution? */
-	pmap_enter_kernel(tmp_vpages[0], src, VM_PROT_READ);
-	pmap_enter_kernel(tmp_vpages[1], dst, VM_PROT_READ|VM_PROT_WRITE);
-	copypage((char *) tmp_vpages[0], (char *) tmp_vpages[1]);
+	pmap_enter_kernel(srcva, srcpa, VM_PROT_READ);
+	pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
 
-	s = splimp();
-	if (--tmp_vpage0_inuse) {
-		pmap_enter_kernel(tmp_vpages[0], oldsrc, VM_PROT_READ);
-	}
-	if (--tmp_vpage1_inuse) {
-		pmap_enter_kernel(tmp_vpages[1], olddst,
-		    VM_PROT_READ|VM_PROT_WRITE);
-	}
+	/* Hand-optimized version of bcopy(src, dst, NBPG) */
+	copypage((char *) srcva, (char *) dstva);
+
+	pmap_remove_kernel(srcva, srcva + NBPG);
+	pmap_remove_kernel(dstva, dstva + NBPG);
+
+	--tmp_vpages_inuse;
 	splx(s);
 }
@@ -2498,31 +2471,33 @@ pmap_copy_page(src, dst)
  * Zero the contents of the specified physical page.
  *
  * Uses one of the virtual pages allocated in pmap_boostrap()
- * to map the specified page into the kernel address space.  Then uses
- * bzero() to zero out the page.
+ * to map the specified page into the kernel address space.
  */
 void
-pmap_zero_page(pa)
-	vm_offset_t pa;
+pmap_zero_page(dstpa)
+	vm_offset_t dstpa;
 {
+	vm_offset_t dstva;
 	int s;
-	vm_offset_t oldpa;
 
+	dstva = tmp_vpages[1];
 	s = splimp();
-	oldpa = 0;	/*XXXgcc*/
-	if (tmp_vpage1_inuse++) {
-		oldpa = pmap_extract_kernel(tmp_vpages[1]);
-	}
-	splx(s);
+	if (tmp_vpages_inuse)
+		panic("pmap_zero_page: temporary vpages are in use.");
+	tmp_vpages_inuse++;
 
-	pmap_enter_kernel(tmp_vpages[1], pa, VM_PROT_READ|VM_PROT_WRITE);
-	zeropage((char *) tmp_vpages[1]);
+	/* The comments in pmap_copy_page() above apply here also. */
+	pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
 
-	s = splimp();
-	if (--tmp_vpage1_inuse) {
-		pmap_enter_kernel(tmp_vpages[1], oldpa,
-		    VM_PROT_READ|VM_PROT_WRITE);
-	}
+	/* Hand-optimized version of bzero(ptr, NBPG) */
+	zeropage((char *) dstva);
+
+#if 0
+	/* XXX - See comment above about the PV problem. */
+	pmap_remove_kernel(dstva, dstva + NBPG);
+#endif
+
+	--tmp_vpages_inuse;
 	splx(s);
 }
@@ -3053,10 +3028,11 @@ pmap_remove_kernel(sva, eva)
 	idx = _btop(sva - KERNBASE);
 	eidx = _btop(eva - KERNBASE);
 
-	while (idx < eidx)
+	while (idx < eidx) {
 		pmap_remove_pte(&kernCbase[idx++]);
-	/* Always flush the ATC when maniplating the kernel address space. */
-	TBIAS();
+		TBIS(sva);
+		sva += NBPG;
+	}
 }
 
 /* pmap_remove			INTERFACE
@@ -3539,7 +3515,8 @@ pmap_pa_exists(pa)
  **
  * This is called by locore.s:cpu_switch when we are switching to a
  * new process.  This should load the MMU context for the new proc.
- * XXX - Later, this should be done directly in locore.s
+ *
+ * Note: Only used when locore.s is compiled with PMAP_DEBUG.
  */
 void
 pmap_activate(pmap)