Bring back pmap_kernel(), for now always inlined as a pointer to
kernel_pmap_store.

parent c668fc0161
commit 22cefc03bc
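The change is mechanical in every file below: each direct use of the
kernel_pmap pointer becomes a call to pmap_kernel(). The pmap.h hunks in
this commit define the macro, which for now still expands to the address
of the statically allocated kernel_pmap_store. A before/after sketch,
abridged from those header hunks:

    /* Before: each port exported the kernel pmap as a variable or macro. */
    extern struct pmap kernel_pmap_store;
    #define kernel_pmap (&kernel_pmap_store)

    /* After: the pmap_kernel() interface returns; for now it is always
     * inlined as a pointer to kernel_pmap_store, so callers only change
     * spelling: "if (pmap == kernel_pmap)" becomes
     * "if (pmap == pmap_kernel())". */
    #define pmap_kernel() (&kernel_pmap_store)
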
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.3 1995/03/24 15:07:15 cgd Exp $ */
+/* $NetBSD: pmap.c,v 1.4 1995/04/10 12:41:29 mycroft Exp $ */

 /*
 * Copyright (c) 1991, 1993
@@ -212,7 +212,7 @@ extern vm_offset_t pager_sva, pager_eva;
 * Given a map and a machine independent protection code,
 * convert to an hp300 protection code.
 */
-#define pte_prot(m, p) (protection_codes[m == kernel_pmap ? 0 : 1][p])
+#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p])
 int protection_codes[2][8];

 /*
@@ -339,14 +339,14 @@ pmap_bootstrap(firstaddr, ptaddr)
 */
 Sysptmapsize = roundup(howmany(Sysmapsize, NPTEPG), NPTEPG);
 valloc(Sysptmap, pt_entry_t, Sysptmapsize);
-kernel_pmap->pm_stab = Sysptmap;
+pmap_kernel()->pm_stab = Sysptmap;

 /*
 * Allocate a level 3 PTE table for the kernel.
 * Contains Sysmapsize PTEs.
 */
 valloc(Sysmap, pt_entry_t, Sysmapsize);
-kernel_pmap->pm_ptab = Sysmap;
+pmap_kernel()->pm_ptab = Sysmap;

 /*
 * Allocate memory for page attributes.
@@ -415,8 +415,8 @@ pmap_bootstrap(firstaddr, ptaddr)
 virtual_avail = VM_MIN_KERNEL_ADDRESS;
 virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;

-simple_lock_init(&kernel_pmap_store.pm_lock);
-kernel_pmap_store.pm_count = 1;
+simple_lock_init(&pmap_kernel()->pm_lock);
+pmap_kernel()->pm_count = 1;

 /*
 * Set up curproc's (i.e. proc 0's) PCB such that the ptbr
@@ -574,7 +574,7 @@ pmap_map(virt, start, end, prot)
 printf("pmap_map(%lx, %lx, %lx, %lx)\n", virt, start, end, prot);
 #endif
 while (start < end) {
-pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+pmap_enter(pmap_kernel(), virt, start, prot, FALSE);
 virt += PAGE_SIZE;
 start += PAGE_SIZE;
 }
@@ -951,7 +951,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 return;

 #ifdef PMAPSTATS
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 enter_stats.kernel++;
 else
 enter_stats.user++;
@@ -1036,7 +1036,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 * on this PT page. PT pages are wired down as long as there
 * is a valid mapping in the page.
 */
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 (void) vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), FALSE);

@@ -1138,7 +1138,7 @@ validate:
 if (!wired && active_pmap(pmap))
 TBIS((caddr_t)va);
 #ifdef DEBUG
-if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
+if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
 pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
 #endif
 }
@@ -1296,7 +1296,7 @@ pmap_collect(pmap)
 pt_entry_t *ste;
 int opmapdebug;
 #endif
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 return;

 #ifdef DEBUG
@@ -1315,10 +1315,10 @@ pmap_collect(pmap)
 * page table pages.
 */
 pv = pa_to_pvh(pa);
-if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
+if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
 continue;
 do {
-if (pv->pv_ptpte && pv->pv_ptpmap == kernel_pmap)
+if (pv->pv_ptpte && pv->pv_ptpmap == pmap_kernel())
 break;
 } while (pv = pv->pv_next);
 if (pv == NULL)
@@ -1388,7 +1388,7 @@ ok:
 if (*ste)
 printf("collect: kernel STE at %lx still valid (%lx)\n",
 ste, *ste);
-ste = &Sysptmap[(pt_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
+ste = &Sysptmap[(pt_entry_t *)ste-pmap_ste(pmap_kernel(), 0)];
 if (*ste)
 printf("collect: kernel PTmap at %lx still valid (%lx)\n",
 ste, *ste);
@@ -1486,7 +1486,7 @@ pmap_pageable(pmap, sva, eva, pageable)
 * - we are called with only one page at a time
 * - PT pages have only one pv_table entry
 */
-if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
+if (pmap == pmap_kernel() && pageable && sva + PAGE_SIZE == eva) {
 register pv_entry_t pv;
 register vm_offset_t pa;

@@ -1681,7 +1681,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * pmap_pageable which clears the modify bit for the
 * PT page.
 */
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 (void) vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), TRUE);
 #ifdef DEBUG
@@ -1758,7 +1758,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * reference count on the segment table as well,
 * freeing it if it is now empty.
 */
-if (ptpmap != kernel_pmap) {
+if (ptpmap != pmap_kernel()) {
 #ifdef DEBUG
 if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
 printf("remove: stab %lx, refcnt %d\n",
@@ -1799,7 +1799,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * XXX this should be unnecessary as we have been
 * flushing individual mappings as we go.
 */
-if (ptpmap == kernel_pmap)
+if (ptpmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -1901,7 +1901,7 @@ pmap_changebit(pa, bit, setem)
 #endif
 for (; pv; pv = pv->pv_next) {
 #ifdef DEBUG
-toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
+toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
 #endif
 va = pv->pv_va;

@@ -1995,7 +1995,7 @@ pmap_enter_ptpage(pmap, va)
 * free list and map it into the kernel page table map (via
 * pmap_enter).
 */
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 register struct kpt_page *kpt;

 s = splimp();
@@ -2008,7 +2008,7 @@ pmap_enter_ptpage(pmap, va)
 if (pmapdebug & PDB_COLLECT)
 printf("enter: no KPT pages, collecting...\n");
 #endif
-pmap_collect(kernel_pmap);
+pmap_collect(pmap_kernel());
 if ((kpt = kpt_free_list) == (struct kpt_page *)0)
 panic("pmap_enter_ptpage: can't get KPT page");
 }
@@ -2046,7 +2046,7 @@ pmap_enter_ptpage(pmap, va)
 printf("vm_fault(pt_map, %lx, RW, 0) -> %d\n", va, s);
 panic("pmap_enter: vm_fault failed");
 }
-ptpa = pmap_extract(kernel_pmap, va);
+ptpa = pmap_extract(pmap_kernel(), va);
 /*
 * Mark the page clean now to avoid its pageout (and
 * hence creation of a pager) between now and when it
@@ -2067,7 +2067,7 @@ pmap_enter_ptpage(pmap, va)
 if (pv) {
 pv->pv_flags |= PV_PTPAGE;
 do {
-if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
+if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
 break;
 } while (pv = pv->pv_next);
 }
@@ -2091,8 +2091,8 @@ pmap_enter_ptpage(pmap, va)
 * release them. We also avoid the overhead of vm_map_pageable.
 */
 *ste = ((ptpa >> PGSHIFT) << PG_SHIFT) | PG_KRE | PG_KWE | PG_V |
-(pmap == kernel_pmap ? PG_ASM : 0);
-if (pmap != kernel_pmap) {
+(pmap == pmap_kernel() ? PG_ASM : 0);
+if (pmap != pmap_kernel()) {
 pmap->pm_sref++;
 #ifdef DEBUG
 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
@@ -2104,7 +2104,7 @@ pmap_enter_ptpage(pmap, va)
 /*
 * Flush stale TLB info.
 */
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -2140,8 +2140,8 @@ pmap_check_wiring(str, va)
 register int count;

 va = trunc_page(va);
-if (!pmap_ste_v(kernel_pmap, va) ||
-!pmap_pte_v(pmap_pte(kernel_pmap, va)))
+if (!pmap_ste_v(pmap_kernel(), va) ||
+!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
 return;

 if (!vm_map_lookup_entry(pt_map, va, &entry)) {

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.old.c,v 1.3 1995/03/24 15:07:15 cgd Exp $ */
+/* $NetBSD: pmap.old.c,v 1.4 1995/04/10 12:41:29 mycroft Exp $ */

 /*
 * Copyright (c) 1991, 1993
@@ -212,7 +212,7 @@ extern vm_offset_t pager_sva, pager_eva;
 * Given a map and a machine independent protection code,
 * convert to an hp300 protection code.
 */
-#define pte_prot(m, p) (protection_codes[m == kernel_pmap ? 0 : 1][p])
+#define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p])
 int protection_codes[2][8];

 /*
@@ -339,14 +339,14 @@ pmap_bootstrap(firstaddr, ptaddr)
 */
 Sysptmapsize = roundup(howmany(Sysmapsize, NPTEPG), NPTEPG);
 valloc(Sysptmap, pt_entry_t, Sysptmapsize);
-kernel_pmap->pm_stab = Sysptmap;
+pmap_kernel()->pm_stab = Sysptmap;

 /*
 * Allocate a level 3 PTE table for the kernel.
 * Contains Sysmapsize PTEs.
 */
 valloc(Sysmap, pt_entry_t, Sysmapsize);
-kernel_pmap->pm_ptab = Sysmap;
+pmap_kernel()->pm_ptab = Sysmap;

 /*
 * Allocate memory for page attributes.
@@ -415,8 +415,8 @@ pmap_bootstrap(firstaddr, ptaddr)
 virtual_avail = VM_MIN_KERNEL_ADDRESS;
 virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;

-simple_lock_init(&kernel_pmap_store.pm_lock);
-kernel_pmap_store.pm_count = 1;
+simple_lock_init(&pmap_kernel()->pm_lock);
+pmap_kernel()->pm_count = 1;

 /*
 * Set up curproc's (i.e. proc 0's) PCB such that the ptbr
@@ -574,7 +574,7 @@ pmap_map(virt, start, end, prot)
 printf("pmap_map(%lx, %lx, %lx, %lx)\n", virt, start, end, prot);
 #endif
 while (start < end) {
-pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+pmap_enter(pmap_kernel(), virt, start, prot, FALSE);
 virt += PAGE_SIZE;
 start += PAGE_SIZE;
 }
@@ -951,7 +951,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 return;

 #ifdef PMAPSTATS
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 enter_stats.kernel++;
 else
 enter_stats.user++;
@@ -1036,7 +1036,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 * on this PT page. PT pages are wired down as long as there
 * is a valid mapping in the page.
 */
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 (void) vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), FALSE);

@@ -1138,7 +1138,7 @@ validate:
 if (!wired && active_pmap(pmap))
 TBIS((caddr_t)va);
 #ifdef DEBUG
-if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
+if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
 pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
 #endif
 }
@@ -1296,7 +1296,7 @@ pmap_collect(pmap)
 pt_entry_t *ste;
 int opmapdebug;
 #endif
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 return;

 #ifdef DEBUG
@@ -1315,10 +1315,10 @@ pmap_collect(pmap)
 * page table pages.
 */
 pv = pa_to_pvh(pa);
-if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
+if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
 continue;
 do {
-if (pv->pv_ptpte && pv->pv_ptpmap == kernel_pmap)
+if (pv->pv_ptpte && pv->pv_ptpmap == pmap_kernel())
 break;
 } while (pv = pv->pv_next);
 if (pv == NULL)
@@ -1388,7 +1388,7 @@ ok:
 if (*ste)
 printf("collect: kernel STE at %lx still valid (%lx)\n",
 ste, *ste);
-ste = &Sysptmap[(pt_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
+ste = &Sysptmap[(pt_entry_t *)ste-pmap_ste(pmap_kernel(), 0)];
 if (*ste)
 printf("collect: kernel PTmap at %lx still valid (%lx)\n",
 ste, *ste);
@@ -1486,7 +1486,7 @@ pmap_pageable(pmap, sva, eva, pageable)
 * - we are called with only one page at a time
 * - PT pages have only one pv_table entry
 */
-if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
+if (pmap == pmap_kernel() && pageable && sva + PAGE_SIZE == eva) {
 register pv_entry_t pv;
 register vm_offset_t pa;

@@ -1681,7 +1681,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * pmap_pageable which clears the modify bit for the
 * PT page.
 */
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 (void) vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), TRUE);
 #ifdef DEBUG
@@ -1758,7 +1758,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * reference count on the segment table as well,
 * freeing it if it is now empty.
 */
-if (ptpmap != kernel_pmap) {
+if (ptpmap != pmap_kernel()) {
 #ifdef DEBUG
 if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
 printf("remove: stab %lx, refcnt %d\n",
@@ -1799,7 +1799,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * XXX this should be unnecessary as we have been
 * flushing individual mappings as we go.
 */
-if (ptpmap == kernel_pmap)
+if (ptpmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -1901,7 +1901,7 @@ pmap_changebit(pa, bit, setem)
 #endif
 for (; pv; pv = pv->pv_next) {
 #ifdef DEBUG
-toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
+toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
 #endif
 va = pv->pv_va;

@@ -1995,7 +1995,7 @@ pmap_enter_ptpage(pmap, va)
 * free list and map it into the kernel page table map (via
 * pmap_enter).
 */
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 register struct kpt_page *kpt;

 s = splimp();
@@ -2008,7 +2008,7 @@ pmap_enter_ptpage(pmap, va)
 if (pmapdebug & PDB_COLLECT)
 printf("enter: no KPT pages, collecting...\n");
 #endif
-pmap_collect(kernel_pmap);
+pmap_collect(pmap_kernel());
 if ((kpt = kpt_free_list) == (struct kpt_page *)0)
 panic("pmap_enter_ptpage: can't get KPT page");
 }
@@ -2046,7 +2046,7 @@ pmap_enter_ptpage(pmap, va)
 printf("vm_fault(pt_map, %lx, RW, 0) -> %d\n", va, s);
 panic("pmap_enter: vm_fault failed");
 }
-ptpa = pmap_extract(kernel_pmap, va);
+ptpa = pmap_extract(pmap_kernel(), va);
 /*
 * Mark the page clean now to avoid its pageout (and
 * hence creation of a pager) between now and when it
@@ -2067,7 +2067,7 @@ pmap_enter_ptpage(pmap, va)
 if (pv) {
 pv->pv_flags |= PV_PTPAGE;
 do {
-if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
+if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
 break;
 } while (pv = pv->pv_next);
 }
@@ -2091,8 +2091,8 @@ pmap_enter_ptpage(pmap, va)
 * release them. We also avoid the overhead of vm_map_pageable.
 */
 *ste = ((ptpa >> PGSHIFT) << PG_SHIFT) | PG_KRE | PG_KWE | PG_V |
-(pmap == kernel_pmap ? PG_ASM : 0);
-if (pmap != kernel_pmap) {
+(pmap == pmap_kernel() ? PG_ASM : 0);
+if (pmap != pmap_kernel()) {
 pmap->pm_sref++;
 #ifdef DEBUG
 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
@@ -2104,7 +2104,7 @@ pmap_enter_ptpage(pmap, va)
 /*
 * Flush stale TLB info.
 */
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -2140,8 +2140,8 @@ pmap_check_wiring(str, va)
 register int count;

 va = trunc_page(va);
-if (!pmap_ste_v(kernel_pmap, va) ||
-!pmap_pte_v(pmap_pte(kernel_pmap, va)))
+if (!pmap_ste_v(pmap_kernel(), va) ||
+!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
 return;

 if (!vm_map_lookup_entry(pt_map, va, &entry)) {

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.2 1995/03/28 18:13:54 jtc Exp $ */
+/* $NetBSD: pmap.h,v 1.3 1995/04/10 12:41:38 mycroft Exp $ */

 /*
 * Copyright (c) 1987 Carnegie-Mellon University
@@ -70,9 +70,9 @@ typedef struct pmap *pmap_t;

 extern struct pmap kernel_pmap_store;

-#define kernel_pmap (&kernel_pmap_store)
+#define pmap_kernel() (&kernel_pmap_store)
 #define active_pmap(pm) \
-((pm) == kernel_pmap || (pm) == curproc->p_vmspace->vm_map.pmap)
+((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)

 /*
 * Macros for speed

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.old.h,v 1.2 1995/03/28 18:13:54 jtc Exp $ */
+/* $NetBSD: pmap.old.h,v 1.3 1995/04/10 12:41:38 mycroft Exp $ */

 /*
 * Copyright (c) 1987 Carnegie-Mellon University
@@ -70,9 +70,9 @@ typedef struct pmap *pmap_t;

 extern struct pmap kernel_pmap_store;

-#define kernel_pmap (&kernel_pmap_store)
+#define pmap_kernel() (&kernel_pmap_store)
 #define active_pmap(pm) \
-((pm) == kernel_pmap || (pm) == curproc->p_vmspace->vm_map.pmap)
+((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)

 /*
 * Macros for speed

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.21 1995/04/02 20:38:22 chopps Exp $ */
+/* $NetBSD: pmap.c,v 1.22 1995/04/10 12:41:41 mycroft Exp $ */

 /*
 * Copyright (c) 1991 Regents of the University of California.
@@ -172,7 +172,7 @@ int pmapdebug = PDB_PARANOIA;
 #define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))

 #define active_pmap(pm) \
-((pm) == kernel_pmap || (pm) == curproc->p_vmspace->vm_map.pmap)
+((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)

 /*
 * Given a map and a machine independent protection code,
@@ -208,7 +208,6 @@ u_int *Segtabzero;
 vm_size_t Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;

 struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
 vm_map_t pt_map;

 vm_offset_t avail_start; /* PA of first available physical page */
@@ -335,29 +334,22 @@ pmap_bootstrap(firstaddr, loadaddr)
 */
 amiga_protection_init();

-/*
-* The kernel's pmap is statically allocated so we don't
-* have to use pmap_create, which is unlikely to work
-* correctly at this part of the boot sequence.
-*/
-kernel_pmap = &kernel_pmap_store;
-
 /*
 * Kernel page/segment table allocated in locore,
 * just initialize pointers.
 */
-kernel_pmap->pm_stab = Sysseg;
-kernel_pmap->pm_ptab = Sysmap;
+pmap_kernel()->pm_stab = Sysseg;
+pmap_kernel()->pm_ptab = Sysmap;
 #ifdef M68040
 if (cpu040) {
-kernel_pmap->pm_rtab = Sysseg1;
+pmap_kernel()->pm_rtab = Sysseg1;
 pmap_ishift = SG4_SHIFT2;
 } else
 #endif
 pmap_ishift = SG_ISHIFT;

-simple_lock_init(&kernel_pmap->pm_lock);
-kernel_pmap->pm_count = 1;
+simple_lock_init(&pmap_kernel()->pm_lock);
+pmap_kernel()->pm_count = 1;

 /*
 * Allocate all the submaps we need
@@ -366,7 +358,7 @@ pmap_bootstrap(firstaddr, loadaddr)
 v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);

 va = virtual_avail;
-pte = pmap_pte(kernel_pmap, va);
+pte = pmap_pte(pmap_kernel(), va);

 SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
 SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
@@ -551,7 +543,7 @@ bogons:
 (--kpt_pages)->kpt_next = kpt_free_list;
 kpt_free_list = kpt_pages;
 kpt_pages->kpt_va = addr2;
-kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
+kpt_pages->kpt_pa = pmap_extract(pmap_kernel(), addr2);
 } while (addr != addr2);
 #ifdef DEBUG
 kpt_stats.kpttotal = atop(s);
@@ -673,7 +665,7 @@ pmap_map(virt, start, end, prot)
 printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
 #endif
 while (start < end) {
-pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+pmap_enter(pmap_kernel(), virt, start, prot, FALSE);
 virt += PAGE_SIZE;
 start += PAGE_SIZE;
 }
@@ -907,7 +899,7 @@ pmap_remove(pmap, sva, eva)
 * pmap_pageable which clears the modify bit for the
 * PT page.
 */
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 pte = pmap_pte(pmap, va);
 vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), TRUE);
@@ -1004,7 +996,7 @@ printf ("pmap_remove: PA %08x index %d\n", pa, pa_index(pa));
 * reference count on the segment table as well,
 * freeing it if it is now empty.
 */
-if (ptpmap != kernel_pmap) {
+if (ptpmap != pmap_kernel()) {
 #ifdef DEBUG
 if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
 printf("remove: stab %x, refcnt %d\n",
@@ -1047,7 +1039,7 @@ printf ("pmap_remove: PA %08x index %d\n", pa, pa_index(pa));
 (struct pcb *)curproc->p_addr, 1);
 }
 }
-if (ptpmap == kernel_pmap)
+if (ptpmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -1061,7 +1053,7 @@ printf ("pmap_remove: PA %08x index %d\n", pa, pa_index(pa));
 splx(s);
 }
 if (flushcache) {
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 DCIS();
 #ifdef DEBUG
 remove_stats.sflushes++;
@@ -1236,7 +1228,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 return;

 #ifdef DEBUG
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 enter_stats.kernel++;
 else
 enter_stats.user++;
@@ -1316,7 +1308,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 * on this PT page. PT pages are wired down as long as there
 * is a valid mapping in the page.
 */
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), FALSE);

@@ -1403,7 +1395,7 @@ validate:
 * AMIGA pages in a MACH page.
 */
 #ifdef M68040
-if (cpu040 && pmap == kernel_pmap && va >= AMIGA_UPTBASE &&
+if (cpu040 && pmap == pmap_kernel() && va >= AMIGA_UPTBASE &&
 va < (AMIGA_UPTBASE + AMIGA_UPTMAXSIZE))
 cacheable = FALSE; /* don't cache user page tables */
 #endif
@@ -1436,7 +1428,7 @@ validate:
 if (!wired && active_pmap(pmap))
 TBIS(va);
 #ifdef DEBUG
-if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
+if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) {
 va -= PAGE_SIZE;
 pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
 }
@@ -1592,7 +1584,7 @@ pmap_collect(pmap)
 int *ste;
 int opmapdebug;
 #endif
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 return;

 #ifdef DEBUG
@@ -1609,10 +1601,10 @@ pmap_collect(pmap)
 * page table pages.
 */
 pv = pa_to_pvh(pa);
-if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
+if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
 continue;
 do {
-if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
+if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
 break;
 } while (pv = pv->pv_next);
 if (pv == NULL)
@@ -1679,7 +1671,7 @@ ok:
 if (*ste)
 printf("collect: kernel STE at %x still valid (%x)\n",
 ste, *ste);
-ste = (int *)&Sysptmap[(u_int *)ste-pmap_ste(kernel_pmap, 0)];
+ste = (int *)&Sysptmap[(u_int *)ste-pmap_ste(pmap_kernel(), 0)];
 if (*ste)
 printf("collect: kernel PTmap at %x still valid (%x)\n",
 ste, *ste);
@@ -1771,7 +1763,7 @@ pmap_pageable(pmap, sva, eva, pageable)
 * - we are called with only one page at a time
 * - PT pages have only one pv_table entry
 */
-if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
+if (pmap == pmap_kernel() && pageable && sva + PAGE_SIZE == eva) {
 register pv_entry_t pv;
 register vm_offset_t pa;

@@ -2068,7 +2060,7 @@ pmap_enter_ptpage(pmap, va)
 kmem_alloc(kernel_map, AMIGA_040STSIZE*128);
 /* intialize root table entries */
 sg = (u_int *) pmap->pm_rtab;
-sg_proto = pmap_extract(kernel_pmap,
+sg_proto = pmap_extract(pmap_kernel(),
 (vm_offset_t) pmap->pm_stab) | SG_RW | SG_V;
 #ifdef DEBUG
 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
@@ -2120,7 +2112,7 @@ pmap_enter_ptpage(pmap, va)
 * free list and map it into the kernel page table map (via
 * pmap_enter).
 */
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 register struct kpt_page *kpt;

 s = splimp();
@@ -2133,7 +2125,7 @@ pmap_enter_ptpage(pmap, va)
 if (pmapdebug & PDB_COLLECT)
 printf("enter: no KPT pages, collecting...\n");
 #endif
-pmap_collect(kernel_pmap);
+pmap_collect(pmap_kernel());
 if ((kpt = kpt_free_list) == (struct kpt_page *)0)
 panic("pmap_enter_ptpage: can't get KPT page");
 }
@@ -2168,7 +2160,7 @@ pmap_enter_ptpage(pmap, va)
 if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
 != KERN_SUCCESS)
 panic("pmap_enter: vm_fault failed");
-ptpa = pmap_extract(kernel_pmap, va);
+ptpa = pmap_extract(pmap_kernel(), va);
 #ifdef DEBUG
 PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
 #endif
@@ -2184,7 +2176,7 @@ pmap_enter_ptpage(pmap, va)
 if (pv) {
 pv->pv_flags |= PV_PTPAGE;
 do {
-if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
+if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
 break;
 } while (pv = pv->pv_next);
 }
@@ -2220,7 +2212,7 @@ pmap_enter_ptpage(pmap, va)
 else
 #endif
 *(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 pmap->pm_sref++;
 #ifdef DEBUG
 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
@@ -2231,7 +2223,7 @@ pmap_enter_ptpage(pmap, va)
 /*
 * Flush stale TLB info.
 */
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -2261,8 +2253,8 @@ pmap_check_wiring(str, va)
 register int count, *pte;

 va = trunc_page(va);
-if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
-!pmap_pte_v(pmap_pte(kernel_pmap, va)))
+if (!pmap_ste_v(pmap_ste(pmap_kernel(), va)) ||
+!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
 return;

 if (!vm_map_lookup_entry(pt_map, va, &entry)) {

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.11 1994/12/28 09:08:45 chopps Exp $ */
+/* $NetBSD: pmap.h,v 1.12 1995/04/10 12:41:44 mycroft Exp $ */

 /*
 * Copyright (c) 1987 Carnegie-Mellon University
@@ -58,7 +58,6 @@ struct pmap {
 };

 typedef struct pmap *pmap_t;
-extern pmap_t kernel_pmap;

 /*
 * Macros for speed
@@ -66,7 +65,7 @@ extern pmap_t kernel_pmap;
 #define PMAP_ACTIVATE(pmapp, pcbp, iscurproc) \
 if ((pmapp) != NULL && (pmapp)->pm_stchanged) { \
 (pcbp)->pcb_ustp = \
-amiga_btop(pmap_extract(kernel_pmap, \
+amiga_btop(pmap_extract(pmap_kernel(), \
 cpu040 ? (vm_offset_t)(pmapp)->pm_rtab : \
 (vm_offset_t)(pmapp)->pm_stab)); \
 if (iscurproc) \
@@ -95,6 +94,7 @@ typedef struct pv_entry {
 pv_entry_t pv_table; /* array of entries, one per page */
 u_int *Sysmap;
 char *vmmap; /* map for mem, dumps, etc. */
+struct pmap kernel_pmap_store;

 #ifdef MACHINE_NONCONTIG
 #define pa_index(pa) pmap_page_index(pa)
@@ -102,6 +102,7 @@ char *vmmap; /* map for mem, dumps, etc. */
 #define pa_index(pa) atop(pa - vm_first_phys)
 #endif
 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+#define pmap_kernel() (&kernel_pmap_store)
 #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
 #endif KERNEL

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.1.1.1 1995/03/26 07:12:20 leo Exp $ */
+/* $NetBSD: pmap.c,v 1.2 1995/04/10 12:41:47 mycroft Exp $ */

 /*
 * Copyright (c) 1991 Regents of the University of California.
@@ -171,7 +171,7 @@ int pmapdebug = PDB_PARANOIA;
 #define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))

 #define active_pmap(pm) \
-((pm) == kernel_pmap || (pm) == curproc->p_vmspace->vm_map.pmap)
+((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)

 /*
 * Given a map and a machine independent protection code,
@@ -207,7 +207,6 @@ u_int *Segtabzero;
 vm_size_t Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;

 struct pmap kernel_pmap_store;
-pmap_t kernel_pmap;
 vm_map_t pt_map;

 vm_offset_t avail_start; /* PA of first available physical page */
@@ -277,29 +276,22 @@ vm_offset_t kernel_size;
 */
 atari_protection_init();

-/*
-* The kernel's pmap is statically allocated so we don't
-* have to use pmap_create, which is unlikely to work
-* correctly at this part of the boot sequence.
-*/
-kernel_pmap = &kernel_pmap_store;
-
 /*
 * Kernel page/segment table allocated in locore,
 * just initialize pointers.
 */
-kernel_pmap->pm_stab = Sysseg;
-kernel_pmap->pm_ptab = Sysmap;
+pmap_kernel()->pm_stab = Sysseg;
+pmap_kernel()->pm_ptab = Sysmap;
 #ifdef M68040
 if (cpu040) {
-kernel_pmap->pm_rtab = Sysseg1;
+pmap_kernel()->pm_rtab = Sysseg1;
 pmap_ishift = SG4_ISHIFT;
 } else
 #endif
 pmap_ishift = SG_ISHIFT;

-simple_lock_init(&kernel_pmap->pm_lock);
-kernel_pmap->pm_count = 1;
+simple_lock_init(&pmap_kernel()->pm_lock);
+pmap_kernel()->pm_count = 1;

 /*
 * Allocate all the submaps we need
@@ -308,7 +300,7 @@ vm_offset_t kernel_size;
 v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);

 va = virtual_avail;
-pte = pmap_pte(kernel_pmap, va);
+pte = pmap_pte(pmap_kernel(), va);

 SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
 SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )
@@ -462,7 +454,7 @@ bogons:
 (--kpt_pages)->kpt_next = kpt_free_list;
 kpt_free_list = kpt_pages;
 kpt_pages->kpt_va = addr2;
-kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
+kpt_pages->kpt_pa = pmap_extract(pmap_kernel(), addr2);
 } while (addr != addr2);
 #ifdef DEBUG
 kpt_stats.kpttotal = atop(s);
@@ -520,7 +512,7 @@ pmap_map(virt, start, end, prot)
 printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
 #endif
 while (start < end) {
-pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+pmap_enter(pmap_kernel(), virt, start, prot, FALSE);
 virt += PAGE_SIZE;
 start += PAGE_SIZE;
 }
@@ -754,7 +746,7 @@ pmap_remove(pmap, sva, eva)
 * pmap_pageable which clears the modify bit for the
 * PT page.
 */
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 pte = pmap_pte(pmap, va);
 vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), TRUE);
@@ -849,7 +841,7 @@ pmap_remove(pmap, sva, eva)
 * reference count on the segment table as well,
 * freeing it if it is now empty.
 */
-if (ptpmap != kernel_pmap) {
+if (ptpmap != pmap_kernel()) {
 #ifdef DEBUG
 if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
 printf("remove: stab %x, refcnt %d\n",
@@ -892,7 +884,7 @@ pmap_remove(pmap, sva, eva)
 (struct pcb *)curproc->p_addr, 1);
 }
 }
-if (ptpmap == kernel_pmap)
+if (ptpmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -906,7 +898,7 @@ pmap_remove(pmap, sva, eva)
 splx(s);
 }
 if (flushcache) {
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 DCIS();
 #ifdef DEBUG
 remove_stats.sflushes++;
@@ -1075,7 +1067,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 return;

 #ifdef DEBUG
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 enter_stats.kernel++;
 else
 enter_stats.user++;
@@ -1155,7 +1147,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 * on this PT page. PT pages are wired down as long as there
 * is a valid mapping in the page.
 */
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), FALSE);

@@ -1242,7 +1234,7 @@ validate:
 * ATARI pages in a MACH page.
 */
 #ifdef M68040
-if (cpu040 && pmap == kernel_pmap && va >= ATARI_UPTBASE &&
+if (cpu040 && pmap == pmap_kernel() && va >= ATARI_UPTBASE &&
 va < (ATARI_UPTBASE + ATARI_UPTMAXSIZE))
 cacheable = FALSE; /* don't cache user page tables */
 #endif
@@ -1275,7 +1267,7 @@ validate:
 if (!wired && active_pmap(pmap))
 TBIS(va);
 #ifdef DEBUG
-if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
+if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) {
 va -= PAGE_SIZE;
 pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
 }
@@ -1431,7 +1423,7 @@ pmap_collect(pmap)
 int *ste;
 int opmapdebug;
 #endif
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 return;

 #ifdef DEBUG
@@ -1448,10 +1440,10 @@ pmap_collect(pmap)
 * page table pages.
 */
 pv = pa_to_pvh(pa);
-if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
+if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
 continue;
 do {
-if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
+if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
 break;
 } while (pv = pv->pv_next);
 if (pv == NULL)
@@ -1518,7 +1510,7 @@ ok:
 if (*ste)
 printf("collect: kernel STE at %x still valid (%x)\n",
 ste, *ste);
-ste = (int *)&Sysptmap[(u_int *)ste-pmap_ste(kernel_pmap, 0)];
+ste = (int *)&Sysptmap[(u_int *)ste-pmap_ste(pmap_kernel(), 0)];
 if (*ste)
 printf("collect: kernel PTmap at %x still valid (%x)\n",
 ste, *ste);
@@ -1610,7 +1602,7 @@ pmap_pageable(pmap, sva, eva, pageable)
 * - we are called with only one page at a time
 * - PT pages have only one pv_table entry
 */
-if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
+if (pmap == pmap_kernel() && pageable && sva + PAGE_SIZE == eva) {
 register pv_entry_t pv;
 register vm_offset_t pa;

@@ -1907,7 +1899,7 @@ pmap_enter_ptpage(pmap, va)
 kmem_alloc(kernel_map, ATARI_040STSIZE*128);
 /* intialize root table entries */
 sg = (u_int *) pmap->pm_rtab;
-sg_proto = pmap_extract(kernel_pmap,
+sg_proto = pmap_extract(pmap_kernel(),
 (vm_offset_t) pmap->pm_stab) | SG_RW | SG_V;
 #ifdef DEBUG
 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
@@ -1958,7 +1950,7 @@ pmap_enter_ptpage(pmap, va)
 * free list and map it into the kernel page table map (via
 * pmap_enter).
 */
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 register struct kpt_page *kpt;

 s = splimp();
@@ -1971,7 +1963,7 @@ pmap_enter_ptpage(pmap, va)
 if (pmapdebug & PDB_COLLECT)
 printf("enter: no KPT pages, collecting...\n");
 #endif
-pmap_collect(kernel_pmap);
+pmap_collect(pmap_kernel());
 if ((kpt = kpt_free_list) == (struct kpt_page *)0)
 panic("pmap_enter_ptpage: can't get KPT page");
 }
@@ -2006,7 +1998,7 @@ pmap_enter_ptpage(pmap, va)
 if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
 != KERN_SUCCESS)
 panic("pmap_enter: vm_fault failed");
-ptpa = pmap_extract(kernel_pmap, va);
+ptpa = pmap_extract(pmap_kernel(), va);
 #ifdef DEBUG
 PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
 #endif
@@ -2022,7 +2014,7 @@ pmap_enter_ptpage(pmap, va)
 if (pv) {
 pv->pv_flags |= PV_PTPAGE;
 do {
-if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
+if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
 break;
 } while (pv = pv->pv_next);
 }
@@ -2058,7 +2050,7 @@ pmap_enter_ptpage(pmap, va)
 else
 #endif
 *(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 pmap->pm_sref++;
 #ifdef DEBUG
 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
@@ -2069,7 +2061,7 @@ pmap_enter_ptpage(pmap, va)
 /*
 * Flush stale TLB info.
 */
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -2109,8 +2101,8 @@ pmap_check_wiring(str, va)
 register int count, *pte;

 va = trunc_page(va);
-if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
-!pmap_pte_v(pmap_pte(kernel_pmap, va)))
+if (!pmap_ste_v(pmap_ste(pmap_kernel(), va)) ||
+!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
 return;

 if (!vm_map_lookup_entry(pt_map, va, &entry)) {

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.2 1995/03/30 06:01:19 leo Exp $ */
+/* $NetBSD: pmap.h,v 1.3 1995/04/10 12:41:49 mycroft Exp $ */

 /*
 * Copyright (c) 1987 Carnegie-Mellon University
@@ -59,7 +59,6 @@ struct pmap {
 };

 typedef struct pmap *pmap_t;
-extern pmap_t kernel_pmap;

 /*
 * Macros for speed
@@ -93,12 +92,14 @@ typedef struct pv_entry {
 #define PV_PTPAGE 0x02 /* entry maps a page table page */

 #ifdef _KERNEL
-extern pv_entry_t pv_table; /* array of entries, one per page */
-extern u_int *Sysmap;
-extern char *vmmap; /* map for mem, dumps, etc. */
+pv_entry_t pv_table; /* array of entries, one per page */
+u_int *Sysmap;
+char *vmmap; /* map for mem, dumps, etc. */
+struct pmap kernel_pmap_store;

 #define pa_index(pa) atop(pa - vm_first_phys)
 #define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
+#define pmap_kernel() (&kernel_pmap_store)
 #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
 #endif KERNEL

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.4 1994/10/26 02:32:58 cgd Exp $ */
+/* $NetBSD: pmap.c,v 1.5 1995/04/10 12:41:51 mycroft Exp $ */

 /*
 * Copyright (c) 1991, 1993
@@ -363,7 +363,7 @@ bogons:
 s = round_page(s);
 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
 Segtabzero = (st_entry_t *) addr;
-Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
+Segtabzeropa = (st_entry_t *) pmap_extract(pmap_kernel(), addr);
 addr += DA_STSIZE;
 pv_table = (pv_entry_t) addr;
 addr += sizeof(struct pv_entry) * npg;
@@ -406,7 +406,7 @@ bogons:
 (--kpt_pages)->kpt_next = kpt_free_list;
 kpt_free_list = kpt_pages;
 kpt_pages->kpt_va = addr2;
-kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
+kpt_pages->kpt_pa = pmap_extract(pmap_kernel(), addr2);
 } while (addr != addr2);
 #ifdef PMAPSTATS
 kpt_stats.kpttotal = atop(s);
@@ -472,7 +472,7 @@ pmap_map(virt, start, end, prot)
 printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
 #endif
 while (start < end) {
-pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+pmap_enter(pmap_kernel(), virt, start, prot, FALSE);
 virt += PAGE_SIZE;
 start += PAGE_SIZE;
 }
@@ -710,20 +710,20 @@ pmap_remove(pmap, sva, eva)
 * it won't be there
 */
 if (pmap_aliasmask &&
-(pmap == kernel_pmap || pmap != curproc->p_vmspace->vm_map.pmap))
+(pmap == pmap_kernel() || pmap != curproc->p_vmspace->vm_map.pmap))
 needcflush = FALSE;
 #ifdef DEBUG
 if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) {
 if (pmapvacflush & PVF_TOTAL)
 DCIA();
-else if (pmap == kernel_pmap)
+else if (pmap == pmap_kernel())
 DCIS();
 else
 DCIU();
 } else
 #endif
 if (needcflush) {
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 DCIS();
 #ifdef PMAPSTATS
 remove_stats.sflushes++;
@@ -889,7 +889,7 @@ pmap_protect(pmap, sva, eva, prot)
 if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) {
 if (pmapvacflush & PVF_TOTAL)
 DCIA();
-else if (pmap == kernel_pmap)
+else if (pmap == pmap_kernel())
 DCIS();
 else
 DCIU();
@@ -932,7 +932,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 return;

 #ifdef PMAPSTATS
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 enter_stats.kernel++;
 else
 enter_stats.user++;
@@ -1020,7 +1020,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 * on this PT page. PT pages are wired down as long as there
 * is a valid mapping in the page.
 */
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 (void) vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), FALSE);

@@ -1113,8 +1113,8 @@ pmap_enter(pmap, va, pa, prot, wired)
 checkpv = cacheable = FALSE;
 } else if (npv->pv_next ||
 ((pmap == pv->pv_pmap ||
-pmap == kernel_pmap ||
-pv->pv_pmap == kernel_pmap) &&
+pmap == pmap_kernel() ||
+pv->pv_pmap == pmap_kernel()) &&
 ((pv->pv_va & pmap_aliasmask) !=
 (va & pmap_aliasmask)))) {
 #ifdef DEBUG
@@ -1204,7 +1204,7 @@ validate:
 else if (pmapvacflush & PVF_ENTER) {
 if (pmapvacflush & PVF_TOTAL)
 DCIA();
-else if (pmap == kernel_pmap)
+else if (pmap == pmap_kernel())
 DCIS();
 else
 DCIU();
@@ -1212,7 +1212,7 @@ validate:
 #endif
 #endif
 #ifdef DEBUG
-if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
+if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
 pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
 #endif
 }
@@ -1367,7 +1367,7 @@ pmap_collect(pmap)
 int *ste;
 int opmapdebug;
 #endif
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 return;

 #ifdef DEBUG
@@ -1386,10 +1386,10 @@ pmap_collect(pmap)
 * page table pages.
 */
 pv = pa_to_pvh(pa);
-if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
+if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
 continue;
 do {
-if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
+if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
 break;
 } while (pv = pv->pv_next);
 if (pv == NULL)
@@ -1459,7 +1459,7 @@ ok:
 if (*ste)
 printf("collect: kernel STE at %x still valid (%x)\n",
 ste, *ste);
-ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
+ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(pmap_kernel(), 0)];
 if (*ste)
 printf("collect: kernel PTmap at %x still valid (%x)\n",
 ste, *ste);
@@ -1504,9 +1504,9 @@ pmap_zero_page(phys)
 printf("pmap_zero_page(%x)\n", phys);
 #endif
 kva = (vm_offset_t) CADDR1;
-pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+pmap_enter(pmap_kernel(), kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
 bzero((caddr_t)kva, DA_PAGE_SIZE);
-pmap_remove_mapping(kernel_pmap, kva, PT_ENTRY_NULL,
+pmap_remove_mapping(pmap_kernel(), kva, PT_ENTRY_NULL,
 PRM_TFLUSH|PRM_CFLUSH);
 }

@@ -1536,11 +1536,11 @@ pmap_copy_page(src, dst)
 #endif
 skva = (vm_offset_t) CADDR1;
 dkva = (vm_offset_t) CADDR2;
-pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
-pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+pmap_enter(pmap_kernel(), skva, src, VM_PROT_READ, TRUE);
+pmap_enter(pmap_kernel(), dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
 copypage((caddr_t)skva, (caddr_t)dkva);
 /* CADDR1 and CADDR2 are virtually contiguous */
-pmap_remove(kernel_pmap, skva, skva+2*PAGE_SIZE);
+pmap_remove(pmap_kernel(), skva, skva+2*PAGE_SIZE);
 }

 /*
@@ -1576,7 +1576,7 @@ pmap_pageable(pmap, sva, eva, pageable)
 * - we are called with only one page at a time
 * - PT pages have only one pv_table entry
 */
-if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
+if (pmap == pmap_kernel() && pageable && sva + PAGE_SIZE == eva) {
 register pv_entry_t pv;
 register vm_offset_t pa;

@@ -1793,7 +1793,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * pmap_pageable which clears the modify bit for the
 * PT page.
 */
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 (void) vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), TRUE);
 #ifdef DEBUG
@@ -1889,7 +1889,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * reference count on the segment table as well,
 * freeing it if it is now empty.
 */
-if (ptpmap != kernel_pmap) {
+if (ptpmap != pmap_kernel()) {
 #ifdef DEBUG
 if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
 printf("remove: stab %x, refcnt %d\n",
@@ -1930,7 +1930,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * XXX this should be unnecessary as we have been
 * flushing individual mappings as we go.
 */
-if (ptpmap == kernel_pmap)
+if (ptpmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -2039,7 +2039,7 @@ pmap_changebit(pa, bit, setem)
 #endif
 for (; pv; pv = pv->pv_next) {
 #ifdef DEBUG
-toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
+toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
 #endif
 va = pv->pv_va;

@@ -2131,7 +2131,7 @@ pmap_enter_ptpage(pmap, va)
 pmap->pm_stab = (st_entry_t *)
 kmem_alloc(st_map, DA_STSIZE);
 pmap->pm_stpa = (st_entry_t *)
-pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
+pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab);
 pmap->pm_stchanged = TRUE;
 /*
 * XXX may have changed segment table pointer for current
@@ -2154,7 +2154,7 @@ pmap_enter_ptpage(pmap, va)
 * free list and map it into the kernel page table map (via
 * pmap_enter).
 */
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 register struct kpt_page *kpt;

 s = splimp();
@@ -2167,7 +2167,7 @@ pmap_enter_ptpage(pmap, va)
 if (pmapdebug & PDB_COLLECT)
 printf("enter: no KPT pages, collecting...\n");
 #endif
-pmap_collect(kernel_pmap);
+pmap_collect(pmap_kernel());
 if ((kpt = kpt_free_list) == (struct kpt_page *)0)
 panic("pmap_enter_ptpage: can't get KPT page");
 }
@@ -2205,7 +2205,7 @@ pmap_enter_ptpage(pmap, va)
 printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
 panic("pmap_enter: vm_fault failed");
 }
-ptpa = pmap_extract(kernel_pmap, va);
+ptpa = pmap_extract(pmap_kernel(), va);
 /*
 * Mark the page clean now to avoid its pageout (and
 * hence creation of a pager) between now and when it
@@ -2226,7 +2226,7 @@ pmap_enter_ptpage(pmap, va)
 if (pv) {
 pv->pv_flags |= PV_PTPAGE;
 do {
-if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
+if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
 break;
 } while (pv = pv->pv_next);
 }
@@ -2250,7 +2250,7 @@ pmap_enter_ptpage(pmap, va)
 * release them. We also avoid the overhead of vm_map_pageable.
 */
 *(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 pmap->pm_sref++;
 #ifdef DEBUG
 if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
@@ -2288,8 +2288,8 @@ pmap_check_wiring(str, va)
 register int count, *pte;

 va = trunc_page(va);
-if (!pmap_ste_v(kernel_pmap, va) ||
-!pmap_pte_v(pmap_pte(kernel_pmap, va)))
+if (!pmap_ste_v(pmap_kernel(), va) ||
+!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
 return;

 if (!vm_map_lookup_entry(pt_map, va, &entry)) {

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.4 1995/03/28 18:15:42 jtc Exp $ */
+/* $NetBSD: pmap.h,v 1.5 1995/04/10 12:41:54 mycroft Exp $ */

 /*
 * Copyright (c) 1987 Carnegie-Mellon University
@@ -68,9 +68,9 @@ typedef struct pmap *pmap_t;

 extern struct pmap kernel_pmap_store;

-#define kernel_pmap (&kernel_pmap_store)
+#define pmap_kernel() (&kernel_pmap_store)
 #define active_pmap(pm) \
-((pm) == kernel_pmap || (pm) == curproc->p_vmspace->vm_map.pmap)
+((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)

 /*
 * Macros for speed

@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.16 1994/11/08 01:17:37 mycroft Exp $ */
+/* $NetBSD: pmap.c,v 1.17 1995/04/10 12:41:56 mycroft Exp $ */

 /*
 * Copyright (c) 1991, 1993
@@ -400,7 +400,7 @@ bogons:
 s = round_page(s);
 addr = (vm_offset_t) kmem_alloc(kernel_map, s);
 Segtabzero = (st_entry_t *) addr;
-Segtabzeropa = (st_entry_t *) pmap_extract(kernel_pmap, addr);
+Segtabzeropa = (st_entry_t *) pmap_extract(pmap_kernel(), addr);
 addr += HP_STSIZE;
 pv_table = (struct pv_entry *) addr;
 addr += sizeof(struct pv_entry) * npages;
@@ -443,7 +443,7 @@ bogons:
 (--kpt_pages)->kpt_next = kpt_free_list;
 kpt_free_list = kpt_pages;
 kpt_pages->kpt_va = addr2;
-kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
+kpt_pages->kpt_pa = pmap_extract(pmap_kernel(), addr2);
 } while (addr != addr2);
 #ifdef PMAPSTATS
 kpt_stats.kpttotal = atop(s);
@@ -632,7 +632,7 @@ pmap_map(va, spa, epa, prot)
 #endif

 while (spa < epa) {
-pmap_enter(kernel_pmap, va, spa, prot, FALSE);
+pmap_enter(pmap_kernel(), va, spa, prot, FALSE);
 va += NBPG;
 spa += NBPG;
 }
@@ -906,20 +906,20 @@ pmap_remove(pmap, sva, eva)
 * it won't be there
 */
 if (pmap_aliasmask &&
-(pmap == kernel_pmap || pmap != curproc->p_vmspace->vm_map.pmap))
+(pmap == pmap_kernel() || pmap != curproc->p_vmspace->vm_map.pmap))
 needcflush = FALSE;
 #ifdef DEBUG
 if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) {
 if (pmapvacflush & PVF_TOTAL)
 DCIA();
-else if (pmap == kernel_pmap)
+else if (pmap == pmap_kernel())
 DCIS();
 else
 DCIU();
 } else
 #endif
 if (needcflush) {
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 DCIS();
 #ifdef PMAPSTATS
 remove_stats.sflushes++;
@@ -1097,7 +1097,7 @@ pmap_protect(pmap, sva, eva, prot)
 if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) {
 if (pmapvacflush & PVF_TOTAL)
 DCIA();
-else if (pmap == kernel_pmap)
+else if (pmap == pmap_kernel())
 DCIS();
 else
 DCIU();
@@ -1140,7 +1140,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 return;

 #ifdef PMAPSTATS
-if (pmap == kernel_pmap)
+if (pmap == pmap_kernel())
 enter_stats.kernel++;
 else
 enter_stats.user++;
@@ -1228,7 +1228,7 @@ pmap_enter(pmap, va, pa, prot, wired)
 * on this PT page. PT pages are wired down as long as there
 * is a valid mapping in the page.
 */
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 (void) vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), FALSE);

@@ -1320,8 +1320,8 @@ pmap_enter(pmap, va, pa, prot, wired)
 checkpv = cacheable = FALSE;
 } else if (npv->pv_next ||
 ((pmap == pv->pv_pmap ||
-pmap == kernel_pmap ||
-pv->pv_pmap == kernel_pmap) &&
+pmap == pmap_kernel() ||
+pv->pv_pmap == pmap_kernel()) &&
 ((pv->pv_va & pmap_aliasmask) !=
 (va & pmap_aliasmask)))) {
 #ifdef DEBUG
@@ -1378,7 +1378,7 @@ validate:
 #if defined(HP380)
 if (mmutype == MMU_68040 && (npte & (PG_PROT|PG_CI)) == PG_RW)
 #ifdef DEBUG
-if (dowriteback && (dokwriteback || pmap != kernel_pmap))
+if (dowriteback && (dokwriteback || pmap != pmap_kernel()))
 #endif
 npte |= PG_CCB;
 #endif
@@ -1424,7 +1424,7 @@ validate:
 else if (pmapvacflush & PVF_ENTER) {
 if (pmapvacflush & PVF_TOTAL)
 DCIA();
-else if (pmap == kernel_pmap)
+else if (pmap == pmap_kernel())
 DCIS();
 else
 DCIU();
@@ -1432,7 +1432,7 @@ validate:
 #endif
 #endif
 #ifdef DEBUG
-if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap)
+if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel())
 pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
 #endif
 }
@@ -1587,7 +1587,7 @@ pmap_collect(pmap)
 st_entry_t *ste;
 int opmapdebug;
 #endif
-if (pmap != kernel_pmap)
+if (pmap != pmap_kernel())
 return;

 #ifdef DEBUG
@@ -1606,10 +1606,10 @@ pmap_collect(pmap)
 * page table pages.
 */
 pv = pa_to_pvh(pa);
-if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
+if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
 continue;
 do {
-if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
+if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
 break;
 } while (pv = pv->pv_next);
 if (pv == NULL)
@@ -1679,7 +1679,7 @@ ok:
 if (*ste != SG_NV)
 printf("collect: kernel STE at %x still valid (%x)\n",
 ste, *ste);
-ste = &Sysptmap[ste - pmap_ste(kernel_pmap, 0)];
+ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
 if (*ste != SG_NV)
 printf("collect: kernel PTmap at %x still valid (%x)\n",
 ste, *ste);
@@ -1712,9 +1712,9 @@ pmap_zero_page(phys)
 printf("pmap_zero_page(%x)\n", phys);
 #endif
 kva = (vm_offset_t) CADDR1;
-pmap_enter(kernel_pmap, kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+pmap_enter(pmap_kernel(), kva, phys, VM_PROT_READ|VM_PROT_WRITE, TRUE);
 bzero((caddr_t)kva, NBPG);
-pmap_remove_mapping(kernel_pmap, kva, PT_ENTRY_NULL,
+pmap_remove_mapping(pmap_kernel(), kva, PT_ENTRY_NULL,
 PRM_TFLUSH|PRM_CFLUSH);
 }

@@ -1744,11 +1744,11 @@ pmap_copy_page(src, dst)
 #endif
 skva = (vm_offset_t) CADDR1;
 dkva = (vm_offset_t) CADDR2;
-pmap_enter(kernel_pmap, skva, src, VM_PROT_READ, TRUE);
-pmap_enter(kernel_pmap, dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
+pmap_enter(pmap_kernel(), skva, src, VM_PROT_READ, TRUE);
+pmap_enter(pmap_kernel(), dkva, dst, VM_PROT_READ|VM_PROT_WRITE, TRUE);
 copypage((caddr_t)skva, (caddr_t)dkva);
 /* CADDR1 and CADDR2 are virtually contiguous */
-pmap_remove(kernel_pmap, skva, skva+2*NBPG);
+pmap_remove(pmap_kernel(), skva, skva+2*NBPG);
 }

 /*
@@ -1784,7 +1784,7 @@ pmap_pageable(pmap, sva, eva, pageable)
 * - we are called with only one page at a time
 * - PT pages have only one pv_table entry
 */
-if (pmap == kernel_pmap && pageable && sva + NBPG == eva) {
+if (pmap == pmap_kernel() && pageable && sva + NBPG == eva) {
 register struct pv_entry *pv;
 register vm_offset_t pa;

@@ -2037,7 +2037,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * pmap_pageable which clears the modify bit for the
 * PT page.
 */
-if (pmap != kernel_pmap) {
+if (pmap != pmap_kernel()) {
 (void) vm_map_pageable(pt_map, trunc_page(pte),
 round_page(pte+1), TRUE);
 #ifdef DEBUG
@@ -2144,7 +2144,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * reference count on the segment table as well,
 * freeing it if it is now empty.
 */
-if (ptpmap != kernel_pmap) {
+if (ptpmap != pmap_kernel()) {
 #ifdef DEBUG
 if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
 printf("remove: stab %x, refcnt %d\n",
@@ -2188,7 +2188,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
 * XXX this should be unnecessary as we have been
 * flushing individual mappings as we go.
 */
-if (ptpmap == kernel_pmap)
+if (ptpmap == pmap_kernel())
 TBIAS();
 else
 TBIAU();
@@ -2297,7 +2297,7 @@ pmap_changebit(pa, bit, setem)
 #endif
 for (; pv; pv = pv->pv_next) {
 #ifdef DEBUG
-toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
+toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
 #endif
 va = pv->pv_va;

@@ -2403,7 +2403,7 @@ pmap_enter_ptpage(pmap, va)
 pmap->pm_stab = (st_entry_t *)
 kmem_alloc(st_map, HP_STSIZE);
 pmap->pm_stpa = (st_entry_t *)
-pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_stab);
+pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab);
 #if defined(HP380)
 if (mmutype == MMU_68040) {
 #ifdef DEBUG
@@ -2474,7 +2474,7 @@ pmap_enter_ptpage(pmap, va)
 * free list and map it into the kernel page table map (via
 * pmap_enter).
 */
-if (pmap == kernel_pmap) {
+if (pmap == pmap_kernel()) {
 register struct kpt_page *kpt;

 s = splimp();
@@ -2487,7 +2487,7 @@ pmap_enter_ptpage(pmap, va)
 if (pmapdebug & PDB_COLLECT)
 printf("enter: no KPT pages, collecting...\n");
 #endif
-pmap_collect(kernel_pmap);
+pmap_collect(pmap_kernel());
 if ((kpt = kpt_free_list) == (struct kpt_page *)0)
 panic("pmap_enter_ptpage: can't get KPT page");
 }
@@ -2525,7 +2525,7 @@ pmap_enter_ptpage(pmap, va)
 printf("vm_fault(pt_map, %x, RW, 0) -> %d\n", va, s);
 panic("pmap_enter: vm_fault failed");
 }
-ptpa = pmap_extract(kernel_pmap, va);
+ptpa = pmap_extract(pmap_kernel(), va);
 /*
 * Mark the page clean now to avoid its pageout (and
 * hence creation of a pager) between now and when it
@@ -2545,11 +2545,11 @@ pmap_enter_ptpage(pmap, va)
 if (dowriteback && dokwriteback)
 #endif
 if (mmutype == MMU_68040) {
-pt_entry_t *pte = pmap_pte(kernel_pmap, va);
+pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
 #ifdef DEBUG
 if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
 printf("%s PT no CCB: kva=%x ptpa=%x pte@%x=%x\n",
-pmap == kernel_pmap ? "Kernel" : "User",
+pmap == pmap_kernel() ? "Kernel" : "User",
|
||||
va, ptpa, pte, *pte);
|
||||
#endif
|
||||
pmap_changebit(ptpa, PG_CCB, 0);
|
||||
|
@ -2565,7 +2565,7 @@ pmap_enter_ptpage(pmap, va)
|
|||
if (pv) {
|
||||
pv->pv_flags |= PV_PTPAGE;
|
||||
do {
|
||||
if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
|
||||
if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
|
||||
break;
|
||||
} while (pv = pv->pv_next);
|
||||
}
|
||||
|
@ -2599,7 +2599,7 @@ pmap_enter_ptpage(pmap, va)
|
|||
} else
|
||||
#endif
|
||||
*ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
|
||||
if (pmap != kernel_pmap) {
|
||||
if (pmap != pmap_kernel()) {
|
||||
pmap->pm_sref++;
|
||||
#ifdef DEBUG
|
||||
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
|
||||
|
@ -2611,7 +2611,7 @@ pmap_enter_ptpage(pmap, va)
|
|||
/*
|
||||
* Flush stale TLB info.
|
||||
*/
|
||||
if (pmap == kernel_pmap)
|
||||
if (pmap == pmap_kernel())
|
||||
TBIAS();
|
||||
else
|
||||
TBIAU();
|
||||
|
@ -2647,8 +2647,8 @@ pmap_check_wiring(str, va)
|
|||
register pt_entry_t *pte;
|
||||
|
||||
va = trunc_page(va);
|
||||
if (!pmap_ste_v(kernel_pmap, va) ||
|
||||
!pmap_pte_v(pmap_pte(kernel_pmap, va)))
|
||||
if (!pmap_ste_v(pmap_kernel(), va) ||
|
||||
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
|
||||
return;
|
||||
|
||||
if (!vm_map_lookup_entry(pt_map, va, &entry)) {
|
||||
|
|
|

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.7 1995/03/28 18:16:38 jtc Exp $ */
/* $NetBSD: pmap.h,v 1.8 1995/04/10 12:41:59 mycroft Exp $ */

/*
* Copyright (c) 1987 Carnegie-Mellon University

@@ -142,9 +142,9 @@ struct pv_page {
extern struct pmap kernel_pmap_store;

#define kernel_pmap (&kernel_pmap_store)
#define pmap_kernel() (&kernel_pmap_store)
#define active_pmap(pm) \
((pm) == kernel_pmap || (pm) == curproc->p_vmspace->vm_map.pmap)
((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)

extern struct pv_entry *pv_table; /* array of entries, one per page */
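
The pmap.h hunk above is the heart of the change: pmap_kernel() becomes a macro for the address of kernel_pmap_store, an object with static storage duration. A minimal sketch (illustrative names, not the NetBSD headers) of why such an accessor needs no boot-time initialization step:

/*
 * Minimal sketch, not the NetBSD sources: a pmap_kernel() macro over
 * a statically allocated pmap.  Because the object exists from link
 * time, no "kernel_pmap = &kernel_pmap_store;" boot step is needed.
 */
#include <stdio.h>

struct pmap {
	int pm_count;			/* reference count */
};

struct pmap kernel_pmap_store;		/* allocated by the linker */
#define pmap_kernel()	(&kernel_pmap_store)

int
main(void)
{
	pmap_kernel()->pm_count = 1;	/* usable immediately */
	printf("kernel pmap %p, count %d\n",
	    (void *)pmap_kernel(), pmap_kernel()->pm_count);
	return (0);
}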

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.27 1994/11/08 01:17:19 mycroft Exp $ */
/* $NetBSD: pmap.c,v 1.28 1995/04/10 12:42:02 mycroft Exp $ */

/*
* Copyright (c) 1993, 1994 Charles Hannum.

@@ -234,22 +234,22 @@ pmap_bootstrap(virtual_start)
* [ currently done in locore. i have wild and crazy ideas -wfj ]
*/
bzero(firstaddr, (1+NKPDE)*NBPG);
kernel_pmap->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
kernel_pmap->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;
pmap_kernel()->pm_pdir = firstaddr + VM_MIN_KERNEL_ADDRESS;
pmap_kernel()->pm_ptab = firstaddr + VM_MIN_KERNEL_ADDRESS + NBPG;

firstaddr += NBPG;
for (x = i386_btod(VM_MIN_KERNEL_ADDRESS);
x < i386_btod(VM_MIN_KERNEL_ADDRESS) + NKPDE; x++) {
pd_entry_t *pde;
pde = kernel_pmap->pm_pdir + x;
pde = pmap_kernel()->pm_pdir + x;
*pde = (firstaddr + x*NBPG) | PG_V | PG_KW;
}
#else
kernel_pmap->pm_pdir = (pd_entry_t *)(KERNBASE + IdlePTD);
pmap_kernel()->pm_pdir = (pd_entry_t *)(KERNBASE + IdlePTD);
#endif

simple_lock_init(&kernel_pmap->pm_lock);
kernel_pmap->pm_count = 1;
simple_lock_init(&pmap_kernel()->pm_lock);
pmap_kernel()->pm_count = 1;

#if BSDVM_COMPAT
/*

@@ -259,7 +259,7 @@ pmap_bootstrap(virtual_start)
v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);

va = virtual_avail;
pte = pmap_pte(kernel_pmap, va);
pte = pmap_pte(pmap_kernel(), va);

SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )

@@ -565,7 +565,7 @@ pmap_map(va, spa, epa, prot)
#endif

while (spa < epa) {
pmap_enter(kernel_pmap, va, spa, prot, FALSE);
pmap_enter(pmap_kernel(), va, spa, prot, FALSE);
va += NBPG;
spa += NBPG;
}

@@ -635,7 +635,7 @@ pmap_pinit(pmap)
/* install self-referential address mapping entry */
*(pmap->pm_pdir + PTDPTDI) =
pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir) | PG_V | PG_KW;
pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_pdir) | PG_V | PG_KW;

pmap->pm_count = 1;
simple_lock_init(&pmap->pm_lock);

@@ -722,7 +722,7 @@ pmap_activate(pmap, pcb)
if (pmap /*&& pmap->pm_pdchanged */) {
pcb->pcb_cr3 =
pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir);
pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_pdir);
if (pmap == &curproc->p_vmspace->vm_pmap)
lcr3(pcb->pcb_cr3);
pmap->pm_pdchanged = FALSE;

@@ -1061,7 +1061,7 @@ pmap_enter(pmap, va, pa, prot, wired)
/* also, should not muck with PTD va! */

#ifdef DEBUG
if (pmap == kernel_pmap)
if (pmap == pmap_kernel())
enter_stats.kernel++;
else
enter_stats.user++;

@@ -1282,7 +1282,7 @@ pmap_pte(pmap, va)
return NULL;

if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde & PG_FRAME) ||
pmap == kernel_pmap)
pmap == pmap_kernel())
/* current address space or kernel */
ptp = PTmap;
else {

@@ -1377,7 +1377,7 @@ pmap_collect(pmap)
printf("pmap_collect(%x) ", pmap);
#endif

if (pmap != kernel_pmap)
if (pmap != pmap_kernel())
return;

}

@@ -1484,7 +1484,7 @@ pmap_pageable(pmap, sva, eva, pageable)
* - we are called with only one page at a time
* - PT pages have only one pv_table entry
*/
if (pmap == kernel_pmap && pageable && sva + NBPG == eva) {
if (pmap == pmap_kernel() && pageable && sva + NBPG == eva) {
register vm_offset_t pa;
register pt_entry_t *pte;

@@ -1674,8 +1674,8 @@ pmap_check_wiring(str, va)
register int count, *pte;

va = trunc_page(va);
if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
!pmap_pte_v(pmap_pte(kernel_pmap, va)))
if (!pmap_pde_v(pmap_pde(pmap_kernel(), va)) ||
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;

if (!vm_map_lookup_entry(pt_map, va, &entry)) {

@@ -1699,16 +1699,16 @@ pads(pm)
unsigned va, i, j;
register pt_entry_t *pte;

if (pm == kernel_pmap)
if (pm == pmap_kernel())
return;
for (i = 0; i < 1024; i++)
if (pmap_pde_v(&pm->pm_pdir[i]))
for (j = 0; j < 1024 ; j++) {
va = (i << PDSHIFT) | (j << PGSHIFT);
if (pm == kernel_pmap &&
if (pm == pmap_kernel() &&
va < VM_MIN_KERNEL_ADDRESS)
continue;
if (pm != kernel_pmap &&
if (pm != pmap_kernel() &&
va > VM_MAX_ADDRESS)
continue;
pte = pmap_pte(pm, va);
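
Several of the i386 hunks above test `pmap->pm_pdir[PTDPTDI]` against `PTDpde` and fall back to the fixed PTmap window. That works because the page directory is installed as one of its own entries, so the PTE for any virtual address appears at a computable virtual address. A hedged sketch of the address arithmetic only — the real PTDPTDI value is machine-dependent; this one is made up:

/*
 * Sketch of recursive page-directory arithmetic, not kernel code.
 * With the page directory mapped at its own slot PTDPTDI, the PTE
 * for any va lives at PTmap + (va >> PGSHIFT) * sizeof(pte).
 */
#include <stdint.h>
#include <stdio.h>

#define PGSHIFT	12
#define PDSHIFT	22
#define PTDPTDI	0x3bfu			/* illustrative slot number */
#define PTmap	((uint32_t)PTDPTDI << PDSHIFT)

static uint32_t
vtopte_va(uint32_t va)
{
	return (PTmap + (va >> PGSHIFT) * 4);
}

int
main(void)
{
	printf("PTE for 0xfe000000 visible at 0x%08x\n",
	    vtopte_va(0xfe000000u));
	return (0);
}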

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.17 1995/03/28 18:17:06 jtc Exp $ */
/* $NetBSD: pmap.h,v 1.18 1995/04/10 12:42:05 mycroft Exp $ */

/*
* Copyright (c) 1991 Regents of the University of California.

@@ -156,7 +156,7 @@ struct pv_page {
extern struct pmap kernel_pmap_store;
struct pv_entry *pv_table; /* array of entries, one per page */

#define kernel_pmap (&kernel_pmap_store)
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_update() tlbflush()

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.8 1995/03/23 20:19:23 briggs Exp $ */
/* $NetBSD: pmap.h,v 1.9 1995/04/10 12:42:07 mycroft Exp $ */

/*
* Copyright (c) 1987 Carnegie-Mellon University

@@ -97,15 +97,13 @@ struct pmap {
typedef struct pmap *pmap_t;

extern pmap_t kernel_pmap;

/*
* Macros for speed
*/
#define PMAP_ACTIVATE(pmapp, pcbp, iscurproc) \
if ((pmapp) != NULL && (pmapp)->pm_stchanged) { \
(pcbp)->pcb_ustp = \
mac68k_btop(pmap_extract(kernel_pmap, (vm_offset_t) \
mac68k_btop(pmap_extract(pmap_kernel(), (vm_offset_t) \
(cpu040 ? (pmapp)->pm_rtab : (pmapp)->pm_stab))); \
if (iscurproc) \
loadustp((pcbp)->pcb_ustp); \

@@ -131,6 +129,7 @@ typedef struct pv_entry {
#ifdef _KERNEL
pv_entry_t pv_table; /* array of entries, one per page */
struct pmap kernel_pmap_store;

#ifdef MACHINE_NONCONTIG
#define pa_index(pa) pmap_page_index(pa)

@@ -140,7 +139,7 @@ pv_entry_t pv_table; /* array of entries, one per page */
#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])

#define pmap_kernel() (kernel_pmap)
#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)

extern struct pte *Sysmap;
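
A side note on the PMAP_ACTIVATE macro visible in the hunk above: it expands to a bare if statement, a classic C hazard — used before an else, the else silently binds to the macro's if. A small self-contained demonstration (not NetBSD code; the do/while(0) form is the usual defense):

/*
 * Demonstration, not NetBSD code: an if-style statement macro like
 * PMAP_ACTIVATE above can capture a caller's else clause.  The
 * conventional defense is to wrap the body in do { ... } while (0).
 */
#include <stdio.h>

#define ACTIVATE_BAD(p) \
	if ((p) != NULL) printf("activate %p\n", (void *)(p))

#define ACTIVATE_OK(p)						\
	do {							\
		if ((p) != NULL)				\
			printf("activate %p\n", (void *)(p));	\
	} while (0)

int
main(void)
{
	int x = 0, obj;
	int *p = &obj;

	if (x)
		ACTIVATE_BAD(p);	/* expands to a bare if ... */
	else
		printf("never reached: else bound to the macro's if\n");

	if (x)
		ACTIVATE_OK(p);
	else
		printf("safe form: else binds to the outer if\n");
	return (0);
}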

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.15 1994/12/03 23:35:06 briggs Exp $ */
/* $NetBSD: pmap.c,v 1.16 1995/04/10 12:42:10 mycroft Exp $ */

/*
* Copyright (c) 1990 The Regents of the University of California.

@@ -244,7 +244,6 @@ vm_size_t Sysptsize = VM_KERNEL_PT_PAGES;
#endif

struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
vm_map_t pt_map;

vm_offset_t avail_start; /* PA of first available physical page */

@@ -390,25 +389,18 @@ pmap_bootstrap(firstaddr, loadaddr)
*/
mac68k_protection_init();

/*
* The kernel's pmap is statically allocated so we don't
* have to use pmap_create, which is unlikely to work
* correctly at this part of the boot sequence.
*/
kernel_pmap = &kernel_pmap_store;

/*
* Kernel page/segment table allocated in locore,
* just initialize pointers.
*/
if (cpu040)
kernel_pmap->pm_rtab = Sysseg1;
kernel_pmap->pm_stab = Sysseg;
kernel_pmap->pm_ptab = Sysmap;
pmap_kernel()->pm_rtab = Sysseg1;
pmap_kernel()->pm_stab = Sysseg;
pmap_kernel()->pm_ptab = Sysmap;
pmap_ishift = cpu040 ? SG_040ISHIFT : SG_ISHIFT;

simple_lock_init(&kernel_pmap->pm_lock);
kernel_pmap->pm_count = 1;
simple_lock_init(&pmap_kernel()->pm_lock);
pmap_kernel()->pm_count = 1;

#if BSDVM_COMPAT
/*

@@ -418,7 +410,7 @@ pmap_bootstrap(firstaddr, loadaddr)
v = (c)va; va += ((n)*MAC_PAGE_SIZE); p = pte; pte += (n);

va = virtual_avail;
pte = pmap_pte(kernel_pmap, va);
pte = pmap_pte(pmap_kernel(), va);

/* BG -- these are used in locore. */

@@ -635,7 +627,7 @@ bogons:
(--kpt_pages)->kpt_next = kpt_free_list;
kpt_free_list = kpt_pages;
kpt_pages->kpt_va = addr2;
kpt_pages->kpt_pa = pmap_extract(kernel_pmap, addr2);
kpt_pages->kpt_pa = pmap_extract(pmap_kernel(), addr2);
} while (addr != addr2);
#ifdef DEBUG
kpt_stats.kpttotal = atop(s);

@@ -695,7 +687,7 @@ pmap_map(virt, start, end, prot)
printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
while (start < end) {
pmap_enter(kernel_pmap, virt, start, prot, FALSE);
pmap_enter(pmap_kernel(), virt, start, prot, FALSE);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}

@@ -910,7 +902,7 @@ pmap_remove(pmap, sva, eva)
* flushed the supervisor side.
*/
if (pmap_aliasmask && !pmap_pte_ci(pte) &&
!(pmap == kernel_pmap && firstpage))
!(pmap == pmap_kernel() && firstpage))
flushcache = TRUE;
#ifdef DEBUG
opte = *pte;

@@ -939,7 +931,7 @@ pmap_remove(pmap, sva, eva)
*/
if (firstpage && pmap_aliasmask) {
firstpage = FALSE;
if (pmap == kernel_pmap)
if (pmap == pmap_kernel())
flushcache = FALSE;
DCIS();
#ifdef DEBUG

@@ -965,7 +957,7 @@ pmap_remove(pmap, sva, eva)
* pmap_pageable which clears the modify bit for the
* PT page.
*/
if (pmap != kernel_pmap) {
if (pmap != pmap_kernel()) {
pte = pmap_pte(pmap, va);
vm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), TRUE);

@@ -1079,7 +1071,7 @@ pmap_remove(pmap, sva, eva)
* reference count on the segment table as well,
* freeing it if it is now empty.
*/
if (ptpmap != kernel_pmap) {
if (ptpmap != pmap_kernel()) {
#ifdef DEBUG
if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB))
printf("remove: stab %x, refcnt %d\n",

@@ -1120,7 +1112,7 @@ pmap_remove(pmap, sva, eva)
(struct pcb *)curproc->p_addr, 1);
}
}
if (ptpmap == kernel_pmap)
if (ptpmap == pmap_kernel())
TBIAS();
else
TBIAU();

@@ -1137,14 +1129,14 @@ pmap_remove(pmap, sva, eva)
if (pmapvacflush & PVF_REMOVE) {
if (pmapvacflush & PVF_TOTAL)
DCIA();
else if (pmap == kernel_pmap)
else if (pmap == pmap_kernel())
DCIS();
else
DCIU();
}
#endif
if (flushcache) {
if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
DCIS();
#ifdef DEBUG
remove_stats.sflushes++;

@@ -1289,7 +1281,7 @@ pmap_protect(pmap, sva, eva, prot)
if (hpprot && (pmapvacflush & PVF_PROTECT)) {
if (pmapvacflush & PVF_TOTAL)
DCIA();
else if (pmap == kernel_pmap)
else if (pmap == pmap_kernel())
DCIS();
else
DCIU();

@@ -1334,7 +1326,7 @@ pmap_enter(pmap, va, pa, prot, wired)
return;

#ifdef DEBUG
if (pmap == kernel_pmap)
if (pmap == pmap_kernel())
enter_stats.kernel++;
else
enter_stats.user++;

@@ -1414,7 +1406,7 @@ pmap_enter(pmap, va, pa, prot, wired)
* on this PT page. PT pages are wired down as long as there
* is a valid mapping in the page.
*/
if (pmap != kernel_pmap)
if (pmap != pmap_kernel())
vm_map_pageable(pt_map, trunc_page(pte),
round_page(pte+1), FALSE);

@@ -1511,8 +1503,8 @@ pmap_enter(pmap, va, pa, prot, wired)
checkpv = cacheable = FALSE;
} else if (npv->pv_next ||
((pmap == pv->pv_pmap ||
pmap == kernel_pmap ||
pv->pv_pmap == kernel_pmap) &&
pmap == pmap_kernel() ||
pv->pv_pmap == pmap_kernel()) &&
((pv->pv_va & pmap_aliasmask) !=
(va & pmap_aliasmask)))) {
#ifdef DEBUG

@@ -1560,7 +1552,7 @@ validate:
* Assume uniform modified and referenced status for all
* MAC pages in a MACH page.
*/
if (cpu040 && pmap == kernel_pmap && va >= MAC_PTBASE)
if (cpu040 && pmap == pmap_kernel() && va >= MAC_PTBASE)
cacheable = FALSE; /* Don't cache user page tables */
npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
npte |= (*(int *)pte & (PG_M|PG_U));

@@ -1604,12 +1596,12 @@ validate:
else if (pmapvacflush & PVF_ENTER) {
if (pmapvacflush & PVF_TOTAL)
DCIA();
else if (pmap == kernel_pmap)
else if (pmap == pmap_kernel())
DCIS();
else
DCIU();
}
if ((pmapdebug & PDB_WIRING) && pmap != kernel_pmap) {
if ((pmapdebug & PDB_WIRING) && pmap != pmap_kernel()) {
va -= PAGE_SIZE;
pmap_check_wiring("enter", trunc_page(pmap_pte(pmap, va)));
}

@@ -1772,7 +1764,7 @@ pmap_collect(pmap)
int *ste;
int opmapdebug;
#endif
if (pmap != kernel_pmap)
if (pmap != pmap_kernel())
return;

#ifdef DEBUG

@@ -1789,10 +1781,10 @@ pmap_collect(pmap)
* page table pages.
*/
pv = pa_to_pvh(pa);
if (pv->pv_pmap != kernel_pmap || !(pv->pv_flags & PV_PTPAGE))
if (pv->pv_pmap != pmap_kernel() || !(pv->pv_flags & PV_PTPAGE))
continue;
do {
if (pv->pv_ptste && pv->pv_ptpmap == kernel_pmap)
if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel())
break;
} while (pv = pv->pv_next);
if (pv == NULL)

@@ -1859,7 +1851,7 @@ ok:
if (*ste)
printf("collect: kernel STE at %x still valid (%x)\n",
ste, *ste);
ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(kernel_pmap, 0)];
ste = (int *)&Sysptmap[(st_entry_t *)ste-pmap_ste(pmap_kernel(), 0)];
if (*ste)
printf("collect: kernel PTmap at %x still valid (%x)\n",
ste, *ste);

@@ -1950,7 +1942,7 @@ pmap_pageable(pmap, sva, eva, pageable)
* - we are called with only one page at a time
* - PT pages have only one pv_table entry
*/
if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
if (pmap == pmap_kernel() && pageable && sva + PAGE_SIZE == eva) {
register pv_entry_t pv;
register vm_offset_t pa;

@@ -2201,7 +2193,7 @@ pmap_changebit(pa, bit, setem)
#endif
for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
#endif
va = pv->pv_va;

@@ -2288,7 +2280,7 @@ pmap_enter_ptpage(pmap, va)
kmem_alloc(kernel_map, MAC_040STSIZE*128);
/* intialize root table entries */
sg = (u_int *) pmap->pm_rtab;
sg_proto = pmap_extract(kernel_pmap, (vm_offset_t) pmap->pm_stab) |
sg_proto = pmap_extract(pmap_kernel(), (vm_offset_t) pmap->pm_stab) |
SG_RW | SG_V;
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))

@@ -2335,7 +2327,7 @@ pmap_enter_ptpage(pmap, va)
* free list and map it into the kernel page table map (via
* pmap_enter).
*/
if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
register struct kpt_page *kpt;

s = splimp();

@@ -2348,7 +2340,7 @@ pmap_enter_ptpage(pmap, va)
if (pmapdebug & PDB_COLLECT)
printf("enter: no KPT pages, collecting...\n");
#endif
pmap_collect(kernel_pmap);
pmap_collect(pmap_kernel());
if ((kpt = kpt_free_list) == (struct kpt_page *)0)
panic("pmap_enter_ptpage: can't get KPT page");
}

@@ -2383,7 +2375,7 @@ pmap_enter_ptpage(pmap, va)
if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
!= KERN_SUCCESS)
panic("pmap_enter: vm_fault failed");
ptpa = pmap_extract(kernel_pmap, va);
ptpa = pmap_extract(pmap_kernel(), va);
}

/*

@@ -2396,7 +2388,7 @@ pmap_enter_ptpage(pmap, va)
if (pv) {
pv->pv_flags |= PV_PTPAGE;
do {
if (pv->pv_pmap == kernel_pmap && pv->pv_va == va)
if (pv->pv_pmap == pmap_kernel() && pv->pv_va == va)
break;
} while (pv = pv->pv_next);
}

@@ -2430,7 +2422,7 @@ pmap_enter_ptpage(pmap, va)
}
else
*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
if (pmap != kernel_pmap) {
if (pmap != pmap_kernel()) {
pmap->pm_sref++;
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))

@@ -2441,7 +2433,7 @@ pmap_enter_ptpage(pmap, va)
/*
* Flush stale TLB info.
*/
if (pmap == kernel_pmap)
if (pmap == pmap_kernel())
TBIAS();
else
TBIAU();

@@ -2471,8 +2463,8 @@ pmap_check_wiring(str, va)
register int count, *pte;

va = trunc_page(va);
if (!pmap_ste_v(pmap_ste(kernel_pmap, va)) ||
!pmap_pte_v(pmap_pte(kernel_pmap, va)))
if (!pmap_ste_v(pmap_ste(pmap_kernel(), va)) ||
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;

if (!vm_map_lookup_entry(pt_map, va, &entry)) {

@@ -2531,8 +2523,8 @@ pmap_print_debug()
pmap_check_stab()
{
if(kernel_pmap->pm_stab != Sysseg){
printf("Uh-oh. kernel_pmap->pm_stab != Sysseg\n");
if(pmap_kernel()->pm_stab != Sysseg){
printf("Uh-oh. pmap_kernel()->pm_stab != Sysseg\n");
panic("Sysseg!");
}
}
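
The "bogons:" hunk above shows the KPT page allocator walking backward through a freshly allocated array and pushing each page onto kpt_free_list. The idiom in isolation, as a hedged sketch (illustrative fields, not the mac68k allocator itself):

/*
 * The free-list idiom from the "bogons:" hunk above, in isolation.
 */
#include <stdio.h>
#include <stdlib.h>

struct kpt_page {
	struct kpt_page *kpt_next;
	unsigned long kpt_va;
	unsigned long kpt_pa;
};

static struct kpt_page *kpt_free_list;

int
main(void)
{
	struct kpt_page *pages, *p;
	int i, n = 4;

	if ((pages = calloc(n, sizeof(*pages))) == NULL)
		return (1);
	/* Walk backward, pushing each page on the list head: the same
	 * shape as "(--kpt_pages)->kpt_next = kpt_free_list;". */
	p = pages + n;
	for (i = 0; i < n; i++) {
		(--p)->kpt_next = kpt_free_list;
		kpt_free_list = p;
	}
	for (p = kpt_free_list; p != NULL; p = p->kpt_next)
		printf("free page %ld\n", (long)(p - pages));
	free(pages);
	return (0);
}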

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.4 1995/03/28 18:18:28 jtc Exp $ */
/* $NetBSD: pmap.h,v 1.5 1995/04/10 12:42:13 mycroft Exp $ */

/*
* Copyright (c) 1991 Regents of the University of California.

@@ -195,17 +195,13 @@ struct pmap {
typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern pmap_t kernel_pmap;
#endif

/*
* Macros for speed
*/
#define PMAP_ACTIVATE(pmapp, pcbp) \
if ((pmapp) != NULL /*&& (pmapp)->pm_pdchanged */) { \
(pcbp)->pcb_ptb = \
pmap_extract(kernel_pmap, (pmapp)->pm_pdir); \
pmap_extract(pmap_kernel(), (pmapp)->pm_pdir); \
if ((pmapp) == &curproc->p_vmspace->vm_pmap) \
_load_ptb0((pcbp)->pcb_ptb); \
(pmapp)->pm_pdchanged = FALSE; \

@@ -232,10 +228,12 @@ typedef struct pv_entry {
#ifdef KERNEL

pv_entry_t pv_table; /* array of entries, one per page */
struct pmap kernel_pmap_store;

#define pa_index(pa) atop(pa - vm_first_phys)
#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])

#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)

#endif KERNEL

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.7 1994/10/26 08:25:15 cgd Exp $ */
/* $NetBSD: pmap.c,v 1.8 1995/04/10 12:42:16 mycroft Exp $ */

/*
* Copyright (c) 1991 Regents of the University of California.

@@ -182,7 +182,6 @@ int protection_codes[8];
struct user *proc0paddr;
struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */

@@ -317,7 +316,7 @@ map_page_table(pmap_t pmap, int index, vm_offset_t va, vm_offset_t pa)
panic("remapping 2nd level table");
}
/* map in the 2nd level table */
map_page(kernel_pmap, va, pa);
map_page(pmap_kernel(), va, pa);
/* init the 2nd level table to all invalid */
bzero(pa, NBPG);
/* install the 2nd level table */

@@ -358,13 +357,6 @@ pmap_bootstrap(firstaddr, loadaddr)
*/
ns532_protection_init();

/*
* The kernel's pmap is statically allocated so we don't
* have to use pmap_create, which is unlikely to work
* correctly at this part of the boot sequence.
*/
kernel_pmap = &kernel_pmap_store;

/* setup avail_start, avail_end, virtual_avail, virtual_end */
avail_start = firstaddr;
avail_end = mem_size;

@@ -378,12 +370,12 @@ pmap_bootstrap(firstaddr, loadaddr)
/*
* Create Kernel page directory table and page maps.
*/
kernel_pmap->pm_pdir = (pd_entry_t *) (KPTphys + KERNBASE);
pmap_kernel()->pm_pdir = (pd_entry_t *) (KPTphys + KERNBASE);
/* recursively map in ptb0 */
ptr = ((int *) kernel_pmap->pm_pdir) + PDRPDROFF;
ptr = ((int *) pmap_kernel()->pm_pdir) + PDRPDROFF;
if (*ptr) {
printf("ptb0 0x%x offset 0x%x should be 0 but is 0x%x\n",
kernel_pmap->pm_pdir, PDRPDROFF, *ptr);
pmap_kernel()->pm_pdir, PDRPDROFF, *ptr);
bpt_to_monitor();
}
/* don't add KERNBASE as this has to be a physical address */

@@ -391,11 +383,11 @@ pmap_bootstrap(firstaddr, loadaddr)
/* fill in the rest of the top-level kernel VA entries */
for (x = ns532_btod(VM_MIN_KERNEL_ADDRESS);
x < ns532_btod(VM_MAX_KERNEL_ADDRESS); x++) {
ptr = (int *) &kernel_pmap->pm_pdir[x];
ptr = (int *) &pmap_kernel()->pm_pdir[x];
/* only fill in the entries not yet made in _low_level_init() */
if (!*ptr) {
/* map in the page table */
map_page_table(kernel_pmap, x,
map_page_table(pmap_kernel(), x,
virtual_avail, avail_start);
avail_start += NBPG;
virtual_avail += NBPG;

@@ -403,7 +395,7 @@ pmap_bootstrap(firstaddr, loadaddr)
}
/* map in the kernel stack for process 0 */
/* install avail_start as a 2nd level table for index 0x3f6 */
map_page_table(kernel_pmap, 0x3f6, virtual_avail, avail_start);
map_page_table(pmap_kernel(), 0x3f6, virtual_avail, avail_start);
avail_start += NBPG;
virtual_avail += NBPG;
/* reserve UPAGES pages */

@@ -411,16 +403,16 @@ pmap_bootstrap(firstaddr, loadaddr)
curpcb = (struct pcb *) proc0paddr;
va = ns532_dtob(0x3f6) | ns532_ptob(0x3fe); /* USRSTACK ? */
for (x = 0; x < UPAGES; ++x) {
map_page(kernel_pmap, va, avail_start);
map_page(kernel_pmap, virtual_avail, avail_start);
map_page(pmap_kernel(), va, avail_start);
map_page(pmap_kernel(), virtual_avail, avail_start);
bzero(va, NBPG);
va += NBPG;
avail_start += NBPG;
virtual_avail += NBPG;
}

simple_lock_init(&kernel_pmap->pm_lock);
kernel_pmap->pm_count = 1;
simple_lock_init(&pmap_kernel()->pm_lock);
pmap_kernel()->pm_count = 1;

#ifdef DEBUG
printf("avail_start = 0x%x\n", avail_start);

@@ -437,7 +429,7 @@ pmap_bootstrap(firstaddr, loadaddr)
v = (c)va; va += ((n)*NS532_PAGE_SIZE); p = pte; pte += (n);

va = virtual_avail;
pte = pmap_pte(kernel_pmap, va);
pte = pmap_pte(pmap_kernel(), va);

SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
SYSMAP(caddr_t ,CMAP2 ,CADDR2 ,1 )

@@ -544,7 +536,7 @@ pmap_map(virt, start, end, prot)
printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif
while (start < end) {
pmap_enter(kernel_pmap, virt, start, prot, FALSE);
pmap_enter(pmap_kernel(), virt, start, prot, FALSE);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}

@@ -619,7 +611,7 @@ pmap_pinit(pmap)
/* install self-referential address mapping entry */
*(int *)(pmap->pm_pdir+PTDPTDI) =
(int)pmap_extract(kernel_pmap, pmap->pm_pdir) | PG_V | PG_KW;
(int)pmap_extract(pmap_kernel(), pmap->pm_pdir) | PG_V | PG_KW;

pmap->pm_count = 1;
simple_lock_init(&pmap->pm_lock);

@@ -724,7 +716,7 @@ pmap_remove(pmap, sva, eva)
/* are we current address space or kernel? */
if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
|| pmap == kernel_pmap)
|| pmap == pmap_kernel())
ptp=PTmap;

/* otherwise, we are alternate address space */

@@ -809,7 +801,7 @@ pmap_remove(pmap, sva, eva)
/* commented out in 386 version as well */
/* are we current address space or kernel? */
if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
|| pmap == kernel_pmap) {
|| pmap == pmap_kernel()) {
_load_ptb0(curpcb->pcb_ptb);
}
#endif

@@ -969,7 +961,7 @@ pmap_protect(pmap, sva, eva, prot)
/* are we current address space or kernel? */
if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
|| pmap == kernel_pmap)
|| pmap == pmap_kernel())
ptp=PTmap;

/* otherwise, we are alternate address space */

@@ -1057,7 +1049,7 @@ pmap_enter(pmap, va, pa, prot, wired)
/* also, should not muck with PTD va! */

#ifdef DEBUG
if (pmap == kernel_pmap)
if (pmap == pmap_kernel())
enter_stats.kernel++;
else
enter_stats.user++;

@@ -1347,7 +1339,7 @@ struct pte *pmap_pte(pmap, va)
/* are we current address space or kernel? */
if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
|| pmap == kernel_pmap)
|| pmap == pmap_kernel())
return ((struct pte *) vtopte(va));

/* otherwise, we are alternate address space */

@@ -1460,7 +1452,7 @@ pmap_collect(pmap)
int *pde;
int opmapdebug;
#endif
if (pmap != kernel_pmap)
if (pmap != pmap_kernel())
return;
}

@@ -1487,17 +1479,6 @@ pmap_activate(pmap, pcbp)
#endif
}

/*
* Routine: pmap_kernel
* Function:
* Returns the physical map handle for the kernel.
*/
/* pmap_t
pmap_kernel()
{
return (kernel_pmap);
} */

/*
* pmap_zero_page zeros the specified (machine independent)
* page by mapping the page into virtual memory and using

@@ -1579,7 +1560,7 @@ pmap_pageable(pmap, sva, eva, pageable)
* - we are called with only one page at a time
* - PT pages have only one pv_table entry
*/
if (pmap == kernel_pmap && pageable && sva + PAGE_SIZE == eva) {
if (pmap == pmap_kernel() && pageable && sva + PAGE_SIZE == eva) {
register pv_entry_t pv;
register vm_offset_t pa;

@@ -1807,7 +1788,7 @@ pmap_changebit(pa, bit, setem)
#endif
for (; pv; pv = pv->pv_next) {
#ifdef DEBUG
toflush |= (pv->pv_pmap == kernel_pmap) ? 2 : 1;
toflush |= (pv->pv_pmap == pmap_kernel()) ? 2 : 1;
#endif
va = pv->pv_va;

@@ -1877,8 +1858,8 @@ pmap_check_wiring(str, va)
register int count, *pte;

va = trunc_page(va);
if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
!pmap_pte_v(pmap_pte(kernel_pmap, va)))
if (!pmap_pde_v(pmap_pde(pmap_kernel(), va)) ||
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;

if (!vm_map_lookup_entry(pt_map, va, &entry)) {

@@ -1903,14 +1884,14 @@ pads(pm)
struct pte *ptep;
int num=0;

/* if(pm == kernel_pmap) return; */
/* if(pm == pmap_kernel()) return; */
for (i = 0; i < 1024; i++)
if(pm->pm_pdir[i].pd_v)
for (j = 0; j < 1024 ; j++) {
va = (i<<22)+(j<<12);
if (pm == kernel_pmap && va < 0xfe000000)
if (pm == pmap_kernel() && va < 0xfe000000)
continue;
if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
if (pm != pmap_kernel() && va > UPT_MAX_ADDRESS)
continue;
ptep = pmap_pte(pm, va);
if(pmap_pte_v(ptep)) {

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.6 1995/03/28 18:19:22 jtc Exp $ */
/* $NetBSD: pmap.h,v 1.7 1995/04/10 12:42:18 mycroft Exp $ */

/*
* Copyright (c) 1987 Carnegie-Mellon University

@@ -95,11 +95,11 @@ typedef struct pmap {
#define PMAP_ATTR_REF 0x02 /* page has been referenced */

#ifdef KERNEL
extern char *pmap_attributes; /* reference and modify bits */
extern struct pmap kernel_pmap_store;
extern pmap_t kernel_pmap;
char *pmap_attributes; /* reference and modify bits */
struct pmap kernel_pmap_store;

#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
#define pmap_kernel() kernel_pmap
#define pmap_kernel() (&kernel_pmap_store)
#endif /* _KERNEL */

#endif /* _PMAP_MACHINE_ */

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.7 1994/11/23 20:46:20 dean Exp $ */
/* $NetBSD: pmap.c,v 1.8 1995/04/10 12:42:20 mycroft Exp $ */

/*
* Copyright (c) 1992, 1993

@@ -146,7 +146,6 @@ int pmapdebug;
#endif /* DEBUG */

struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start; /* PA of first available physical page */
vm_offset_t avail_end; /* PA of last available physical page */

@@ -217,7 +216,6 @@ pmap_bootstrap(firstaddr)
/* XXX need to decide how to set cnt.v_page_size */
pmaxpagesperpage = 1;

kernel_pmap = &kernel_pmap_store;
simple_lock_init(&kernel_pmap_store.pm_lock);
kernel_pmap_store.pm_count = 1;
}

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.13 1995/03/28 18:19:59 jtc Exp $ */
/* $NetBSD: pmap.h,v 1.14 1995/04/10 12:42:23 mycroft Exp $ */

/*
* Copyright (c) 1992, 1993

@@ -147,7 +147,6 @@ typedef struct pmap *pmap_t;
extern struct pmap kernel_pmap_store;
extern struct ksegmap kernel_segmap_store;
extern pmap_t kernel_pmap;
extern vm_offset_t vm_first_phys, vm_num_phys;

/*

@@ -170,6 +169,7 @@ int pmap_count_ptes __P((struct pmap *));
vm_offset_t pmap_prefer __P((vm_offset_t, vm_offset_t));
int pmap_pa_exists __P((vm_offset_t));

#define pmap_kernel() (&kernel_pmap_store)
#define pmap_resident_count(pmap) pmap_count_ptes(pmap)
#define managed(pa) ((unsigned)((pa) - vm_first_phys) < vm_num_phys)

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.41 1995/04/10 11:57:17 mycroft Exp $ */
/* $NetBSD: pmap.c,v 1.42 1995/04/10 12:42:26 mycroft Exp $ */

/*
* Copyright (c) 1992, 1993

@@ -264,7 +264,6 @@ caddr_t vdumppages; /* 32KB worth of reserved dump pages */
struct pmap kernel_pmap_store; /* the kernel's pmap */
struct ksegmap kernel_segmap_store; /* the kernel's segmap */
pmap_t kernel_pmap;

#define MA_SIZE 32 /* size of memory descriptor arrays */
#ifdef MACHINE_NONCONTIG

@@ -753,7 +752,7 @@ me_alloc(mh, newpm, newvseg)
pm = me->me_pmap;
if (pm == NULL)
panic("me_alloc: LRU entry has no pmap");
if (pm == kernel_pmap)
if (pm == pmap_kernel())
panic("me_alloc: stealing from kernel");
pte = pm->pm_pte[me->me_vseg];
if (pte == NULL)

@@ -1391,8 +1390,6 @@ pmap_bootstrap(nmmu, nctx)
cnt.v_page_size = NBPG;
vm_set_page_size();

kernel_pmap = (pmap_t)&kernel_pmap_store;

ncontext = nctx;

#if defined(SUN4) && defined(SUN4C)

@@ -1493,7 +1490,7 @@ pmap_bootstrap(nmmu, nctx)
* Intialize the kernel pmap.
*/
{
register struct pmap *k = kernel_pmap;
register struct pmap *k = pmap_kernel();

k->pm_ctx = ctxinfo;
/* k->pm_ctxnum = 0; */

@@ -1513,7 +1510,7 @@ pmap_bootstrap(nmmu, nctx)
*
* XXX sun4c could use context 0 for users?
*/
ci->c_pmap = kernel_pmap;
ci->c_pmap = pmap_kernel();
ctx_freelist = ci + 1;
for (i = 1; i < ncontext; i++) {
ci++;

@@ -1557,14 +1554,14 @@ pmap_bootstrap(nmmu, nctx)
me->me_pmeg = i;
insque(me, me_locked.mh_prev);
/* me->me_pmforw = NULL; */
me->me_pmback = kernel_pmap->pm_mmuback;
*kernel_pmap->pm_mmuback = me;
kernel_pmap->pm_mmuback = &me->me_pmforw;
me->me_pmap = kernel_pmap;
me->me_pmback = pmap_kernel()->pm_mmuback;
*pmap_kernel()->pm_mmuback = me;
pmap_kernel()->pm_mmuback = &me->me_pmforw;
me->me_pmap = pmap_kernel();
me->me_vseg = vs;
kernel_pmap->pm_segmap[vs] = i;
pmap_kernel()->pm_segmap[vs] = i;
n = ++i < z ? NPTESG : lastpage;
kernel_pmap->pm_npte[vs] = n;
pmap_kernel()->pm_npte[vs] = n;
me++;
vs++;
if (i < z) {

@@ -1722,7 +1719,7 @@ pass2:
/* Map this piece of pv_table[] */
for (va = sva; va < eva; va += PAGE_SIZE) {
pmap_enter(kernel_pmap, va, pa,
pmap_enter(pmap_kernel(), va, pa,
VM_PROT_READ|VM_PROT_WRITE, 1);
pa += PAGE_SIZE;
}

@@ -1730,7 +1727,7 @@ pass2:
}

if (pass1) {
pa = pmap_extract(kernel_pmap, kmem_alloc(kernel_map, s));
pa = pmap_extract(pmap_kernel(), kmem_alloc(kernel_map, s));
pass1 = 0;
goto pass2;
}

@@ -1752,7 +1749,7 @@ pmap_map(va, pa, endpa, prot)
register int pgsize = PAGE_SIZE;

while (pa < endpa) {
pmap_enter(kernel_pmap, va, pa, prot, 1);
pmap_enter(pmap_kernel(), va, pa, prot, 1);
va += pgsize;
pa += pgsize;
}

@@ -1912,7 +1909,7 @@ pmap_remove(pm, va, endva)
printf("pmap_remove(%x, %x, %x)\n", pm, va, endva);
#endif

if (pm == kernel_pmap) {
if (pm == pmap_kernel()) {
/*
* Removing from kernel address space.
*/

@@ -2240,7 +2237,7 @@ pmap_page_protect(pa, prot)
flags |= MR(tpte);
if (pm->pm_ctx) {
setsegmap(va, seginval);
if (pm == kernel_pmap) {
if (pm == pmap_kernel()) {
for (i = ncontext; --i > 0;) {
setcontext(i);
setsegmap(va, seginval);

@@ -2379,7 +2376,7 @@ pmap_changeprot(pm, va, prot, wired)
write_user_windows(); /* paranoia */

if (pm == kernel_pmap)
if (pm == pmap_kernel())
newprot = prot & VM_PROT_WRITE ? PG_S|PG_W : PG_S;
else
newprot = prot & VM_PROT_WRITE ? PG_W : 0;

@@ -2486,7 +2483,7 @@ pmap_enter(pm, va, pa, prot, wired)
pteproto |= PG_W;

ctx = getcontext();
if (pm == kernel_pmap)
if (pm == pmap_kernel())
pmap_enk(pm, va, prot, wired, pv, pteproto | PG_S);
else
pmap_enu(pm, va, prot, wired, pv, pteproto);

@@ -2918,17 +2915,6 @@ pmap_pageable(pm, start, end, pageable)
{
}

/*
* Routine: pmap_kernel
* Function:
* Returns the physical map handle for the kernel.
*/
pmap_t
pmap_kernel()
{
return (kernel_pmap);
}

/*
* Fill the given MI physical page with zero bytes.
*

@@ -3044,7 +3030,7 @@ pmap_count_ptes(pm)
{
register int idx, total;

if (pm == kernel_pmap)
if (pm == pmap_kernel())
idx = NKSEG;
else
idx = NUSEG;
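
Here the sparc code deletes a real, out-of-line pmap_kernel() function; per the commit message the accessor is now "always inlined as a pointer to kernel_pmap_store". A sketch of the two spellings side by side (illustrative, not the kernel sources):

/*
 * Sketch, not the kernel sources: the two spellings of the accessor.
 * The function costs a call at run time; the macro is a link-time
 * constant the compiler can fold into comparisons.
 */
#include <stdio.h>

struct pmap { int pm_count; };
struct pmap kernel_pmap_store;

static struct pmap *kernel_pmap = &kernel_pmap_store;	/* old style */

static struct pmap *
pmap_kernel_fn(void)					/* old accessor */
{
	return (kernel_pmap);
}

#define pmap_kernel()	(&kernel_pmap_store)		/* new accessor */

int
main(void)
{
	/* Both name the same object; only the first needs a call. */
	printf("same object: %d\n", pmap_kernel_fn() == pmap_kernel());
	return (0);
}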

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.12 1995/03/28 18:21:07 jtc Exp $ */
/* $NetBSD: pmap.h,v 1.13 1995/04/10 12:42:29 mycroft Exp $ */

/*
* Copyright (c) 1994 Gordon W. Ross

@@ -50,9 +50,9 @@ struct pmap {
typedef struct pmap *pmap_t;

#ifdef _KERNEL
struct pmap kernel_pmap_store;

extern pmap_t kernel_pmap;
#define pmap_kernel() (kernel_pmap)
#define pmap_kernel() (&kernel_pmap_store)

#define PMAP_ACTIVATE(pmap, pcbp, iscurproc) \
pmap_activate(pmap, pcbp)

@@ -1,4 +1,4 @@
/* $NetBSD: pmap3.h,v 1.12 1995/03/28 18:21:07 jtc Exp $ */
/* $NetBSD: pmap3.h,v 1.13 1995/04/10 12:42:29 mycroft Exp $ */

/*
* Copyright (c) 1994 Gordon W. Ross

@@ -50,9 +50,9 @@ struct pmap {
typedef struct pmap *pmap_t;

#ifdef _KERNEL
struct pmap kernel_pmap_store;

extern pmap_t kernel_pmap;
#define pmap_kernel() (kernel_pmap)
#define pmap_kernel() (&kernel_pmap_store)

#define PMAP_ACTIVATE(pmap, pcbp, iscurproc) \
pmap_activate(pmap, pcbp)

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.48 1995/04/08 04:45:43 gwr Exp $ */
/* $NetBSD: pmap.c,v 1.49 1995/04/10 12:42:32 mycroft Exp $ */

/*
* Copyright (c) 1994 Gordon W. Ross

@@ -267,9 +267,8 @@ save_modref_bits(int pte)
#define PM_UPDATE_CACHE 1
/* external structures */
pmap_t kernel_pmap = NULL;
static int pmap_version = 1;
static struct pmap kernel_pmap_store;
struct pmap kernel_pmap_store;

/* protection conversion */
static unsigned int protection_converter[8];

@@ -437,8 +436,8 @@ context_allocate(pmap)
if (pmap_debug & PMD_CONTEXT)
printf("context_allocate: for pmap %x\n", pmap);
#endif
if (pmap == kernel_pmap)
panic("context_allocate: kernel_pmap");
if (pmap == pmap_kernel())
panic("context_allocate: pmap_kernel()");
if (has_context(pmap))
panic("pmap: pmap already has context allocated to it");
if (TAILQ_EMPTY(&context_free_queue)) {

@@ -747,7 +746,7 @@ pmeg_allocate(pmap, va)
pmegp->pmeg_wired = 0;
pmegp->pmeg_reserved = 0;
pmegp->pmeg_vpages = 0;
if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
TAILQ_INSERT_TAIL(&pmeg_kernel_queue, pmegp, pmeg_link);
pmegp->pmeg_qstate = PMEGQ_KERNEL;
} else {

@@ -786,8 +785,8 @@ pmeg_release(pmegp)
#endif

#ifdef DIAGNOSTIC
if (pmegp->pmeg_owner == kernel_pmap)
panic("pmeg_release: kernel_pmap");
if (pmegp->pmeg_owner == pmap_kernel())
panic("pmeg_release: pmap_kernel()");
if (pmegp->pmeg_qstate != PMEGQ_ACTIVE)
panic("pmeg_release: not q_active %x", pmegp);
#endif

@@ -799,7 +798,7 @@ pmeg_release(pmegp)
/*
* Move the pmeg to the free queue from wherever it is.
* The pmeg will be clean. It might be in kernel_pmap.
* The pmeg will be clean. It might be in pmap_kernel().
*/
static void
pmeg_free(pmegp, segnum)

@@ -857,8 +856,8 @@ pmeg_cache(pmap, va)
CHECK_SPL();

#ifdef PMAP_DEBUG
if (pmap == kernel_pmap)
panic("pmeg_cache: kernel_pmap");
if (pmap == pmap_kernel())
panic("pmeg_cache: pmap_kernel()");
#endif

#ifdef DIAGNOSTIC

@@ -1018,7 +1017,7 @@ pv_changepte(head, set_bits, clear_bits)
/* Is the PTE currently accessable in some context? */
in_ctx = FALSE;
if (pmap == kernel_pmap)
if (pmap == pmap_kernel())
in_ctx = TRUE;
else if (has_context(pmap)) {
/* PMEG may be inactive. */

@@ -1117,7 +1116,7 @@ pv_syncflags(head)
/* Is the PTE currently accessable in some context? */
in_ctx = FALSE;
if (pmap == kernel_pmap)
if (pmap == pmap_kernel())
in_ctx = TRUE;
else if (has_context(pmap)) {
/* PMEG may be inactive. */

@@ -1450,8 +1449,7 @@ pmap_bootstrap()
/* after setting up some structures */

kernel_pmap = &kernel_pmap_store;
pmap_common_init(kernel_pmap);
pmap_common_init(pmap_kernel());

context_init();

@@ -1582,7 +1580,7 @@ pmap_map(virt, start, end, prot)
int prot;
{
while (start < end) {
pmap_enter(kernel_pmap, virt, start, prot, FALSE);
pmap_enter(pmap_kernel(), virt, start, prot, FALSE);
virt += NBPG;
start += NBPG;
}

@@ -1637,8 +1635,8 @@ pmap_release(pmap)
struct pmap *pmap;
{

if (pmap == kernel_pmap)
panic("pmap_release: kernel_pmap!");
if (pmap == pmap_kernel())
panic("pmap_release: pmap_kernel()!");

if (has_context(pmap))
context_free(pmap);

@@ -1665,8 +1663,8 @@ pmap_destroy(pmap)
if (pmap_debug & PMD_CREATE)
printf("pmap_destroy(%x)\n", pmap);
#endif
if (pmap == kernel_pmap)
panic("pmap_destroy: kernel_pmap!");
if (pmap == pmap_kernel())
panic("pmap_destroy: pmap_kernel()!");
pmap_lock(pmap);
count = pmap_del_ref(pmap);
pmap_unlock(pmap);

@@ -1753,7 +1751,7 @@ pmap_remove_range_mmu(pmap, sva, eva)
CHECK_SPL();

#ifdef DIAGNOSTIC
if (pmap != kernel_pmap) {
if (pmap != pmap_kernel()) {
if (pmap->pm_ctxnum != get_context())
panic("pmap_remove_range_mmu: wrong context");
}

@@ -1819,7 +1817,7 @@ pmap_remove_range_mmu(pmap, sva, eva)
}

/* First, remove it from the MMU. */
if (kernel_pmap == pmap) {
if (pmap_kernel() == pmap) {
old_ctx = get_context();
for (i=0; i < NCONTEXT; i++) { /* map out of all segments */
set_context(i);

@@ -1862,8 +1860,8 @@ pmap_remove_range_noctx(pmap, sva, eva)
#ifdef PMAP_DEBUG
/* Kernel always in a context (actually, in all contexts). */
if (pmap == kernel_pmap)
panic("pmap_remove_range_noctx: kernel_pmap");
if (pmap == pmap_kernel())
panic("pmap_remove_range_noctx: pmap_kernel()");
if (pmap->pm_segmap == NULL)
panic("pmap_remove_range_noctx: null segmap");
#endif

@@ -1926,7 +1924,7 @@ pmap_remove_range(pmap, sva, eva)
* user: has context, isn't available (NOTHING) |
*/

if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
sme = get_segmap(sva);
if (sme != SEGINV)
pmap_remove_range_mmu(pmap, sva, eva);

@@ -1981,7 +1979,7 @@ pmap_remove(pmap, sva, eva)
if (pmap == NULL)
return;

if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
if (sva < VM_MIN_KERNEL_ADDRESS)
sva = VM_MIN_KERNEL_ADDRESS;
if (eva > DVMA_SPACE_END) {

@@ -2067,7 +2065,7 @@ pmap_enter_kernel(va, pa, prot, wired, new_pte)
sme = get_segmap(va);
if (sme == SEGINV) {
pmegp = pmeg_allocate(kernel_pmap, sun3_trunc_seg(va));
pmegp = pmeg_allocate(pmap_kernel(), sun3_trunc_seg(va));
sme = pmegp->pmeg_index;
c = get_context();
for (i=0; i < NCONTEXT; i++) { /* map into all contexts */

@@ -2078,7 +2076,7 @@ pmap_enter_kernel(va, pa, prot, wired, new_pte)
#ifdef PMAP_DEBUG
if (pmap_debug & PMD_SEGMAP) {
printf("pmap: set_segmap pmap=%x va=%x sme=%x (ek1)\n",
kernel_pmap, seg_va, sme);
pmap_kernel(), seg_va, sme);
}
pmeg_verify_empty(sun3_trunc_seg(va));
#endif

@@ -2090,7 +2088,7 @@ pmap_enter_kernel(va, pa, prot, wired, new_pte)
pmegp = pmeg_p(sme);
#ifdef DIAGNOSTIC
/* Make sure it is ours. */
if (pmegp->pmeg_owner && (pmegp->pmeg_owner != kernel_pmap))
if (pmegp->pmeg_owner && (pmegp->pmeg_owner != pmap_kernel()))
panic("pmap_enter_kernel: MMU has bad pmeg %x", sme);
#endif

@@ -2128,7 +2126,7 @@ pmap_enter_kernel(va, pa, prot, wired, new_pte)
}

/* OK, different type or PA, have to kill old pv_entry. */
pv_unlink(kernel_pmap, PG_PA(old_pte), va);
pv_unlink(pmap_kernel(), PG_PA(old_pte), va);

add_pte: /* can be destructive */
pmeg_set_wiring(pmegp, va, wired);

@@ -2139,7 +2137,7 @@ pmap_enter_kernel(va, pa, prot, wired, new_pte)
do_pv = FALSE;
}
if (do_pv) {
nflags = pv_link(kernel_pmap, pa, va,
nflags = pv_link(pmap_kernel(), pa, va,
PG_TO_PV_FLAGS(new_pte & PG_NC));
if (nflags & PV_NC)
new_pte |= PG_NC;

@@ -2147,7 +2145,7 @@ pmap_enter_kernel(va, pa, prot, wired, new_pte)
#ifdef PMAP_DEBUG
if ((pmap_debug & PMD_SETPTE) || (va == pmap_db_watchva)) {
printf("pmap: set_pte pmap=%x va=%x old=%x new=%x (ek)\n",
kernel_pmap, va, old_pte, new_pte);
pmap_kernel(), va, old_pte, new_pte);
}
#endif
set_pte(va, new_pte);

@@ -2397,7 +2395,7 @@ pmap_enter(pmap, va, pa, prot, wired)
*
*/
PMAP_LOCK();
if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
/* This can be called recursively through malloc. */
pte_proto |= PG_SYSTEM;
pmap_enter_kernel(va, pa, prot, wired, pte_proto);

@@ -2425,8 +2423,8 @@ int pmap_fault_reload(pmap, va, ftype)
pmeg_t pmegp;

#ifdef PMAP_DEBUG
if (pmap == kernel_pmap)
panic("pmap_fault_reload: kernel_pmap");
if (pmap == pmap_kernel())
panic("pmap_fault_reload: pmap_kernel()");
#endif
if (pmap->pm_segmap == NULL) {
#ifdef PMAP_DEBUG

@@ -2563,8 +2561,8 @@ pmap_activate(pmap, pcbp)
{
CHECK_SPL();

if (pmap == kernel_pmap)
panic("pmap_activate: kernel_pmap");
if (pmap == pmap_kernel())
panic("pmap_activate: pmap_kernel()");

if (!has_context(pmap)) {
context_allocate(pmap);

@@ -2629,7 +2627,7 @@ pmap_change_wiring(pmap, va, wired)
* pmap_enter() was called and we ignored wiring.
* (VM code appears to wire a stack page during fork.)
*/
if (pmap != kernel_pmap) {
if (pmap != pmap_kernel()) {
#ifdef PMAP_DEBUG
if (pmap_debug & PMD_WIRING)
printf(" (user pmap -- ignored)\n");

@@ -2686,7 +2684,7 @@ pmap_extract(pmap, va)
pte = 0;
PMAP_LOCK();
if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
sme = get_segmap(va);
if (sme != SEGINV)
pte = get_pte(va);

@@ -2776,7 +2774,7 @@ pmap_protect_range_mmu(pmap, sva, eva)
CHECK_SPL();

#ifdef DIAGNOSTIC
if (pmap != kernel_pmap) {
if (pmap != pmap_kernel()) {
if (pmap->pm_ctxnum != get_context())
panic("pmap_protect_range_mmu: wrong context");
}

@@ -2843,8 +2841,8 @@ pmap_protect_range_noctx(pmap, sva, eva)
#ifdef PMAP_DEBUG
/* Kernel always in a context (actually, in all contexts). */
if (pmap == kernel_pmap)
panic("pmap_protect_range_noctx: kernel_pmap");
if (pmap == pmap_kernel())
panic("pmap_protect_range_noctx: pmap_kernel()");
if (pmap->pm_segmap == NULL)
panic("pmap_protect_range_noctx: null segmap");
#endif

@@ -2891,7 +2889,7 @@ pmap_protect_range(pmap, sva, eva)
panic("pmap_protect_range: bad range!");
#endif

if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
sme = get_segmap(sva);
if (sme != SEGINV)
pmap_protect_range_mmu(pmap, sva, eva);

@@ -2962,7 +2960,7 @@ pmap_protect(pmap, sva, eva, prot)
return;
}

if (pmap == kernel_pmap) {
if (pmap == pmap_kernel()) {
if (sva < VM_MIN_KERNEL_ADDRESS)
sva = VM_MIN_KERNEL_ADDRESS;
if (eva > DVMA_SPACE_END) {
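
One subtle line in the sun3 diff: kernel_pmap_store loses its static qualifier. Once pmap_kernel() expands to &kernel_pmap_store in other compilation units, the object needs external linkage; a file-static definition could not satisfy the header's declaration. A sketch of the linkage rule involved (one file for brevity; the extern models what pmap.h would declare):

/*
 * Sketch, not the kernel sources.  Were the definition below 'static',
 * the extern declaration (standing in for the header) could not refer
 * to it from another .c file.
 */
#include <stdio.h>

struct pmap { int pm_count; };

struct pmap kernel_pmap_store;		/* external linkage */

extern struct pmap kernel_pmap_store;	/* as a header would declare */
#define pmap_kernel()	(&kernel_pmap_store)

int
main(void)
{
	pmap_kernel()->pm_count = 1;
	printf("count = %d\n", pmap_kernel()->pm_count);
	return (0);
}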

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.6 1995/02/13 00:43:28 ragge Exp $ */
/* $NetBSD: pmap.h,v 1.7 1995/04/10 12:42:36 mycroft Exp $ */

/*
* Copyright (c) 1987 Carnegie-Mellon University

@@ -55,7 +55,7 @@
/*
* Pmap structure
*
* p0br == PR_P0BR in user struct, p0br is also == SBR in kernel_pmap
* p0br == PR_P0BR in user struct, p0br is also == SBR in pmap_kernel()
* p1br is the same for stack space, stack is base of alloced pte mem
*/

@@ -67,8 +67,6 @@ typedef struct pmap {
simple_lock_data_t lock; /* lock on pmap */
} *pmap_t;

extern pmap_t kernel_pmap;

/*
* For each vm_page_t, there is a list of all currently valid virtual
* mappings of that page. An entry is a pv_entry_t, the list is pv_table.

@@ -88,11 +86,12 @@ typedef struct pv_entry {
#ifdef KERNEL
pv_entry_t pv_table; /* array of entries,
one per LOGICAL page */
struct pmap kernel_pmap_store;

#define pa_index(pa) atop(pa)
#define pa_to_pvh(pa) (&pv_table[atop(pa)])

#define pmap_kernel() (kernel_pmap)
#define pmap_kernel() (&kernel_pmap_store)

extern char *vmmap; /* map for mem, dumps, etc. */
#endif KERNEL

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.9 1995/04/10 03:54:27 mycroft Exp $ */
/* $NetBSD: pmap.c,v 1.10 1995/04/10 12:42:39 mycroft Exp $ */
#undef oldway
#define DEBUG
/*

@@ -67,7 +67,6 @@ pt_entry_t *pmap_virt2pte(pmap_t, u_int);
struct pmap kernel_pmap_store;
unsigned int gurkskit[50],istack;
pmap_t kernel_pmap = &kernel_pmap_store;

static pv_entry_t alloc_pv_entry();
static void free_pv_entry();

@@ -174,8 +173,8 @@ pmap_bootstrap()
VM_PROT_READ|VM_PROT_WRITE);

/* Init kernel pmap */
kernel_pmap->ref_count = 1;
simple_lock_init(&kernel_pmap->pm_lock);
pmap_kernel()->ref_count = 1;
simple_lock_init(&pmap_kernel()->pm_lock);
p0pmap->pm_pcb=(struct pcb *)proc0paddr;

/* used for signal trampoline code */