cosmetics (typo, KNF etc.)

tsutsui 2001-12-16 03:41:57 +00:00
parent 72a263b8f7
commit f7312c690f

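KNF is "Kernel Normal Form", the BSD kernel coding style described in NetBSD's style(5). Most hunks below are mechanical cleanups of that kind; two before/after pairs taken straight from this diff show the pattern:

	/* before */
	npages = min(atop(MAX_KPTSIZE), maxproc+16);
	return (va);

	/* after: spaces around binary operators, no parentheses around
	 * return expressions, continuation lines indented four spaces */
	npages = min(atop(MAX_KPTSIZE), maxproc + 16);
	return va;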

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.25 2001/12/13 04:39:52 chs Exp $ */
/* $NetBSD: pmap.c,v 1.26 2001/12/16 03:41:57 tsutsui Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@@ -159,11 +159,7 @@
#define PDB_PVDUMP 0x8000
int debugmap = 0;
#if 1
int pmapdebug = PDB_PARANOIA;
#else
int pmapdebug = 0xffff;
#endif
#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
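Several hunks in this commit fold open-coded "if (pmapdebug & PDB_X) printf(...)" pairs into this macro. Note the calling convention the definition forces: the format string and its arguments travel as one parenthesized group, the usual pre-C99 substitute for variadic macros. Taking a call that appears later in this diff:

	PMAP_DPRINTF(PDB_PARANOIA,
	    ("pmap_unwire: invalid STE for %lx\n", va));

	/* expands to */
	if (pmapdebug & (PDB_PARANOIA))
		printf ("pmap_unwire: invalid STE for %lx\n", va);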
@@ -262,8 +258,10 @@ struct pv_entry *pv_table;
char *pmap_attributes; /* reference and modify bits */
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
int pmap_aliasmask; /* separation at which VA aliasing ok */
#ifdef CACHE_HAVE_VAC
int pmap_aliasmask; /* separation at which VA aliasing ok */
#endif
#if defined(M68040)
int protostfree; /* prototype (default) free ST map */
#endif
@@ -372,13 +370,13 @@ pmap_procwr(p, va, len)
void
pmap_init()
{
vaddr_t addr, addr2;
vsize_t s;
struct pv_entry *pv;
char *attr;
int rv;
int npages;
int bank;
vaddr_t addr, addr2;
vsize_t s;
struct pv_entry *pv;
char *attr;
int rv;
int npages;
int bank;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_init()\n"));
@@ -395,18 +393,16 @@ pmap_init()
*/
addr = (vaddr_t) intiobase;
if (uvm_map(kernel_map, &addr,
intiotop_phys - intiobase_phys,
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM,
UVM_FLAG_FIXED)) != 0)
intiotop_phys - intiobase_phys,
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
goto bogons;
addr = (vaddr_t) Sysmap;
if (uvm_map(kernel_map, &addr, MAX_PTSIZE,
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM,
UVM_FLAG_FIXED)) != 0) {
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0) {
/*
* If this fails, it is probably because the static
* portion of the kernel page table isn't big enough
@@ -469,7 +465,7 @@ pmap_init()
* Allocate physical memory for kernel PT pages and their management.
* We need 1 PT page per possible task plus some slop.
*/
npages = min(atop(MAX_KPTSIZE), maxproc+16);
npages = min(atop(MAX_KPTSIZE), maxproc + 16);
s = ptoa(npages) + round_page(npages * sizeof(struct kpt_page));
/*
@@ -478,8 +474,8 @@
*/
addr = 0;
rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
if (rv != 0 || (addr + s) >= (vaddr_t)Sysmap)
panic("pmap_init: kernel PT too small");
uvm_unmap(kernel_map, addr, addr + s);
@@ -707,7 +703,7 @@ pmap_map(va, spa, epa, prot)
spa += NBPG;
}
pmap_update(pmap_kernel());
return (va);
return va;
}
/*
@@ -723,12 +719,12 @@ pmap_create()
pmap_t pmap;
PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
("pmap_create\n"));
("pmap_create()\n"));
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
memset(pmap, 0, sizeof(*pmap));
pmap_pinit(pmap);
return (pmap);
return pmap;
}
/*
@@ -811,8 +807,7 @@ pmap_release(pmap)
(vaddr_t)pmap->pm_ptab + MAX_PTSIZE);
uvm_km_pgremove(uvm.kernel_object, (vaddr_t)pmap->pm_ptab,
(vaddr_t)pmap->pm_ptab + MAX_PTSIZE);
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
MAX_PTSIZE);
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab, MAX_PTSIZE);
}
KASSERT(pmap->pm_stab == Segtabzero);
}
@@ -824,10 +819,10 @@ pmap_release(pmap)
*/
void
pmap_reference(pmap)
pmap_t pmap;
pmap_t pmap;
{
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
simple_lock(&pmap->pm_lock);
pmap->pm_count++;
simple_unlock(&pmap->pm_lock);
@@ -995,7 +990,7 @@ pmap_remove(pmap, sva, eva)
void
pmap_page_protect(pg, prot)
struct vm_page *pg;
vm_prot_t prot;
vm_prot_t prot;
{
paddr_t pa = VM_PAGE_TO_PHYS(pg);
struct pv_entry *pv;
@@ -1035,11 +1030,12 @@ pmap_page_protect(pg, prot)
pte, PRM_TFLUSH|PRM_CFLUSH);
else {
pv = pv->pv_next;
#ifdef DEBUG
if (pmapdebug & PDB_PARANOIA)
printf("%s wired mapping for %lx not removed\n",
"pmap_page_protect:", pa);
#endif
PMAP_DPRINTF(PDB_PARANOIA,
("%s wired mapping for %lx not removed\n",
"pmap_page_protect:", pa));
PMAP_DPRINTF(PDB_PARANOIA,
("vm wired count %d\n",
PHYS_TO_VM_PAGE(pa)->wired_count));
if (pv == NULL)
break;
}
@@ -1055,9 +1051,9 @@ pmap_page_protect(pg, prot)
*/
void
pmap_protect(pmap, sva, eva, prot)
pmap_t pmap;
vaddr_t sva, eva;
vm_prot_t prot;
pmap_t pmap;
vaddr_t sva, eva;
vm_prot_t prot;
{
vaddr_t nssva;
pt_entry_t *pte;
@@ -1177,8 +1173,8 @@ pmap_enter(pmap, va, pa, prot, flags)
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
pmap->pm_ptab = (pt_entry_t *)
uvm_km_valloc_wait(pt_map, MAX_PTSIZE);
pmap->pm_ptab =
(pt_entry_t *) uvm_km_valloc_wait(pt_map, MAX_PTSIZE);
/*
* Segment table entry not valid, we need a new PT page
@@ -1425,6 +1421,14 @@ validate:
return 0;
}
/*
* pmap_kenter_pa: [ INTERFACE ]
*
* Enter a va -> pa mapping into the kernel pmap without any
* physical->virtual tracking.
*
* Note: no locking is necessary in this function.
*/
void
pmap_kenter_pa(va, pa, prot)
vaddr_t va;
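Because these mappings skip the PV lists, they are invisible to pmap_page_protect() and friends and must be torn down with pmap_kremove(), not pmap_remove(). A minimal sketch of a caller, with made-up names dev_va/dev_pa (not part of this commit):

	/* hypothetical: map one page of device registers, unmanaged */
	pmap_kenter_pa(dev_va, dev_pa, VM_PROT_READ | VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	/* ... use the mapping ... */
	pmap_kremove(dev_va, NBPG);
	pmap_update(pmap_kernel());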
@@ -1442,7 +1446,7 @@ pmap_kenter_pa(va, pa, prot)
* Segment table entry not valid, we need a new PT page
*/
if (!pmap_ste_v(pmap, va)) {
if (!pmap_ste_v(pmap, va)) {
s = splvm();
pmap_enter_ptpage(pmap, va);
splx(s);
@@ -1481,22 +1485,28 @@ pmap_kenter_pa(va, pa, prot)
*pte = npte;
}
/*
* pmap_kremove: [ INTERFACE ]
*
* Remove a mapping entered with pmap_kenter_pa() starting at va,
* for len bytes (assumed to be page rounded).
*
*/
void
pmap_kremove(va, len)
vaddr_t va;
vsize_t len;
{
struct pmap *pmap = pmap_kernel();
vaddr_t sva, eva, nssva;
vaddr_t eva, nssva;
pt_entry_t *pte;
PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
("pmap_kremove(%lx, %lx)\n", va, len));
sva = va;
eva = va + len;
while (sva < eva) {
nssva = m68k_trunc_seg(sva) + NBSEG;
while (va < eva) {
nssva = m68k_trunc_seg(va) + NBSEG;
if (nssva == 0 || nssva > eva)
nssva = eva;
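The sva -> va rename here (and in the hunks that follow) does not change the traversal, which visits the range one MMU segment at a time; schematically:

	/* simplified sketch of the loop structure */
	while (va < eva) {
		nssva = m68k_trunc_seg(va) + NBSEG; /* next segment start */
		if (nssva == 0 || nssva > eva)      /* wrapped, or past end */
			nssva = eva;
		if (!pmap_ste_v(pmap, va)) {        /* no PT page: skip the */
			va = nssva;                 /* whole segment */
			continue;
		}
		/* invalidate each valid PTE from va up to nssva */
	}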
@@ -1505,8 +1515,8 @@ pmap_kremove(va, len)
* skip to the next segment boundary.
*/
if (!pmap_ste_v(pmap, sva)) {
sva = nssva;
if (!pmap_ste_v(pmap, va)) {
va = nssva;
continue;
}
@@ -1514,8 +1524,8 @@ pmap_kremove(va, len)
* Invalidate every valid mapping within this segment.
*/
pte = pmap_pte(pmap, sva);
while (sva < nssva) {
pte = pmap_pte(pmap, va);
while (va < nssva) {
if (pmap_pte_v(pte)) {
#ifdef DEBUG
struct pv_entry *pv;
@@ -1525,7 +1535,7 @@ pmap_kremove(va, len)
s = splvm();
while (pv->pv_pmap != NULL) {
KASSERT(pv->pv_pmap != pmap_kernel() ||
pv->pv_va != sva);
pv->pv_va != va);
pv = pv->pv_next;
if (pv == NULL) {
break;
@@ -1545,10 +1555,10 @@ pmap_kremove(va, len)
*/
*pte = PG_NV;
TBIS(sva);
TBIS(va);
}
pte++;
sva += NBPG;
va += NBPG;
}
}
}
@@ -1562,8 +1572,8 @@ pmap_kremove(va, len)
*/
void
pmap_unwire(pmap, va)
pmap_t pmap;
vaddr_t va;
pmap_t pmap;
vaddr_t va;
{
pt_entry_t *pte;
@@ -1578,8 +1588,8 @@ pmap_unwire(pmap, va)
* we don't want to force allocation of unnecessary PTE pages.
*/
if (!pmap_ste_v(pmap, va)) {
if (pmapdebug & PDB_PARANOIA)
printf("pmap_unwire: invalid STE for %lx\n", va);
PMAP_DPRINTF(PDB_PARANOIA,
("pmap_unwire: invalid STE for %lx\n", va));
return;
}
/*
@@ -1587,8 +1597,8 @@ pmap_unwire(pmap, va)
* Just continue and change wiring anyway.
*/
if (!pmap_pte_v(pte)) {
if (pmapdebug & PDB_PARANOIA)
printf("pmap_unwire: invalid PTE for %lx\n", va);
PMAP_DPRINTF(PDB_PARANOIA,
("pmap_unwire: invalid PTE for %lx\n", va));
}
#endif
/*
@@ -1616,7 +1626,7 @@ pmap_unwire(pmap, va)
*/
boolean_t
pmap_extract(pmap, va, pap)
pmap_t pmap;
pmap_t pmap;
vaddr_t va;
paddr_t *pap;
{
@@ -1627,7 +1637,7 @@ pmap_extract(pmap, va, pap)
PMAP_DPRINTF(PDB_FOLLOW,
("pmap_extract(%p, %lx) -> ", pmap, va));
if (pmap && pmap_ste_v(pmap, va)) {
if (pmap_ste_v(pmap, va)) {
pte = *(u_int *)pmap_pte(pmap, va);
if (pte) {
pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
@@ -1645,7 +1655,7 @@ pmap_extract(pmap, va, pap)
}
#endif
return (rv);
return rv;
}
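pmap_extract() is the canonical VA-to-PA lookup: it returns TRUE and fills *pap only when a valid mapping exists (the pmap_collect1 hunk further down drops a needless (paddr_t *) cast on exactly such a call). Typical use:

	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa))
		printf("va 0x%lx -> pa 0x%lx\n", va, pa);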
/*
@@ -1659,11 +1669,11 @@ pmap_extract(pmap, va, pap)
*/
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
pmap_t dst_pmap;
pmap_t src_pmap;
vaddr_t dst_addr;
vsize_t len;
vaddr_t src_addr;
pmap_t dst_pmap;
pmap_t src_pmap;
vaddr_t dst_addr;
vsize_t len;
vaddr_t src_addr;
{
PMAP_DPRINTF(PDB_FOLLOW,
@@ -1683,7 +1693,7 @@ pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
*/
void
pmap_collect(pmap)
pmap_t pmap;
pmap_t pmap;
{
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_collect(%p)\n", pmap));
@@ -1729,8 +1739,8 @@ pmap_collect(pmap)
*/
void
pmap_collect1(pmap, startpa, endpa)
pmap_t pmap;
paddr_t startpa, endpa;
pmap_t pmap;
paddr_t startpa, endpa;
{
paddr_t pa;
struct pv_entry *pv;
@@ -1765,7 +1775,7 @@ pmap_collect1(pmap, startpa, endpa)
goto ok;
pmap_pvdump(pa);
continue;
ok:
ok:
#endif
pte = (pt_entry_t *)(pv->pv_va + NBPG);
while (--pte >= (pt_entry_t *)pv->pv_va && *pte == PG_NV)
@@ -1776,7 +1786,7 @@ ok:
#ifdef DEBUG
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT)) {
printf("collect: freeing KPT page at %lx (ste %x@%p)\n",
pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
pv->pv_va, *pv->pv_ptste, pv->pv_ptste);
opmapdebug = pmapdebug;
pmapdebug |= PDB_PTPAGE;
}
@@ -1788,9 +1798,9 @@ ok:
* We call pmap_remove_entry to take care of invalidating
* ST and Sysptmap entries.
*/
(void) pmap_extract(pmap, pv->pv_va, (paddr_t *)&kpa);
(void) pmap_extract(pmap, pv->pv_va, &kpa);
pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
PRM_TFLUSH|PRM_CFLUSH);
PRM_TFLUSH|PRM_CFLUSH);
/*
* Use the physical address to locate the original
* (kmem_alloc assigned) address for the page and put
@@ -1804,9 +1814,8 @@ ok:
#ifdef DEBUG
if (kpt == NULL)
panic("pmap_collect: lost a KPT page");
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
printf("collect: %lx (%lx) to free list\n",
kpt->kpt_va, kpa);
PMAP_DPRINTF(PDB_PTPAGE|PDB_COLLECT,
("collect: %lx (%lx) to free list\n", kpt->kpt_va, kpa));
#endif
*pkpt = kpt->kpt_next;
kpt->kpt_next = kpt_free_list;
@@ -1817,11 +1826,11 @@ ok:
if (*ste != SG_NV)
printf("collect: kernel STE at %p still valid (%x)\n",
ste, *ste);
ste, *ste);
ste = &Sysptmap[ste - pmap_ste(pmap_kernel(), 0)];
if (*ste != SG_NV)
printf("collect: kernel PTmap at %p still valid (%x)\n",
ste, *ste);
ste, *ste);
#endif
}
}
@@ -1885,7 +1894,7 @@ pmap_zero_page(phys)
/*
* pmap_zero_page_uncached:
*
* Same as above, except uncached. Used in uvm_pageidolezero,
* Same as above, except uncached. Used in uvm_pageidlezero,
* through PMAP_PAGEIDLEZERO macro. Returns TRUE if the page
* was zero'd, FALSE if we aborted.
*/
@@ -1919,7 +1928,7 @@ pmap_zero_page_uncached(phys)
splx(s);
return (TRUE);
return TRUE;
}
/*
@@ -1999,7 +2008,7 @@ pmap_clear_modify(pg)
paddr_t pa = VM_PAGE_TO_PHYS(pg);
boolean_t rv;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa));
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
rv = pmap_testbit(pa, PG_M);
pmap_changebit(pa, 0, ~PG_M);
@@ -2041,10 +2050,10 @@ pmap_is_referenced(pg)
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_U);
printf("pmap_is_referenced(%lx) -> %c\n", pa, "FT"[rv]);
return(rv);
return rv;
}
#endif
return(pmap_testbit(pa, PG_U));
return pmap_testbit(pa, PG_U);
}
/*
@@ -2062,11 +2071,11 @@ pmap_is_modified(pg)
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_M);
printf("pmap_is_modified(%lx) -> %c\n", pa, "FT"[rv]);
return(rv);
printf("pmap_is_modified(%p) -> %c\n", pg, "FT"[rv]);
return rv;
}
#endif
return(pmap_testbit(pa, PG_M));
return pmap_testbit(pa, PG_M);
}
/*
@@ -2082,7 +2091,8 @@ paddr_t
pmap_phys_address(ppn)
int ppn;
{
return(m68k_ptob(ppn));
return m68k_ptob(ppn);
}
#ifdef CACHE_HAVE_VAC
@@ -2099,8 +2109,7 @@ pmap_prefer(foff, vap)
vaddr_t va;
vsize_t d;
if (pmap_aliasmask)
{
if (pmap_aliasmask) {
va = *vap;
d = foff - va;
d &= pmap_aliasmask;
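A worked example of the alias avoidance, with hypothetical numbers: on a machine whose virtually addressed cache spans 16 KB, pmap_aliasmask would be 0x3fff. For foff = 0x12345000 and a proposed *vap of 0x00200000 (the adjustment of *vap itself lies past the end of this hunk):

	d = (0x12345000 - 0x00200000) & 0x3fff;   /* = 0x1000 */
	/* advancing the hint by d gives 0x00201000, which shares its
	 * low 14 bits (the cache index) with foff, so the two mappings
	 * can no longer alias in the cache */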
@@ -2131,7 +2140,7 @@ pmap_mapmulti(pmap, va)
if (pmapdebug & PDB_MULTIMAP) {
ste = pmap_ste(pmap, HPMMBASEADDR(va));
printf("pmap_mapmulti(%p, %lx): bste %p(%x)",
pmap, va, ste, *ste);
pmap, va, ste, *ste);
ste = pmap_ste(pmap, va);
printf(" ste %p(%x)\n", ste, *ste);
}
@@ -2367,7 +2376,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
#ifdef DEBUG
if ((pmapdebug & PDB_PARANOIA) &&
ptpmap->pm_stab !=
(st_entry_t *)trunc_page((vaddr_t)ste))
(st_entry_t *)trunc_page((vaddr_t)ste))
panic("remove: bogus ste");
#endif
if (--(ptpmap->pm_sref) == 0) {
@@ -2377,11 +2386,10 @@ pmap_remove_mapping(pmap, va, pte, flags)
pmap_remove(pmap_kernel(),
(vaddr_t)ptpmap->pm_stab,
(vaddr_t)ptpmap->pm_stab + STSIZE);
uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t)
ptpmap->pm_stpa));
uvm_pagefree(PHYS_TO_VM_PAGE(
(paddr_t)ptpmap->pm_stpa));
uvm_km_free_wakeup(st_map,
(vaddr_t)ptpmap->pm_stab,
STSIZE);
(vaddr_t)ptpmap->pm_stab, STSIZE);
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040)
@@ -2445,7 +2453,7 @@ pmap_testbit(pa, bit)
if (*pa_to_attribute(pa) & bit) {
splx(s);
return(TRUE);
return TRUE;
}
#ifdef CACHE_HAVE_VAC
/*
@@ -2464,12 +2472,12 @@ pmap_testbit(pa, bit)
if (*pte & bit) {
*pa_to_attribute(pa) |= bit;
splx(s);
return(TRUE);
return TRUE;
}
}
}
splx(s);
return(FALSE);
return FALSE;
}
/*
@@ -2586,8 +2594,7 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
pmap->pm_stab = (st_entry_t *)
uvm_km_zalloc(st_map, STSIZE);
pmap->pm_stab = (st_entry_t *) uvm_km_zalloc(st_map, STSIZE);
(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
(paddr_t *)&pmap->pm_stpa);
#if defined(M68040)
@@ -2642,7 +2649,7 @@ pmap_enter_ptpage(pmap, va)
* to the first entry of that chunk which is validated in its
* entirety below.
*/
ste = (st_entry_t *)((int)ste & ~(NBPG/SG4_LEV3SIZE-1));
ste = (st_entry_t *)((int)ste & ~(NBPG / SG4_LEV3SIZE - 1));
PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB,
("enter: ste2 %p (%p)\n", pmap_ste2(pmap, va), ste));
@@ -2683,7 +2690,7 @@ pmap_enter_ptpage(pmap, va)
int ix = pmap_ste(pmap, va) - pmap_ste(pmap, 0);
printf("enter: add &Sysptmap[%d]: %x (KPT page %lx)\n",
ix, Sysptmap[ix], kpt->kpt_va);
ix, Sysptmap[ix], kpt->kpt_va);
}
#endif
splx(s);
@@ -2703,7 +2710,7 @@ pmap_enter_ptpage(pmap, va)
PMAP_DPRINTF(PDB_ENTER|PDB_PTPAGE,
("enter: about to alloc UPT pg at %lx\n", va));
while ((pg = uvm_pagealloc(uvm.kernel_object, va, NULL,
UVM_PGA_ZERO)) == NULL) {
UVM_PGA_ZERO)) == NULL) {
uvm_wait("ptpage");
}
pg->flags &= ~(PG_BUSY|PG_FAKE);
@@ -2727,8 +2734,8 @@ pmap_enter_ptpage(pmap, va)
pt_entry_t *pte = pmap_pte(pmap_kernel(), va);
if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0)
printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n",
pmap == pmap_kernel() ? "Kernel" : "User",
va, ptpa, pte, *pte);
pmap == pmap_kernel() ? "Kernel" : "User",
va, ptpa, pte, *pte);
#endif
pmap_changebit(ptpa, 0, ~PG_CCB);
}
@@ -2827,7 +2834,7 @@ pmap_ptpage_delref(ptpva)
pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
rv = --pg->wire_count;
simple_unlock(&uvm.kernel_object->vmobjlock);
return (rv);
return rv;
}
#ifdef DEBUG
@@ -2846,8 +2853,8 @@ pmap_pvdump(pa)
printf("pa %lx", pa);
for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next)
printf(" -> pmap %p, va %lx, ptste %p, ptpmap %p, flags %x",
pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
pv->pv_flags);
pv->pv_pmap, pv->pv_va, pv->pv_ptste, pv->pv_ptpmap,
pv->pv_flags);
printf("\n");
}