cosmetics (typo, KNF etc.)

This commit is contained in:
tsutsui 2001-12-16 03:41:57 +00:00
parent 72a263b8f7
commit f7312c690f

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.25 2001/12/13 04:39:52 chs Exp $ */
/* $NetBSD: pmap.c,v 1.26 2001/12/16 03:41:57 tsutsui Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@ -159,11 +159,7 @@
#define PDB_PVDUMP 0x8000
int debugmap = 0;
#if 1
int pmapdebug = PDB_PARANOIA;
#else
int pmapdebug = 0xffff;
#endif
#define PMAP_DPRINTF(l, x) if (pmapdebug & (l)) printf x
@ -262,8 +258,10 @@ struct pv_entry *pv_table;
char *pmap_attributes; /* reference and modify bits */
TAILQ_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
int pmap_aliasmask; /* seperation at which VA aliasing ok */
#ifdef CACHE_HAVE_VAC
int pmap_aliasmask; /* separation at which VA aliasing ok */
#endif
#if defined(M68040)
int protostfree; /* prototype (default) free ST map */
#endif
@ -398,15 +396,13 @@ pmap_init()
intiotop_phys - intiobase_phys,
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM,
UVM_FLAG_FIXED)) != 0)
UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
goto bogons;
addr = (vaddr_t) Sysmap;
if (uvm_map(kernel_map, &addr, MAX_PTSIZE,
NULL, UVM_UNKNOWN_OFFSET, 0,
UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
UVM_INH_NONE, UVM_ADV_RANDOM,
UVM_FLAG_FIXED)) != 0) {
UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0) {
/*
* If this fails, it is probably because the static
* portion of the kernel page table isn't big enough
@ -707,7 +703,7 @@ pmap_map(va, spa, epa, prot)
spa += NBPG;
}
pmap_update(pmap_kernel());
return (va);
return va;
}
/*
@ -723,12 +719,12 @@ pmap_create()
pmap_t pmap;
PMAP_DPRINTF(PDB_FOLLOW|PDB_CREATE,
("pmap_create\n"));
("pmap_create()\n"));
pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
memset(pmap, 0, sizeof(*pmap));
pmap_pinit(pmap);
return (pmap);
return pmap;
}
/*
@ -811,8 +807,7 @@ pmap_release(pmap)
(vaddr_t)pmap->pm_ptab + MAX_PTSIZE);
uvm_km_pgremove(uvm.kernel_object, (vaddr_t)pmap->pm_ptab,
(vaddr_t)pmap->pm_ptab + MAX_PTSIZE);
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab,
MAX_PTSIZE);
uvm_km_free_wakeup(pt_map, (vaddr_t)pmap->pm_ptab, MAX_PTSIZE);
}
KASSERT(pmap->pm_stab == Segtabzero);
}
@ -826,8 +821,8 @@ void
pmap_reference(pmap)
pmap_t pmap;
{
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_reference(%p)\n", pmap));
simple_lock(&pmap->pm_lock);
pmap->pm_count++;
simple_unlock(&pmap->pm_lock);
@ -1035,11 +1030,12 @@ pmap_page_protect(pg, prot)
pte, PRM_TFLUSH|PRM_CFLUSH);
else {
pv = pv->pv_next;
#ifdef DEBUG
if (pmapdebug & PDB_PARANOIA)
printf("%s wired mapping for %lx not removed\n",
"pmap_page_protect:", pa);
#endif
PMAP_DPRINTF(PDB_PARANOIA,
("%s wired mapping for %lx not removed\n",
"pmap_page_protect:", pa));
PMAP_DPRINTF(PDB_PARANOIA,
("vm wired count %d\n",
PHYS_TO_VM_PAGE(pa)->wired_count));
if (pv == NULL)
break;
}
@ -1177,8 +1173,8 @@ pmap_enter(pmap, va, pa, prot, flags)
* For user mapping, allocate kernel VM resources if necessary.
*/
if (pmap->pm_ptab == NULL)
pmap->pm_ptab = (pt_entry_t *)
uvm_km_valloc_wait(pt_map, MAX_PTSIZE);
pmap->pm_ptab =
(pt_entry_t *) uvm_km_valloc_wait(pt_map, MAX_PTSIZE);
/*
* Segment table entry not valid, we need a new PT page
@ -1425,6 +1421,14 @@ validate:
return 0;
}
/*
* pmap_kenter_pa: [ INTERFACE ]
*
* Enter a va -> pa mapping into the kernel pmap without any
* physical->virtual tracking.
*
* Note: no locking is necessary in this function.
*/
void
pmap_kenter_pa(va, pa, prot)
vaddr_t va;
@ -1481,22 +1485,28 @@ pmap_kenter_pa(va, pa, prot)
*pte = npte;
}
/*
* pmap_kremove: [ INTERFACE ]
*
* Remove a mapping entered with pmap_kenter_pa() starting at va,
* for len bytes (assumed to be page rounded).
*
*/
void
pmap_kremove(va, len)
vaddr_t va;
vsize_t len;
{
struct pmap *pmap = pmap_kernel();
vaddr_t sva, eva, nssva;
vaddr_t eva, nssva;
pt_entry_t *pte;
PMAP_DPRINTF(PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT,
("pmap_kremove(%lx, %lx)\n", va, len));
sva = va;
eva = va + len;
while (sva < eva) {
nssva = m68k_trunc_seg(sva) + NBSEG;
while (va < eva) {
nssva = m68k_trunc_seg(va) + NBSEG;
if (nssva == 0 || nssva > eva)
nssva = eva;
@ -1505,8 +1515,8 @@ pmap_kremove(va, len)
* skip to the next segment boundary.
*/
if (!pmap_ste_v(pmap, sva)) {
sva = nssva;
if (!pmap_ste_v(pmap, va)) {
va = nssva;
continue;
}
@ -1514,8 +1524,8 @@ pmap_kremove(va, len)
* Invalidate every valid mapping within this segment.
*/
pte = pmap_pte(pmap, sva);
while (sva < nssva) {
pte = pmap_pte(pmap, va);
while (va < nssva) {
if (pmap_pte_v(pte)) {
#ifdef DEBUG
struct pv_entry *pv;
@ -1525,7 +1535,7 @@ pmap_kremove(va, len)
s = splvm();
while (pv->pv_pmap != NULL) {
KASSERT(pv->pv_pmap != pmap_kernel() ||
pv->pv_va != sva);
pv->pv_va != va);
pv = pv->pv_next;
if (pv == NULL) {
break;
@ -1545,10 +1555,10 @@ pmap_kremove(va, len)
*/
*pte = PG_NV;
TBIS(sva);
TBIS(va);
}
pte++;
sva += NBPG;
va += NBPG;
}
}
}
@ -1578,8 +1588,8 @@ pmap_unwire(pmap, va)
* we don't want to force allocation of unnecessary PTE pages.
*/
if (!pmap_ste_v(pmap, va)) {
if (pmapdebug & PDB_PARANOIA)
printf("pmap_unwire: invalid STE for %lx\n", va);
PMAP_DPRINTF(PDB_PARANOIA,
("pmap_unwire: invalid STE for %lx\n", va));
return;
}
/*
@ -1587,8 +1597,8 @@ pmap_unwire(pmap, va)
* Just continue and change wiring anyway.
*/
if (!pmap_pte_v(pte)) {
if (pmapdebug & PDB_PARANOIA)
printf("pmap_unwire: invalid PTE for %lx\n", va);
PMAP_DPRINTF(PDB_PARANOIA,
("pmap_unwire: invalid PTE for %lx\n", va));
}
#endif
/*
@ -1627,7 +1637,7 @@ pmap_extract(pmap, va, pap)
PMAP_DPRINTF(PDB_FOLLOW,
("pmap_extract(%p, %lx) -> ", pmap, va));
if (pmap && pmap_ste_v(pmap, va)) {
if (pmap_ste_v(pmap, va)) {
pte = *(u_int *)pmap_pte(pmap, va);
if (pte) {
pa = (pte & PG_FRAME) | (va & ~PG_FRAME);
@ -1645,7 +1655,7 @@ pmap_extract(pmap, va, pap)
}
#endif
return (rv);
return rv;
}
/*
@ -1788,7 +1798,7 @@ ok:
* We call pmap_remove_entry to take care of invalidating
* ST and Sysptmap entries.
*/
(void) pmap_extract(pmap, pv->pv_va, (paddr_t *)&kpa);
(void) pmap_extract(pmap, pv->pv_va, &kpa);
pmap_remove_mapping(pmap, pv->pv_va, PT_ENTRY_NULL,
PRM_TFLUSH|PRM_CFLUSH);
/*
@ -1804,9 +1814,8 @@ ok:
#ifdef DEBUG
if (kpt == NULL)
panic("pmap_collect: lost a KPT page");
if (pmapdebug & (PDB_PTPAGE|PDB_COLLECT))
printf("collect: %lx (%lx) to free list\n",
kpt->kpt_va, kpa);
PMAP_DPRINTF(PDB_PTPAGE|PDB_COLLECT,
("collect: %lx (%lx) to free list\n", kpt->kpt_va, kpa));
#endif
*pkpt = kpt->kpt_next;
kpt->kpt_next = kpt_free_list;
@ -1885,7 +1894,7 @@ pmap_zero_page(phys)
/*
* pmap_zero_page_uncached:
*
* Same as above, except uncached. Used in uvm_pageidolezero,
* Same as above, except uncached. Used in uvm_pageidlezero,
* through PMAP_PAGEIDLEZERO macro. Returns TRUE if the page
* was zero'd, FALSE if we aborted.
*/
@ -1919,7 +1928,7 @@ pmap_zero_page_uncached(phys)
splx(s);
return (TRUE);
return TRUE;
}
/*
@ -1999,7 +2008,7 @@ pmap_clear_modify(pg)
paddr_t pa = VM_PAGE_TO_PHYS(pg);
boolean_t rv;
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%lx)\n", pa));
PMAP_DPRINTF(PDB_FOLLOW, ("pmap_clear_modify(%p)\n", pg));
rv = pmap_testbit(pa, PG_M);
pmap_changebit(pa, 0, ~PG_M);
@ -2041,10 +2050,10 @@ pmap_is_referenced(pg)
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_U);
printf("pmap_is_referenced(%lx) -> %c\n", pa, "FT"[rv]);
return(rv);
return rv;
}
#endif
return(pmap_testbit(pa, PG_U));
return pmap_testbit(pa, PG_U);
}
/*
@ -2062,11 +2071,11 @@ pmap_is_modified(pg)
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW) {
boolean_t rv = pmap_testbit(pa, PG_M);
printf("pmap_is_modified(%lx) -> %c\n", pa, "FT"[rv]);
return(rv);
printf("pmap_is_modified(%p) -> %c\n", pg, "FT"[rv]);
return rv;
}
#endif
return(pmap_testbit(pa, PG_M));
return pmap_testbit(pa, PG_M);
}
/*
@ -2082,7 +2091,8 @@ paddr_t
pmap_phys_address(ppn)
int ppn;
{
return(m68k_ptob(ppn));
return m68k_ptob(ppn);
}
#ifdef CACHE_HAVE_VAC
@ -2099,8 +2109,7 @@ pmap_prefer(foff, vap)
vaddr_t va;
vsize_t d;
if (pmap_aliasmask)
{
if (pmap_aliasmask) {
va = *vap;
d = foff - va;
d &= pmap_aliasmask;
@ -2377,11 +2386,10 @@ pmap_remove_mapping(pmap, va, pte, flags)
pmap_remove(pmap_kernel(),
(vaddr_t)ptpmap->pm_stab,
(vaddr_t)ptpmap->pm_stab + STSIZE);
uvm_pagefree(PHYS_TO_VM_PAGE((paddr_t)
ptpmap->pm_stpa));
uvm_pagefree(PHYS_TO_VM_PAGE(
(paddr_t)ptpmap->pm_stpa));
uvm_km_free_wakeup(st_map,
(vaddr_t)ptpmap->pm_stab,
STSIZE);
(vaddr_t)ptpmap->pm_stab, STSIZE);
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#if defined(M68040)
@ -2445,7 +2453,7 @@ pmap_testbit(pa, bit)
if (*pa_to_attribute(pa) & bit) {
splx(s);
return(TRUE);
return TRUE;
}
#ifdef CACHE_HAVE_VAC
/*
@ -2464,12 +2472,12 @@ pmap_testbit(pa, bit)
if (*pte & bit) {
*pa_to_attribute(pa) |= bit;
splx(s);
return(TRUE);
return TRUE;
}
}
}
splx(s);
return(FALSE);
return FALSE;
}
/*
@ -2586,8 +2594,7 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
pmap->pm_stab = (st_entry_t *)
uvm_km_zalloc(st_map, STSIZE);
pmap->pm_stab = (st_entry_t *) uvm_km_zalloc(st_map, STSIZE);
(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_stab,
(paddr_t *)&pmap->pm_stpa);
#if defined(M68040)
@ -2827,7 +2834,7 @@ pmap_ptpage_delref(ptpva)
pg = uvm_pagelookup(uvm.kernel_object, ptpva - vm_map_min(kernel_map));
rv = --pg->wire_count;
simple_unlock(&uvm.kernel_object->vmobjlock);
return (rv);
return rv;
}
#ifdef DEBUG