NBPG -> PAGE_SIZE

thorpej 2000-11-14 22:55:05 +00:00
parent d007549576
commit ec0069dd37
8 changed files with 71 additions and 70 deletions
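
NBPG is the i386 port's machine-dependent page-size constant; PAGE_SIZE is the machine-independent name the VM system provides. On i386 both come out to 4096 — init386() below even panics if PAGE_SIZE != NBPG — so every hunk here is a mechanical rename. As a standalone illustration of the page arithmetic these files lean on (constants are spelled out for the sketch; the kernel takes them from its headers):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* i386 */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)  /* 4096 bytes, the old NBPG value */
#define PGOFSET    (PAGE_SIZE - 1)      /* byte offset within a page */

int
main(void)
{
        unsigned long va = 0x12345;

        /* split an address into its page frame and in-page offset */
        unsigned long frame  = va & ~PGOFSET;
        unsigned long offset = va &  PGOFSET;

        assert(frame + offset == va);
        printf("va %#lx = frame %#lx + offset %#lx\n", va, frame, offset);
        return 0;
}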

apm.c

@@ -1,4 +1,4 @@
/* $NetBSD: apm.c,v 1.53 2000/08/13 22:26:27 augustss Exp $ */
/* $NetBSD: apm.c,v 1.54 2000/11/14 22:55:05 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@@ -959,7 +959,7 @@ apmattach(parent, self, aux)
struct bioscallregs regs;
int error, apm_data_seg_ok;
u_int okbases[] = { 0, biosbasemem*1024 };
u_int oklimits[] = { NBPG, IOM_END};
u_int oklimits[] = { PAGE_SIZE, IOM_END};
u_int i;
#ifdef APMDEBUG
char bits[128];

bus_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: bus_machdep.c,v 1.5 2000/09/07 17:20:59 thorpej Exp $ */
/* $NetBSD: bus_machdep.c,v 1.6 2000/11/14 22:55:05 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -292,7 +292,7 @@ i386_mem_add_mapping(bpa, size, cacheable, bshp)
*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));
for (; pa < endpa; pa += NBPG, va += NBPG) {
for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
/*
@@ -723,7 +723,7 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
for (curseg = 0; curseg < nsegs; curseg++) {
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += NBPG, va += NBPG, size -= NBPG) {
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,
@@ -845,7 +845,7 @@ _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
/*
* Compute the segment size, and adjust counts.
*/
sgsize = NBPG - ((u_long)vaddr & PGOFSET);
sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
if (buflen < sgsize)
sgsize = buflen;
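
The sgsize computation above takes the bytes left in the current page and clamps them to what remains of the buffer; repeating that is how the DMA load code splits an arbitrary virtual buffer into page-bounded segments. A userland sketch of the same walk (function name and output are illustrative, not kernel API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define PGOFSET   (PAGE_SIZE - 1)

/* Walk a virtual buffer in page-bounded pieces, the same clamping
 * pattern _bus_dmamap_load_buffer() uses to size each DMA segment. */
static void
walk_buffer(uintptr_t vaddr, size_t buflen)
{
        while (buflen > 0) {
                size_t sgsize = PAGE_SIZE - (vaddr & PGOFSET);

                if (buflen < sgsize)
                        sgsize = buflen;        /* final, partial piece */
                printf("piece: va %#lx len %zu\n", (unsigned long)vaddr,
                    sgsize);
                vaddr += sgsize;
                buflen -= sgsize;
        }
}

int
main(void)
{
        walk_buffer(0x1ffc, 12000);     /* crosses several page boundaries */
        return 0;
}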

db_memrw.c

@@ -1,4 +1,4 @@
/* $NetBSD: db_memrw.c,v 1.10 2000/09/24 23:46:37 itohy Exp $ */
/* $NetBSD: db_memrw.c,v 1.11 2000/11/14 22:55:05 thorpej Exp $ */
/*-
* Copyright (c) 1996, 2000 The NetBSD Foundation, Inc.
@@ -140,7 +140,7 @@ db_write_text(vaddr_t addr, size_t size, char *data)
limit = NBPD - ((vaddr_t)dst & (NBPD - 1));
else
#endif
limit = NBPG - ((vaddr_t)dst & PGOFSET);
limit = PAGE_SIZE - ((vaddr_t)dst & PGOFSET);
if (limit > size)
limit = size;
size -= limit;

kgdb_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: kgdb_machdep.c,v 1.9 2000/10/06 18:37:39 thorpej Exp $ */
/* $NetBSD: kgdb_machdep.c,v 1.10 2000/11/14 22:55:05 thorpej Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
@@ -117,7 +117,7 @@ kgdb_acc(va, len)
va = (va & PG_LGFRAME) + NBPD;
else
#endif
va += NBPG;
va += PAGE_SIZE;
} while (va < last_va);
return (1);

machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.413 2000/11/13 16:40:40 jdolecek Exp $ */
/* $NetBSD: machdep.c,v 1.414 2000/11/14 22:55:05 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998, 2000 The NetBSD Foundation, Inc.
@@ -327,8 +327,8 @@ cpu_startup()
/* msgbuf_paddr was init'd in pmap */
for (x = 0; x < btoc(MSGBUFSIZE); x++)
pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * NBPG,
msgbuf_paddr + x * NBPG, VM_PROT_READ|VM_PROT_WRITE);
pmap_kenter_pa((vaddr_t)msgbuf_vaddr + x * PAGE_SIZE,
msgbuf_paddr + x * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
@@ -435,7 +435,7 @@ cpu_startup()
*/
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free - bufpages));
printf("avail memory = %s\n", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf("using %d buffers containing %s of memory\n", nbuf, pbuf);
#if NBIOSCALL > 0
@@ -444,9 +444,9 @@ cpu_startup()
* in case someone tries to fake it out...
*/
#ifdef DIAGNOSTIC
if (biostramp_image_size > NBPG)
if (biostramp_image_size > PAGE_SIZE)
panic("biostramp_image_size too big: %x vs. %x\n",
biostramp_image_size, NBPG);
biostramp_image_size, PAGE_SIZE);
#endif
pmap_kenter_pa((vaddr_t)BIOSTRAMP_BASE, /* virtual */
(paddr_t)BIOSTRAMP_BASE, /* physical */
@@ -512,7 +512,7 @@ i386_bufinit()
* "base" pages for the rest.
*/
curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base+1) : base);
curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
while (curbufsize) {
/*
@@ -1462,7 +1462,7 @@ cpu_dump()
/*
* This is called by main to set dumplo and dumpsize.
* Dumps always skip the first NBPG of disk space
* Dumps always skip the first PAGE_SIZE of disk space
* in case there might be a disk label stored there.
* If there is extra space, put dump at the end to
* reduce the chance that swapping trashes it.
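
The placement policy the comment describes is plain arithmetic. A hypothetical helper, not the kernel's cpu_dumpconf(), that applies both rules — dump at the end of the partition, never inside the first PAGE_SIZE where a disklabel may sit:

#include <stdio.h>

#define PAGE_SIZE 4096
#define DEV_BSIZE 512   /* disk block size */

/* Hypothetical placement helper (not the kernel's cpu_dumpconf()):
 * put the dump at the end of an nblks-block partition, but never
 * within the first PAGE_SIZE, where a disklabel might be stored. */
static long
pick_dumplo(long nblks, long dumpblks)
{
        long dumplo = nblks - dumpblks;         /* end of partition */
        long label_blks = PAGE_SIZE / DEV_BSIZE;/* protected label area */

        if (dumplo < label_blks)
                return -1;                      /* partition too small */
        return dumplo;
}

int
main(void)
{
        printf("dumplo = %ld\n", pick_dumplo(1L << 20, 1L << 15));
        return 0;
}
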
@@ -1509,7 +1509,7 @@ cpu_dumpconf()
* getting on the dump stack, either when called above, or by
* the auto-restart code.
*/
#define BYTES_PER_DUMP NBPG /* must be a multiple of pagesize XXX small */
#define BYTES_PER_DUMP PAGE_SIZE /* must be a multiple of pagesize XXX small */
static vaddr_t dumpspace;
vaddr_t
@@ -1772,14 +1772,6 @@ init386(first_avail)
consinit(); /* XXX SHOULD NOT BE DONE HERE */
#if NBIOSCALL > 0
avail_start = 3*NBPG; /* save us a page for trampoline code and
one additional PT page! */
#else
avail_start = NBPG; /* BIOS leaves data in low memory */
/* and VM system doesn't work with phys 0 */
#endif
/*
* Initailize PAGE_SIZE-dependent variables.
*/
@@ -1791,6 +1783,14 @@ init386(first_avail)
if (PAGE_SIZE != NBPG)
panic("init386: PAGE_SIZE != NBPG");
#if NBIOSCALL > 0
avail_start = 3*PAGE_SIZE; /* save us a page for trampoline code and
one additional PT page! */
#else
avail_start = PAGE_SIZE; /* BIOS leaves data in low memory */
/* and VM system doesn't work with phys 0 */
#endif
/*
* Call pmap initialization to make new kernel address space.
* We must do this before loading pages into the VM system.
@@ -2109,9 +2109,9 @@ init386(first_avail)
#if NBIOSCALL > 0
/* install page 2 (reserved above) as PT page for first 4M */
pmap_enter(pmap_kernel(), (vaddr_t)vtopte(0), 2*NBPG,
pmap_enter(pmap_kernel(), (vaddr_t)vtopte(0), 2*PAGE_SIZE,
VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED|VM_PROT_READ|VM_PROT_WRITE);
memset(vtopte(0), 0, NBPG); /* make sure it is clean before using */
memset(vtopte(0), 0, PAGE_SIZE);/* make sure it is clean before using */
#endif
pmap_enter(pmap_kernel(), idt_vaddr, idt_paddr,
@@ -2403,7 +2403,7 @@ cpu_reset()
* Try to cause a triple fault and watchdog reset by unmapping the
* entire address space and doing a TLB flush.
*/
memset((caddr_t)PTD, 0, NBPG);
memset((caddr_t)PTD, 0, PAGE_SIZE);
pmap_update();
#endif

mem.c

@@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.46 2000/06/29 08:44:52 mrg Exp $ */
/* $NetBSD: mem.c,v 1.47 2000/11/14 22:55:06 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -143,10 +143,10 @@ mmrw(dev, uio, flags)
pmap_enter(pmap_kernel(), (vaddr_t)vmmap,
trunc_page(v), prot, PMAP_WIRED|prot);
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
error = uiomove((caddr_t)vmmap + o, c, uio);
pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
(vaddr_t)vmmap + NBPG);
(vaddr_t)vmmap + PAGE_SIZE);
break;
/* minor device 1 is kernel memory */
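
mmrw() serves /dev/mem through a single page-sized window (vmmap): map one physical page, move the in-page piece with uiomove(), unmap, repeat. A userland analogue of that chunking, with a static buffer standing in for the vmmap window:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define PGOFSET   (PAGE_SIZE - 1)

static char window[PAGE_SIZE];  /* stands in for the vmmap window */

/* Serve a request of 'resid' bytes at 'offset' in page-sized chunks,
 * the way mmrw() maps one physical page at a time into vmmap. */
static void
copy_through_window(unsigned long offset, size_t resid)
{
        while (resid > 0) {
                size_t o = offset & PGOFSET;    /* offset within page */
                size_t c = resid < PAGE_SIZE - o ? resid : PAGE_SIZE - o;

                /* the kernel would pmap_enter() trunc_page(offset) here,
                 * uiomove() c bytes, then remove the mapping again */
                memset(window + o, 0, c);
                printf("chunk: page %#lx off %zu len %zu\n",
                    offset & ~(unsigned long)PGOFSET, o, c);
                offset += c;
                resid -= c;
        }
}

int
main(void)
{
        copy_through_window(0x1f00, 9000);
        return 0;
}
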
@@ -173,10 +173,10 @@ mmrw(dev, uio, flags)
}
if (zeropage == NULL) {
zeropage = (caddr_t)
malloc(NBPG, M_TEMP, M_WAITOK);
memset(zeropage, 0, NBPG);
malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
memset(zeropage, 0, PAGE_SIZE);
}
c = min(iov->iov_len, NBPG);
c = min(iov->iov_len, PAGE_SIZE);
error = uiomove(zeropage, c, uio);
break;

pmap.c

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.109 2000/10/08 22:59:38 thorpej Exp $ */
/* $NetBSD: pmap.c,v 1.110 2000/11/14 22:55:06 thorpej Exp $ */
/*
*
@@ -137,7 +137,7 @@
*
* [A] new process' page directory page (PDP)
* - plan 1: done at pmap_pinit() we use
* uvm_km_alloc(kernel_map, NBPG) [fka kmem_alloc] to do this
* uvm_km_alloc(kernel_map, PAGE_SIZE) [fka kmem_alloc] to do this
* allocation.
*
* if we are low in free physical memory then we sleep in
@@ -646,7 +646,7 @@ pmap_kenter_pa(va, pa, prot)
* => no need to lock anything
* => caller must dispose of any vm_page mapped in the va range
* => note: not an inline function
* => we assume the va is page aligned and the len is a multiple of NBPG
* => we assume the va is page aligned and the len is a multiple of PAGE_SIZE
* => we assume kernel only unmaps valid addresses and thus don't bother
* checking the valid bit before doing TLB flushing
*/
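
Those two assumptions are what let pmap_kremove(), in the next hunk, turn its byte length into a page count with one shift and then step a page at a time. A skeleton of the iteration, with the pte lookup and TLB flush elided:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (PAGE_SIZE - 1)

/* Skeleton of pmap_kremove()'s loop: byte length to page count via a
 * shift, one step per page.  The pte lookup and TLB flush are elided;
 * va and len must honor the alignment assumptions quoted above. */
static void
kremove_skeleton(unsigned long va, unsigned long len)
{
        assert((va & PAGE_MASK) == 0);
        assert((len & PAGE_MASK) == 0);

        len >>= PAGE_SHIFT;             /* bytes -> pages */
        for (/* null */; len; len--, va += PAGE_SIZE)
                printf("would invalidate pte for va %#lx\n", va);
}

int
main(void)
{
        kremove_skeleton(0xc0000000UL, 4 * PAGE_SIZE);
        return 0;
}
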
@@ -659,7 +659,7 @@ pmap_kremove(va, len)
pt_entry_t *pte;
len >>= PAGE_SHIFT;
for ( /* null */ ; len ; len--, va += NBPG) {
for ( /* null */ ; len ; len--, va += PAGE_SIZE) {
if (va < VM_MIN_KERNEL_ADDRESS)
pte = vtopte(va);
else
@@ -704,7 +704,7 @@ pmap_kenter_pgs(va, pgs, npgs)
#endif
for (lcv = 0 ; lcv < npgs ; lcv++) {
tva = va + lcv * NBPG;
tva = va + lcv * PAGE_SIZE;
if (va < VM_MIN_KERNEL_ADDRESS)
pte = vtopte(tva);
else
@@ -821,7 +821,7 @@ pmap_bootstrap(kva_start)
/* add PG_G attribute to already mapped kernel pages */
for (kva = VM_MIN_KERNEL_ADDRESS ; kva < virtual_avail ;
kva += NBPG)
kva += PAGE_SIZE)
if (pmap_valid_entry(PTE_BASE[i386_btop(kva)]))
PTE_BASE[i386_btop(kva)] |= PG_G;
}
@@ -875,33 +875,33 @@ pmap_bootstrap(kva_start)
pte = PTE_BASE + i386_btop(virtual_avail);
csrcp = (caddr_t) virtual_avail; csrc_pte = pte; /* allocate */
virtual_avail += NBPG; pte++; /* advance */
virtual_avail += PAGE_SIZE; pte++; /* advance */
cdstp = (caddr_t) virtual_avail; cdst_pte = pte;
virtual_avail += NBPG; pte++;
virtual_avail += PAGE_SIZE; pte++;
zerop = (caddr_t) virtual_avail; zero_pte = pte;
virtual_avail += NBPG; pte++;
virtual_avail += PAGE_SIZE; pte++;
ptpp = (caddr_t) virtual_avail; ptp_pte = pte;
virtual_avail += NBPG; pte++;
virtual_avail += PAGE_SIZE; pte++;
/* XXX: vmmap used by mem.c... should be uvm_map_reserve */
vmmap = (char *)virtual_avail; /* don't need pte */
virtual_avail += NBPG; pte++;
virtual_avail += PAGE_SIZE; pte++;
msgbuf_vaddr = virtual_avail; /* don't need pte */
virtual_avail += round_page(MSGBUFSIZE); pte++;
idt_vaddr = virtual_avail; /* don't need pte */
virtual_avail += NBPG; pte++;
virtual_avail += PAGE_SIZE; pte++;
idt_paddr = avail_start; /* steal a page */
avail_start += NBPG;
avail_start += PAGE_SIZE;
#if defined(I586_CPU)
/* pentium f00f bug stuff */
pentium_idt_vaddr = virtual_avail; /* don't need pte */
virtual_avail += NBPG; pte++;
virtual_avail += PAGE_SIZE; pte++;
#endif
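
This whole hunk is a bump allocator: pmap_bootstrap() hands out page-sized slots from virtual_avail, advancing one pte per page. A toy version of the carving (the variable names mirror the kernel's; the allocator itself is only illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long virtual_avail = 0xc1000000UL;      /* example start */

/* Hand out the next page-sized virtual slot, as pmap_bootstrap() does
 * for csrcp, cdstp, zerop, ptpp and friends (toy version). */
static unsigned long
alloc_page_va(void)
{
        unsigned long va = virtual_avail;

        virtual_avail += PAGE_SIZE;     /* advance */
        return va;
}

int
main(void)
{
        unsigned long csrcp = alloc_page_va();
        unsigned long cdstp = alloc_page_va();
        unsigned long zerop = alloc_page_va();

        printf("csrcp %#lx cdstp %#lx zerop %#lx\n", csrcp, cdstp, zerop);
        return 0;
}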
/*
@@ -999,7 +999,7 @@ pmap_init()
* structures. we never free this page.
*/
pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, NBPG);
pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
if (pv_initpage == NULL)
panic("pmap_init: pv_initpage");
pv_cachedva = 0; /* a VA we have allocated but not used yet */
@@ -1139,7 +1139,7 @@ pmap_alloc_pvpage(pmap, mode)
s = splimp(); /* must protect kmem_map/kmem_object with splimp! */
if (pv_cachedva == 0) {
pv_cachedva = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
NBPG, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
if (pv_cachedva == 0) {
splx(s);
goto steal_one;
@@ -1451,7 +1451,8 @@ pmap_free_pvpage()
/* unmap the page */
dead_entries = NULL;
(void)uvm_unmap_remove(map, (vaddr_t) pvp,
((vaddr_t) pvp) + NBPG, &dead_entries);
((vaddr_t) pvp) + PAGE_SIZE,
&dead_entries);
vm_map_unlock(map);
if (dead_entries != NULL)
@@ -1765,7 +1766,7 @@ pmap_pinit(pmap)
pmap->pm_flags = 0;
/* allocate PDP */
pmap->pm_pdir = (pd_entry_t *) uvm_km_alloc(kernel_map, NBPG);
pmap->pm_pdir = (pd_entry_t *) uvm_km_alloc(kernel_map, PAGE_SIZE);
if (pmap->pm_pdir == NULL)
panic("pmap_pinit: kernel_map out of virtual space!");
(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
@@ -1794,7 +1795,7 @@ pmap_pinit(pmap)
nkpde * sizeof(pd_entry_t));
/* zero the rest */
memset(&pmap->pm_pdir[PDSLOT_KERN + nkpde], 0,
NBPG - ((PDSLOT_KERN + nkpde) * sizeof(pd_entry_t)));
PAGE_SIZE - ((PDSLOT_KERN + nkpde) * sizeof(pd_entry_t)));
LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
simple_unlock(&pmaps_lock);
}
@@ -1871,7 +1872,7 @@ pmap_release(pmap)
}
/* XXX: need to flush it out of other processor's APTE space? */
uvm_km_free(kernel_map, (vaddr_t)pmap->pm_pdir, NBPG);
uvm_km_free(kernel_map, (vaddr_t)pmap->pm_pdir, PAGE_SIZE);
#ifdef USER_LDT
if (pmap->pm_flags & PMF_USER_LDT) {
@@ -2095,8 +2096,8 @@ pmap_map(va, spa, epa, prot)
{
while (spa < epa) {
pmap_enter(pmap_kernel(), va, spa, prot, 0);
va += NBPG;
spa += NBPG;
va += PAGE_SIZE;
spa += PAGE_SIZE;
}
return va;
}
@@ -2113,7 +2114,7 @@ pmap_zero_page(pa)
simple_lock(&pmap_zero_page_lock);
*zero_pte = (pa & PG_FRAME) | PG_V | PG_RW; /* map in */
pmap_update_pg((vaddr_t)zerop); /* flush TLB */
memset(zerop, 0, NBPG); /* zero */
memset(zerop, 0, PAGE_SIZE); /* zero */
simple_unlock(&pmap_zero_page_lock);
}
@@ -2136,7 +2137,7 @@ pmap_zero_page_uncached(pa)
((cpu_class != CPUCLASS_386) ? PG_N : 0);
pmap_update_pg((vaddr_t)zerop); /* flush TLB */
for (i = 0, ptr = (int *) zerop; i < NBPG / sizeof(int); i++) {
for (i = 0, ptr = (int *) zerop; i < PAGE_SIZE / sizeof(int); i++) {
if (sched_whichqs != 0) {
/*
* A process has become ready. Abort now,
@@ -2171,7 +2172,7 @@ pmap_copy_page(srcpa, dstpa)
*csrc_pte = (srcpa & PG_FRAME) | PG_V | PG_RW;
*cdst_pte = (dstpa & PG_FRAME) | PG_V | PG_RW;
memcpy(cdstp, csrcp, NBPG);
memcpy(cdstp, csrcp, PAGE_SIZE);
*csrc_pte = *cdst_pte = 0; /* zap! */
pmap_update_2pg((vaddr_t)csrcp, (vaddr_t)cdstp);
simple_unlock(&pmap_copy_page_lock);
@@ -2217,7 +2218,7 @@ pmap_remove_ptes(pmap, pmap_rr, ptp, ptpva, startva, endva, flags)
*/
for (/*null*/; startva < endva && (ptp == NULL || ptp->wire_count > 1)
; pte++, startva += NBPG) {
; pte++, startva += PAGE_SIZE) {
if (!pmap_valid_entry(*pte))
continue; /* VA not mapped */
if ((flags & PMAP_REMOVE_SKIPWIRED) && (*pte & PG_W)) {
@@ -2406,7 +2407,7 @@ pmap_do_remove(pmap, sva, eva, flags)
* removing one page? take shortcut function.
*/
if (sva + NBPG == eva) {
if (sva + PAGE_SIZE == eva) {
if (pmap_valid_entry(pmap->pm_pdir[pdei(sva)])) {
@@ -3346,7 +3347,7 @@ pmap_transfer_ptes(srcpmap, srcl, dstpmap, dstl, toxfer, move)
*/
for (/*null*/; toxfer > 0 ; toxfer--,
srcl->addr += NBPG, dstl->addr += NBPG,
srcl->addr += PAGE_SIZE, dstl->addr += PAGE_SIZE,
srcl->pte++, dstl->pte++) {
if (!pmap_valid_entry(*srcl->pte)) /* skip invalid entrys */
@@ -3821,7 +3822,7 @@ pmap_dump(pmap, sva, eva)
continue;
pte = &ptes[i386_btop(sva)];
for (/* null */; sva < blkendva ; sva += NBPG, pte++) {
for (/* null */; sva < blkendva ; sva += PAGE_SIZE, pte++) {
if (!pmap_valid_entry(*pte))
continue;
printf("va %#lx -> pa %#x (pte=%#x)\n",

vm_machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: vm_machdep.c,v 1.94 2000/09/07 17:20:59 thorpej Exp $ */
/* $NetBSD: vm_machdep.c,v 1.95 2000/11/14 22:55:06 thorpej Exp $ */
/*-
* Copyright (c) 1995 Charles M. Hannum. All rights reserved.
@@ -303,7 +303,7 @@ pagemove(from, to, size)
{
register pt_entry_t *fpte, *tpte, ofpte, otpte;
if (size % NBPG)
if (size & PAGE_MASK)
panic("pagemove");
fpte = kvtopte((vaddr_t)from);
tpte = kvtopte((vaddr_t)to);
@@ -328,9 +328,9 @@ pagemove(from, to, size)
if (ofpte & PG_V)
pmap_update_pg((vaddr_t) from);
}
from += NBPG;
to += NBPG;
size -= NBPG;
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
}
#if defined(I386_CPU)
if (cpu_class == CPUCLASS_386)
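
pagemove() is the one spot where the commit changes an expression rather than a name: size % NBPG becomes size & PAGE_MASK. Because the page size is a power of two the two tests always agree, and the mask form needs no division. A quick check of the equivalence:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (PAGE_SIZE - 1)

int
main(void)
{
        unsigned long size;

        /* for any size, size % PAGE_SIZE == size & PAGE_MASK because
         * PAGE_SIZE is a power of two */
        for (size = 0; size < 3 * PAGE_SIZE; size += 123)
                assert((size % PAGE_SIZE) == (size & PAGE_MASK));

        printf("'%% PAGE_SIZE' and '& PAGE_MASK' agree\n");
        return 0;
}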