Use PAGE_SIZE rather than NBPG.

thorpej 2003-04-01 15:23:07 +00:00
parent 1411d9951f
commit 3b9d583516
18 changed files with 145 additions and 140 deletions
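Every hunk below applies the same mechanical substitution: a size, stride, or alignment that was spelled with the machine-dependent constant NBPG is rewritten with PAGE_SIZE, the name the machine-independent parts of the kernel use for the VM page size. On the ports touched here the two should evaluate to the same value, so the change is textual rather than behavioural. As a hedged illustration of the page-walking idiom being converted — the PAGE_SIZE definition, walk_pages() helper, and main() below are stand-ins so the sketch compiles on its own; they are not the kernel code:

/*
 * Stand-alone sketch of the loop pattern seen throughout this commit:
 * walk a virtual-address range one page at a time.  In the kernel,
 * PAGE_SIZE comes from the machine/uvm headers; the value here is
 * illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096			/* stand-in value for this sketch */
#endif

typedef uintptr_t vaddr_t;		/* mirrors the kernel's vaddr_t */

static void
walk_pages(vaddr_t va, size_t len)
{
	vaddr_t pgva, neva;

	neva = va + len;
	for (pgva = va; pgva < neva; pgva += PAGE_SIZE)	/* was: += NBPG */
		printf("visit page at 0x%lx\n", (unsigned long)pgva);
}

int
main(void)
{
	walk_pages(0x10000, 4 * PAGE_SIZE);
	return 0;
}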

View File

@ -1,4 +1,4 @@
/* $NetBSD: cache.c,v 1.14 2001/09/05 13:21:09 tsutsui Exp $ */
/* $NetBSD: cache.c,v 1.15 2003/04/01 15:31:12 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -84,7 +84,7 @@ cache_flush_page(pgva)
/* Write to control space for each cache line. */
va = (char *) pgva;
endva = (char *) (pgva + NBPG);
endva = (char *) (pgva + PAGE_SIZE);
data = VAC_FLUSH_PAGE;
do {

View File

@ -1,4 +1,4 @@
/* $NetBSD: db_machdep.c,v 1.17 2001/05/28 22:00:12 chs Exp $ */
/* $NetBSD: db_machdep.c,v 1.18 2003/04/01 15:31:12 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -135,7 +135,7 @@ db_mach_pagemap(addr, have_addr, count, modif)
#endif /* SUN3X */
pte_print(pte);
db_next = va + NBPG;
db_next = va + PAGE_SIZE;
}
#ifdef _SUN3_

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.161 2003/01/18 07:03:36 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.162 2003/04/01 15:31:12 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995 Gordon W. Ross
@ -212,7 +212,7 @@ cpu_startup()
/*
* Get scratch page for dumpsys().
*/
if ((dumppage = uvm_km_alloc(kernel_map, NBPG)) == 0)
if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE)) == 0)
panic("startup: alloc dumppage");
/*
@ -254,7 +254,7 @@ cpu_startup()
* "base" pages for the rest.
*/
curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base+1) : base);
curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
@ -291,7 +291,7 @@ cpu_startup()
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
printf("avail memory = %s\n", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
/*
@ -299,7 +299,7 @@ cpu_startup()
* This page is handed to pmap_enter() therefore
* it has to be in the normal kernel VA range.
*/
vmmap = uvm_km_valloc_wait(kernel_map, NBPG);
vmmap = uvm_km_valloc_wait(kernel_map, PAGE_SIZE);
/*
* Create the DVMA maps.
@ -537,7 +537,7 @@ long dumplo = 0; /* blocks */
/*
* This is called by main to set dumplo, dumpsize.
* Dumps always skip the first NBPG of disk space
* Dumps always skip the first PAGE_SIZE of disk space
* in case there might be a disk label stored there.
* If there is extra space, put dump at the end to
* reduce the chance that swapping trashes it.
@ -590,8 +590,8 @@ extern paddr_t avail_start;
* Write a crash dump. The format while in swap is:
* kcore_seg_t cpu_hdr;
* cpu_kcore_hdr_t cpu_data;
* padding (NBPG-sizeof(kcore_seg_t))
* pagemap (2*NBPG)
* padding (PAGE_SIZE-sizeof(kcore_seg_t))
* pagemap (2*PAGE_SIZE)
* physical memory...
*/
void
@ -643,7 +643,7 @@ dumpsys()
blkno = dumplo;
todo = dumpsize; /* pages */
vaddr = (char*)dumppage;
memset(vaddr, 0, NBPG);
memset(vaddr, 0, PAGE_SIZE);
/* Set pointers to all three parts. */
kseg_p = (kcore_seg_t *)vaddr;
@ -656,31 +656,31 @@ dumpsys()
/* Fill in cpu_kcore_hdr_t part. */
strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
chdr_p->page_size = NBPG;
chdr_p->page_size = PAGE_SIZE;
chdr_p->kernbase = KERNBASE;
/* Fill in the sun3_kcore_hdr part (MMU state). */
pmap_kcore_hdr(sh);
/* Write out the dump header. */
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
if (error)
goto fail;
blkno += btodb(NBPG);
blkno += btodb(PAGE_SIZE);
/* translation RAM (page zero) */
pmap_get_pagemap((int*)vaddr, 0);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
if (error)
goto fail;
blkno += btodb(NBPG);
blkno += btodb(PAGE_SIZE);
/* translation RAM (page one) */
pmap_get_pagemap((int*)vaddr, NBPG);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
pmap_get_pagemap((int*)vaddr, PAGE_SIZE);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
if (error)
goto fail;
blkno += btodb(NBPG);
blkno += btodb(PAGE_SIZE);
/*
* Now dump physical memory. Have to do it in two chunks.
@ -702,11 +702,11 @@ dumpsys()
if ((todo & 0xf) == 0)
printf("\r%4d", todo);
vaddr = (char*)(paddr + KERNBASE);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
if (error)
goto fail;
paddr += NBPG;
blkno += btodb(NBPG);
paddr += PAGE_SIZE;
blkno += btodb(PAGE_SIZE);
--todo;
} while (--chunk > 0);
@ -717,13 +717,13 @@ dumpsys()
printf("\r%4d", todo);
pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
pmap_update(pmap_kernel());
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
pmap_kremove(vmmap, NBPG);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
pmap_kremove(vmmap, PAGE_SIZE);
pmap_update(pmap_kernel());
if (error)
goto fail;
paddr += NBPG;
blkno += btodb(NBPG);
paddr += PAGE_SIZE;
blkno += btodb(PAGE_SIZE);
} while (--todo > 0);
printf("\rdump succeeded\n");

View File

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.44 2002/10/23 09:12:30 jdolecek Exp $ */
/* $NetBSD: mem.c,v 1.45 2003/04/01 15:31:12 thorpej Exp $ */
/*
* Copyright (c) 1994, 1995 Gordon W. Ross
@ -155,10 +155,10 @@ mmrw(dev, uio, flags)
trunc_page(v), prot, prot|PMAP_WIRED);
pmap_update(pmap_kernel());
o = v & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
error = uiomove((caddr_t)vmmap + o, c, uio);
pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
(vaddr_t)vmmap + NBPG);
(vaddr_t)vmmap + PAGE_SIZE);
pmap_update(pmap_kernel());
break;
@ -176,7 +176,7 @@ mmrw(dev, uio, flags)
* most requests are less than one page anyway.
*/
o = v & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
rw = (uio->uio_rw == UIO_READ) ? B_READ : B_WRITE;
if (!(uvm_kernacc((caddr_t)v, c, rw) ||
promacc((caddr_t)v, c, rw)))
@ -210,10 +210,10 @@ mmrw(dev, uio, flags)
*/
if (devzeropage == NULL) {
devzeropage = (caddr_t)
malloc(NBPG, M_TEMP, M_WAITOK);
memset(devzeropage, 0, NBPG);
malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
memset(devzeropage, 0, PAGE_SIZE);
}
c = min(iov->iov_len, NBPG);
c = min(iov->iov_len, PAGE_SIZE);
error = uiomove(devzeropage, c, uio);
break;
@ -316,7 +316,7 @@ promacc(va, len, rw)
/* PROM data page is OK for read/write. */
if ((sva >= SUN3_MONSHORTPAGE) &&
(eva <= (SUN3_MONSHORTPAGE+NBPG)))
(eva <= (SUN3_MONSHORTPAGE+PAGE_SIZE)))
return (1);
/* otherwise, not OK to touch */

View File

@ -1,4 +1,4 @@
/* $NetBSD: obio.c,v 1.43 2002/10/02 16:02:29 thorpej Exp $ */
/* $NetBSD: obio.c,v 1.44 2003/04/01 15:31:12 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -40,6 +40,8 @@
#include <sys/systm.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>
#include <machine/autoconf.h>
#include <machine/mon.h>
#include <machine/pte.h>
@ -214,7 +216,7 @@ obio_find_mapping(paddr_t pa, psize_t sz)
sz += off;
/* The saved mappings are all one page long. */
if (sz > NBPG)
if (sz > PAGE_SIZE)
return (caddr_t)0;
/* Within our table? */
@ -279,7 +281,7 @@ save_prom_mappings __P((void))
set_pte(pgva, pte);
}
}
pgva += NBPG; /* next page */
pgva += PAGE_SIZE; /* next page */
}
}
}
@ -306,7 +308,7 @@ make_required_mappings __P((void))
rmp = required_mappings;
while (*rmp != (paddr_t)-1) {
if (!obio_find_mapping(*rmp, NBPG)) {
if (!obio_find_mapping(*rmp, PAGE_SIZE)) {
/*
* XXX - Ack! Need to create one!
* I don't think this can happen, but if

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.139 2003/01/18 07:03:36 thorpej Exp $ */
/* $NetBSD: pmap.c,v 1.140 2003/04/01 15:31:13 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -225,7 +225,7 @@ static vaddr_t temp_seg_va;
*/
vaddr_t tmp_vpages[2] = {
SUN3_MONSHORTSEG,
SUN3_MONSHORTSEG + NBPG };
SUN3_MONSHORTSEG + PAGE_SIZE };
int tmp_vpages_inuse;
static int pmap_version = 1;
@ -742,7 +742,7 @@ pmeg_mon_init(sva, eva, keep)
if (sme != SEGINV) {
valid = 0;
endseg = sva + NBSG;
for (pgva = sva; pgva < endseg; pgva += NBPG) {
for (pgva = sva; pgva < endseg; pgva += PAGE_SIZE) {
pte = get_pte(pgva);
if (pte & PG_VALID) {
valid++;
@ -779,7 +779,7 @@ pmeg_clean(pmegp)
sme = pmegp->pmeg_index;
set_segmap(0, sme);
for (va = 0; va < NBSG; va += NBPG)
for (va = 0; va < NBSG; va += PAGE_SIZE)
set_pte(va, PG_INVAL);
set_segmap(0, SEGINV);
@ -1045,7 +1045,7 @@ pmeg_verify_empty(va)
vaddr_t eva;
int pte;
for (eva = va + NBSG; va < eva; va += NBPG) {
for (eva = va + NBSG; va < eva; va += PAGE_SIZE) {
pte = get_pte(va);
if (pte & PG_VALID)
panic("pmeg_verify_empty");
@ -1335,7 +1335,7 @@ pv_remove_all(pa)
while ((pv = *head) != NULL) {
pmap = pv->pv_pmap;
va = pv->pv_va;
pmap_remove1(pmap, va, va + NBPG);
pmap_remove1(pmap, va, va + PAGE_SIZE);
#ifdef PMAP_DEBUG
/* Make sure it went away. */
if (pv == *head) {
@ -1562,7 +1562,7 @@ pmap_bootstrap(nextva)
mon_printf("Warning: ancient PROM version=%d\n",
rvec->romvecVersion);
/* Guess that PROM version 0.X used two pages. */
avail_end = *rvec->memorySize - (2*NBPG);
avail_end = *rvec->memorySize - (2*PAGE_SIZE);
} else {
/* PROM version 1 or later. */
avail_end = *rvec->memoryAvail;
@ -1592,7 +1592,7 @@ pmap_bootstrap(nextva)
* Done allocating PAGES of virtual space, so
* clean out the rest of the last used segment.
*/
for (va = nextva; va < virtual_avail; va += NBPG)
for (va = nextva; va < virtual_avail; va += PAGE_SIZE)
set_pte(va, PG_INVAL);
/*
@ -1658,7 +1658,7 @@ pmap_bootstrap(nextva)
eva = SUN3_MONSHORTPAGE;
sme = get_segmap(va);
pmeg_reserve(sme);
for ( ; va < eva; va += NBPG)
for ( ; va < eva; va += PAGE_SIZE)
set_pte(va, PG_INVAL);
/*
@ -1679,7 +1679,7 @@ pmap_bootstrap(nextva)
pte = get_pte(va);
pte |= (PG_SYSTEM | PG_WRITE | PG_NC);
set_pte(va, pte);
va += NBPG;
va += PAGE_SIZE;
/* Initialize msgbufaddr later, in machdep.c */
/* Next is the tmpstack page. */
@ -1687,7 +1687,7 @@ pmap_bootstrap(nextva)
pte &= ~(PG_NC);
pte |= (PG_SYSTEM | PG_WRITE);
set_pte(va, pte);
va += NBPG;
va += PAGE_SIZE;
/*
* Next is the kernel text.
@ -1706,7 +1706,7 @@ pmap_bootstrap(nextva)
/* Kernel text is read-only */
pte |= (PG_SYSTEM);
set_pte(va, pte);
va += NBPG;
va += PAGE_SIZE;
}
/* data, bss, etc. */
while (va < nextva) {
@ -1717,7 +1717,7 @@ pmap_bootstrap(nextva)
pte &= ~(PG_NC);
pte |= (PG_SYSTEM | PG_WRITE);
set_pte(va, pte);
va += NBPG;
va += PAGE_SIZE;
}
/*
@ -1769,7 +1769,7 @@ pmap_bootstrap(nextva)
/* Initialization for pmap_next_page() */
avail_next = avail_start;
uvmexp.pagesize = NBPG;
uvmexp.pagesize = PAGE_SIZE;
uvm_setpagesize();
/* after setting up some structures */
@ -1884,9 +1884,9 @@ pmap_map(va, pa, endpa, prot)
sz = endpa - pa;
do {
pmap_enter(kernel_pmap, va, pa, prot, 0);
va += NBPG;
pa += NBPG;
sz -= NBPG;
va += PAGE_SIZE;
pa += PAGE_SIZE;
sz -= PAGE_SIZE;
} while (sz > 0);
pmap_update(kernel_pmap);
return(va);
@ -2540,7 +2540,7 @@ pmap_kremove(va, len)
#endif
/* Invalidate the PTEs in the given range. */
for (pgva = va; pgva < neva; pgva += NBPG) {
for (pgva = va; pgva < neva; pgva += PAGE_SIZE) {
pte = get_pte(pgva);
if (pte & PG_VALID) {
#ifdef HAVECACHE
@ -3194,7 +3194,7 @@ pmap_protect_mmu(pmap, sva, eva)
#endif
/* Remove write permission in the given range. */
for (pgva = sva; pgva < eva; pgva += NBPG) {
for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
pte = get_pte(pgva);
if (pte & PG_VALID) {
#ifdef HAVECACHE
@ -3248,7 +3248,7 @@ pmap_protect_noctx(pmap, sva, eva)
set_segmap(segva, sme);
/* Remove write permission in the given range. */
for (pgva = sva; pgva < eva; pgva += NBPG) {
for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
pte = get_pte(pgva);
if (pte & PG_VALID) {
/* No cache flush needed. */
@ -3429,7 +3429,7 @@ pmap_remove_mmu(pmap, sva, eva)
#endif
/* Invalidate the PTEs in the given range. */
for (pgva = sva; pgva < eva; pgva += NBPG) {
for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
pte = get_pte(pgva);
if (pte & PG_VALID) {
#ifdef HAVECACHE
@ -3529,7 +3529,7 @@ pmap_remove_noctx(pmap, sva, eva)
set_segmap(segva, sme);
/* Invalidate the PTEs in the given range. */
for (pgva = sva; pgva < eva; pgva += NBPG) {
for (pgva = sva; pgva < eva; pgva += PAGE_SIZE) {
pte = get_pte(pgva);
if (pte & PG_VALID) {
/* No cache flush needed. */
@ -3795,7 +3795,7 @@ pmap_get_pagemap(pt, off)
va = temp_seg_va;
do {
*pt++ = get_pte(va);
va += NBPG;
va += PAGE_SIZE;
} while (va < va_end);
sme++;
} while (sme < sme_end);
@ -3827,7 +3827,7 @@ get_pte_pmeg(int pmeg_num, int page_num)
va = temp_seg_va;
set_segmap(temp_seg_va, pmeg_num);
va += NBPG*page_num;
va += PAGE_SIZE*page_num;
pte = get_pte(va);
set_segmap(temp_seg_va, SEGINV);
@ -3852,7 +3852,7 @@ set_pte_pmeg(int pmeg_num, int page_num, int pte)
/* We never access data in temp_seg_va so no need to flush. */
va = temp_seg_va;
set_segmap(temp_seg_va, pmeg_num);
va += NBPG*page_num;
va += PAGE_SIZE*page_num;
set_pte(va, pte);
set_segmap(temp_seg_va, SEGINV);

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_subr.c,v 1.23 2001/09/11 20:37:13 chs Exp $ */
/* $NetBSD: bus_subr.c,v 1.24 2003/04/01 15:28:41 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -125,7 +125,7 @@ void bus_tmapout(vp)
return;
s = splvm();
pmap_kremove(pgva, NBPG);
pmap_kremove(pgva, PAGE_SIZE);
pmap_update(pmap_kernel());
--tmp_vpages_inuse;
splx(s);

View File

@ -1,4 +1,4 @@
/* $NetBSD: dvma.c,v 1.25 2002/09/27 15:36:57 provos Exp $ */
/* $NetBSD: dvma.c,v 1.26 2003/04/01 15:28:41 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -211,7 +211,7 @@ dvma_mapin(kmem_va, len, canwait)
*/
dvma_addr = (void *) (tva + off);
for (;npf--; kva += NBPG, tva += NBPG) {
for (;npf--; kva += PAGE_SIZE, tva += PAGE_SIZE) {
/*
* Retrieve the physical address of each page in the buffer
* and enter mappings into the I/O MMU so they may be seen

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.85 2003/01/18 07:03:37 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.86 2003/04/01 15:28:41 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -214,7 +214,7 @@ cpu_startup()
/*
* Get scratch page for dumpsys().
*/
if ((dumppage = uvm_km_alloc(kernel_map, NBPG)) == 0)
if ((dumppage = uvm_km_alloc(kernel_map, PAGE_SIZE)) == 0)
panic("startup: alloc dumppage");
/*
@ -256,7 +256,7 @@ cpu_startup()
* "base" pages for the rest.
*/
curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base+1) : base);
curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
@ -293,7 +293,7 @@ cpu_startup()
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
printf("avail memory = %s\n", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
/*
@ -301,7 +301,7 @@ cpu_startup()
* This page is handed to pmap_enter() therefore
* it has to be in the normal kernel VA range.
*/
vmmap = uvm_km_valloc_wait(kernel_map, NBPG);
vmmap = uvm_km_valloc_wait(kernel_map, PAGE_SIZE);
/*
* Create the DVMA maps.
@ -566,7 +566,7 @@ long dumplo = 0; /* blocks */
/*
* This is called by main to set dumplo, dumpsize.
* Dumps always skip the first NBPG of disk space
* Dumps always skip the first PAGE_SIZE of disk space
* in case there might be a disk label stored there.
* If there is extra space, put dump at the end to
* reduce the chance that swapping trashes it.
@ -618,8 +618,8 @@ struct pcb dumppcb;
* Write a crash dump. The format while in swap is:
* kcore_seg_t cpu_hdr;
* cpu_kcore_hdr_t cpu_data;
* padding (NBPG-sizeof(kcore_seg_t))
* pagemap (2*NBPG)
* padding (PAGE_SIZE-sizeof(kcore_seg_t))
* pagemap (2*PAGE_SIZE)
* physical memory...
*/
void
@ -670,7 +670,7 @@ dumpsys()
blkno = dumplo;
todo = dumpsize; /* pages */
vaddr = (char *)dumppage;
memset(vaddr, 0, NBPG);
memset(vaddr, 0, PAGE_SIZE);
/* Set pointers to all three parts. */
kseg_p = (kcore_seg_t *)vaddr;
@ -683,17 +683,17 @@ dumpsys()
/* Fill in cpu_kcore_hdr_t part. */
strncpy(chdr_p->name, kernel_arch, sizeof(chdr_p->name));
chdr_p->page_size = NBPG;
chdr_p->page_size = PAGE_SIZE;
chdr_p->kernbase = KERNBASE;
/* Fill in the sun3x_kcore_hdr part. */
pmap_kcore_hdr(sh);
/* Write out the dump header. */
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, PAGE_SIZE);
if (error)
goto fail;
blkno += btodb(NBPG);
blkno += btodb(PAGE_SIZE);
/*
* Now dump physical memory. Note that physical memory
@ -716,14 +716,15 @@ dumpsys()
/* Make a temporary mapping for the page. */
pmap_kenter_pa(vmmap, paddr | PMAP_NC, VM_PROT_READ);
pmap_update(pmap_kernel());
error = (*dsw->d_dump)(dumpdev, blkno, vaddr, NBPG);
pmap_kremove(vmmap, NBPG);
error = (*dsw->d_dump)(dumpdev, blkno, vaddr,
PAGE_SIZE);
pmap_kremove(vmmap, PAGE_SIZE);
pmap_update(pmap_kernel());
if (error)
goto fail;
paddr += NBPG;
segsz -= NBPG;
blkno += btodb(NBPG);
paddr += PAGE_SIZE;
segsz -= PAGE_SIZE;
blkno += btodb(PAGE_SIZE);
todo--;
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.26 2002/10/23 09:12:32 jdolecek Exp $ */
/* $NetBSD: mem.c,v 1.27 2003/04/01 15:28:41 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -149,10 +149,10 @@ mmrw(dev, uio, flags)
trunc_page(v), prot, prot|PMAP_WIRED);
pmap_update(pmap_kernel());
o = v & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
error = uiomove((caddr_t)vmmap + o, c, uio);
pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
(vaddr_t)vmmap + NBPG);
(vaddr_t)vmmap + PAGE_SIZE);
pmap_update(pmap_kernel());
break;
@ -164,7 +164,7 @@ mmrw(dev, uio, flags)
* Note that we can get here from case 0 above!
*/
o = v & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
rw = (uio->uio_rw == UIO_READ) ? B_READ : B_WRITE;
if (!(uvm_kernacc((caddr_t)v, c, rw) ||
promacc((caddr_t)v, c, rw)))
@ -198,10 +198,10 @@ mmrw(dev, uio, flags)
*/
if (devzeropage == NULL) {
devzeropage = (caddr_t)
malloc(NBPG, M_TEMP, M_WAITOK);
memset(devzeropage, 0, NBPG);
malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
memset(devzeropage, 0, PAGE_SIZE);
}
c = min(iov->iov_len, NBPG);
c = min(iov->iov_len, PAGE_SIZE);
error = uiomove(devzeropage, c, uio);
break;
@ -307,7 +307,7 @@ promacc(va, len, rw)
/* PROM data page is OK for read/write. */
if ((sva >= SUN3X_MONDATA) &&
(eva <= (SUN3X_MONDATA + NBPG)))
(eva <= (SUN3X_MONDATA + PAGE_SIZE)))
return (1);
/* otherwise, not OK to touch */

View File

@ -1,4 +1,4 @@
/* $NetBSD: obio.c,v 1.20 2002/10/02 16:02:30 thorpej Exp $ */
/* $NetBSD: obio.c,v 1.21 2003/04/01 15:28:41 thorpej Exp $ */
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -40,6 +40,8 @@
#include <sys/systm.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>
#include <machine/autoconf.h>
#include <machine/mon.h>
#include <machine/pte.h>
@ -243,7 +245,7 @@ obio_find_mapping(paddr_t pa, psize_t sz)
sz += off;
/* The saved mappings are all one page long. */
if (sz > NBPG)
if (sz > PAGE_SIZE)
return (caddr_t)0;
/* Linear search for it. The list is short. */
@ -271,7 +273,7 @@ save_prom_mappings __P((void))
mon_pte = *romVectorPtr->monptaddr;
for (va = SUN3X_MON_KDB_BASE; va < SUN3X_MONEND;
va += NBPG, mon_pte++)
va += PAGE_SIZE, mon_pte++)
{
/* Is this a valid mapping to OBIO? */
/* XXX - Some macros would be nice... */

View File

@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.78 2003/01/28 22:52:11 wiz Exp $ */
/* $NetBSD: pmap.c,v 1.79 2003/04/01 15:28:41 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
@ -764,7 +764,7 @@ pmap_bootstrap(nextva)
* `virtual_avail' to the nearest page, and set the flag
* to prevent use of pmap_bootstrap_alloc() hereafter.
*/
pmap_bootstrap_aalign(NBPG);
pmap_bootstrap_aalign(PAGE_SIZE);
bootstrap_alloc_enabled = FALSE;
/*
@ -794,9 +794,9 @@ pmap_bootstrap(nextva)
* address-oritented operations.
*/
tmp_vpages[0] = virtual_avail;
virtual_avail += NBPG;
virtual_avail += PAGE_SIZE;
tmp_vpages[1] = virtual_avail;
virtual_avail += NBPG;
virtual_avail += PAGE_SIZE;
/** Initialize the PV system **/
pmap_init_pv();
@ -834,11 +834,11 @@ pmap_bootstrap(nextva)
* It is non-cached, mostly due to paranoia.
*/
pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
va += NBPG; pa += NBPG;
va += PAGE_SIZE; pa += PAGE_SIZE;
/* Next page is used as the temporary stack. */
pmap_enter_kernel(va, pa, VM_PROT_ALL);
va += NBPG; pa += NBPG;
va += PAGE_SIZE; pa += PAGE_SIZE;
/*
* Map all of the kernel's text segment as read-only and cacheable.
@ -848,7 +848,7 @@ pmap_bootstrap(nextva)
* has to be mapped as read/write, to accomodate the data.
*/
eva = m68k_trunc_page((vaddr_t)etext);
for (; va < eva; va += NBPG, pa += NBPG)
for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
/*
@ -856,7 +856,7 @@ pmap_bootstrap(nextva)
* This includes: data, BSS, symbols, and everything in the
* contiguous memory used by pmap_bootstrap_alloc()
*/
for (; pa < avail_start; va += NBPG, pa += NBPG)
for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
/*
@ -869,7 +869,7 @@ pmap_bootstrap(nextva)
pmap_bootstrap_setprom();
/* Notify the VM system of our page size. */
uvmexp.pagesize = NBPG;
uvmexp.pagesize = PAGE_SIZE;
uvm_setpagesize();
pmap_page_upload();
@ -2100,7 +2100,7 @@ pmap_kremove(va, len)
while (idx < eidx) {
kernCbase[idx++].attr.raw = MMU_DT_INVALID;
TBIS(va);
va += NBPG;
va += PAGE_SIZE;
}
}
@ -2124,9 +2124,9 @@ pmap_map(va, pa, endpa, prot)
sz = endpa - pa;
do {
pmap_enter_kernel(va, pa, prot);
va += NBPG;
pa += NBPG;
sz -= NBPG;
va += PAGE_SIZE;
pa += PAGE_SIZE;
sz -= PAGE_SIZE;
} while (sz > 0);
pmap_update(pmap_kernel());
return(va);
@ -2234,7 +2234,7 @@ pmap_protect(pmap, startva, endva, prot)
if (iscurpmap)
TBIS(startva);
}
startva += NBPG;
startva += PAGE_SIZE;
if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
c_tbl = NULL;
@ -2275,7 +2275,7 @@ pmap_protect_kernel(startva, endva, prot)
mmu_short_pte_t *pte;
pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
for (va = startva; va < endva; va += NBPG, pte++) {
for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
if (MMU_VALID_DT(*pte)) {
switch (prot) {
case VM_PROT_ALL:
@ -2410,11 +2410,11 @@ pmap_copy_page(srcpa, dstpa)
pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
/* Hand-optimized version of bcopy(src, dst, NBPG) */
/* Hand-optimized version of bcopy(src, dst, PAGE_SIZE) */
copypage((char *) srcva, (char *) dstva);
pmap_kremove(srcva, NBPG);
pmap_kremove(dstva, NBPG);
pmap_kremove(srcva, PAGE_SIZE);
pmap_kremove(dstva, PAGE_SIZE);
#ifdef DIAGNOSTIC
--tmp_vpages_inuse;
@ -2446,10 +2446,10 @@ pmap_zero_page(dstpa)
/* The comments in pmap_copy_page() above apply here also. */
pmap_kenter_pa(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
/* Hand-optimized version of bzero(ptr, NBPG) */
/* Hand-optimized version of bzero(ptr, PAGE_SIZE) */
zeropage((char *) dstva);
pmap_kremove(dstva, NBPG);
pmap_kremove(dstva, PAGE_SIZE);
#ifdef DIAGNOSTIC
--tmp_vpages_inuse;
#endif
@ -2953,7 +2953,7 @@ pmap_remove_kernel(sva, eva)
while (idx < eidx) {
pmap_remove_pte(&kernCbase[idx++]);
TBIS(sva);
sva += NBPG;
sva += PAGE_SIZE;
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus.c,v 1.7 2002/09/27 15:36:59 provos Exp $ */
/* $NetBSD: bus.c,v 1.8 2003/04/01 15:24:45 thorpej Exp $ */
/*
* Copyright (c) 2001 Matthew Fredette.
@ -550,7 +550,7 @@ sun68k_find_prom_map(pa, iospace, len, hp)
/*
* The mapping must fit entirely within one page.
*/
if ((((u_long)pa & PGOFSET) + len) > NBPG)
if ((((u_long)pa & PGOFSET) + len) > PAGE_SIZE)
return (EINVAL);
pf = PA_PGNUM(pa);
@ -575,7 +575,7 @@ sun68k_find_prom_map(pa, iospace, len, hp)
/*
* Walk the pages of this segment.
*/
for(eva = va + NBSG; va < eva; va += NBPG) {
for(eva = va + NBSG; va < eva; va += PAGE_SIZE) {
pte = get_pte(va);
if ((pte & (PG_VALID | PG_TYPE)) ==

View File

@ -1,4 +1,4 @@
/* $NetBSD: if_le_vsbus.c,v 1.15 2002/10/02 16:02:32 thorpej Exp $ */
/* $NetBSD: if_le_vsbus.c,v 1.16 2003/04/01 15:23:38 thorpej Exp $ */
/*-
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -237,7 +237,7 @@ le_vsbus_attach(parent, self, aux)
*/
#define ALLOCSIZ (64 * 1024)
err = bus_dmamem_alloc(va->va_dmat, ALLOCSIZ, NBPG, 0,
err = bus_dmamem_alloc(va->va_dmat, ALLOCSIZ, PAGE_SIZE, 0,
&seg, 1, &rseg, BUS_DMA_NOWAIT);
if (err) {
printf(": unable to alloc buffer block: err %d\n", err);

View File

@ -1,4 +1,4 @@
/* $NetBSD: bus_dma.c,v 1.16 2002/12/01 21:20:31 matt Exp $ */
/* $NetBSD: bus_dma.c,v 1.17 2003/04/01 15:23:07 thorpej Exp $ */
/*-
* Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@ -464,7 +464,7 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
for (curseg = 0; curseg < nsegs; curseg++) {
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += NBPG, va += NBPG, size -= NBPG) {
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
if (vax_boardtype == VAX_BTYP_43)
@ -597,7 +597,7 @@ _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
/*
* Compute the segment size, and adjust counts.
*/
sgsize = NBPG - ((u_long)vaddr & PGOFSET);
sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
if (buflen < sgsize)
sgsize = buflen;

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.133 2003/03/01 21:51:59 matt Exp $ */
/* $NetBSD: machdep.c,v 1.134 2003/04/01 15:23:07 thorpej Exp $ */
/*
* Copyright (c) 2002, Hugh Graham.
@ -207,7 +207,7 @@ cpu_startup()
* physical memory allocated for it.
*/
curbuf = (vaddr_t) buffers + i * MAXBSIZE;
curbufsize = NBPG * (i < residual ? base + 1 : base);
curbufsize = PAGE_SIZE * (i < residual ? base + 1 : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
if (pg == NULL)
@ -215,8 +215,8 @@ cpu_startup()
"not enough RAM for buffer cache");
pmap_kenter_pa(curbuf, VM_PAGE_TO_PHYS(pg),
VM_PROT_READ | VM_PROT_WRITE);
curbuf += NBPG;
curbufsize -= NBPG;
curbuf += PAGE_SIZE;
curbufsize -= PAGE_SIZE;
}
}
pmap_update(kernel_map->pmap);
@ -240,7 +240,7 @@ cpu_startup()
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
printf("avail memory = %s\n", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
/*
@ -283,11 +283,11 @@ cpu_dumpconf()
dumplo = nblks - btodb(ctob(dumpsize));
}
/*
* Don't dump on the first NBPG (why NBPG?) in case the dump
* Don't dump on the first PAGE_SIZE (why PAGE_SIZE?) in case the dump
* device includes a disk label.
*/
if (dumplo < btodb(NBPG))
dumplo = btodb(NBPG);
if (dumplo < btodb(PAGE_SIZE))
dumplo = btodb(PAGE_SIZE);
}
int

View File

@ -1,4 +1,4 @@
/* $NetBSD: mem.c,v 1.25 2002/12/01 21:20:32 matt Exp $ */
/* $NetBSD: mem.c,v 1.26 2003/04/01 15:23:07 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -121,10 +121,10 @@ mmrw(dev_t dev, struct uio *uio, int flags)
}
if (zeropage == NULL) {
zeropage = (caddr_t)
malloc(NBPG, M_TEMP, M_WAITOK);
bzero(zeropage, NBPG);
malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
bzero(zeropage, PAGE_SIZE);
}
c = min(iov->iov_len, NBPG);
c = min(iov->iov_len, PAGE_SIZE);
error = uiomove(zeropage, c, uio);
continue;
#if NLEDS

View File

@ -1,4 +1,4 @@
/* $NetBSD: multicpu.c,v 1.13 2002/09/27 15:36:59 provos Exp $ */
/* $NetBSD: multicpu.c,v 1.14 2003/04/01 15:23:07 thorpej Exp $ */
/*
* Copyright (c) 2000 Ludd, University of Lule}, Sweden. All rights reserved.
@ -122,7 +122,7 @@ cpu_slavesetup(struct device *dev)
ci->ci_dev = dev;
ci->ci_exit = scratch;
(u_long)ci->ci_pcb = (u_long)pcb & ~KERNBASE;
ci->ci_istack = istackbase + NBPG;
ci->ci_istack = istackbase + PAGE_SIZE;
pcb->KSP = (u_long)pcb + USPACE; /* Idle kernel stack */
pcb->SSP = (u_long)ci;
pcb->PC = (u_long)slaverun + 2;