Use PAGE_SIZE rather than NBPG.

thorpej 2003-04-02 04:17:50 +00:00
parent 44b9a2a1ec
commit c9228c8ddd
26 changed files with 148 additions and 133 deletions
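
Background on the change: NBPG is a machine-dependent page-size constant fixed at compile time, while PAGE_SIZE is the page size as UVM sees it and, on ports without a single fixed page size, may expand to uvmexp.pagesize; that is also why several files below gain #include <uvm/uvm_extern.h>. The fragment that follows is only a sketch of the substituted idiom, not code from this commit, and the helper name dma_page_count is illustrative.

#include <sys/param.h>
#include <uvm/uvm_extern.h>	/* assumed here to supply round_page() and PAGE_SIZE */

/*
 * Sketch only (not from this commit): number of VM pages spanned by a
 * buffer of 'size' bytes, written against the run-time page size.
 */
static int
dma_page_count(size_t size)
{
	/* Old idiom:  round_page(size) / NBPG        (compile-time constant) */
	/* New idiom:  round_page(size) / PAGE_SIZE   (page size known to UVM) */
	return round_page(size) / PAGE_SIZE;
}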


@ -1,4 +1,4 @@
/* $NetBSD: apbus.c,v 1.11 2003/01/01 01:55:42 thorpej Exp $ */
/* $NetBSD: apbus.c,v 1.12 2003/04/02 04:17:50 thorpej Exp $ */
/*-
* Copyright (C) 1999 SHIMIZU Ryo. All rights reserved.
@ -290,7 +290,7 @@ apbus_dma_mapalloc(t, map, flags)
{
int i, j, cnt;
cnt = round_page(map->_dm_size) / NBPG;
cnt = round_page(map->_dm_size) / PAGE_SIZE;
again:
for (i = 0; i < APBUS_NDMAMAP; i += j + 1) {
@ -349,7 +349,7 @@ apbus_dma_mapset(t, map)
for (seg = 0; seg < map->dm_nsegs; seg++) {
segs = &map->dm_segs[seg];
for (addr = segs->ds_addr, eaddr = addr + segs->ds_len;
addr < eaddr; addr += NBPG, i++) {
addr < eaddr; addr += PAGE_SIZE, i++) {
#ifdef DIAGNOSTIC
if (i >= map->_dm_maptblcnt)
panic("dma map table overflow");
@ -378,7 +378,7 @@ apbus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
int error;
if (flags & NEWSMIPS_DMAMAP_MAPTBL)
nsegments = round_page(size) / NBPG;
nsegments = round_page(size) / PAGE_SIZE;
error = _bus_dmamap_create(t, size, nsegments, maxsegsz, boundary,
flags, dmamp);
if (error == 0 && (flags & NEWSMIPS_DMAMAP_MAPTBL)) {


@ -1,4 +1,4 @@
/* $NetBSD: dmac3.c,v 1.4 2002/10/02 04:27:51 thorpej Exp $ */
/* $NetBSD: dmac3.c,v 1.5 2003/04/02 04:17:50 thorpej Exp $ */
/*-
* Copyright (c) 2000 Tsubai Masanari. All rights reserved.
@ -162,9 +162,9 @@ dmac3_start(sc, addr, len, direction)
start = mips_trunc_page(addr);
end = mips_round_page(addr + len);
p = sc->sc_dmamap;
for (v = start; v < end; v += NBPG) {
for (v = start; v < end; v += PAGE_SIZE) {
pa = kvtophys(v);
mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(pa), NBPG);
mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(pa), PAGE_SIZE);
*p++ = 0;
*p++ = (pa >> PGSHIFT) | 0xc0000000;
}


@ -1,4 +1,4 @@
/* $NetBSD: if_sn.c,v 1.11 2002/09/27 15:36:29 provos Exp $ */
/* $NetBSD: if_sn.c,v 1.12 2003/04/02 04:17:50 thorpej Exp $ */
/*
* National Semiconductor DP8393X SONIC Driver
@ -153,7 +153,7 @@ snsetup(sc, lladdr)
* around problems near the end of 64k !!
*/
p = sc->space;
pp = (u_char *)ROUNDUP ((int)p, NBPG);
pp = (u_char *)ROUNDUP ((int)p, PAGE_SIZE);
p = pp;
for (i = 0; i < NRRA; i++) {
@ -180,29 +180,29 @@ snsetup(sc, lladdr)
p = (u_char *)SOALIGN(sc, p);
if ((p - pp) > NBPG) {
if ((p - pp) > PAGE_SIZE) {
printf ("%s: sizeof RRA (%ld) + CDA (%ld) +"
"TDA (%ld) > NBPG (%d). Punt!\n",
"TDA (%ld) > PAGE_SIZE (%d). Punt!\n",
sc->sc_dev.dv_xname,
(ulong)sc->p_cda - (ulong)sc->p_rra[0],
(ulong)sc->mtda[0].mtd_txp - (ulong)sc->p_cda,
(ulong)p - (ulong)sc->mtda[0].mtd_txp,
NBPG);
PAGE_SIZE);
return(1);
}
p = pp + NBPG;
p = pp + PAGE_SIZE;
pp = p;
sc->sc_nrda = NBPG / RXPKT_SIZE(sc);
sc->sc_nrda = PAGE_SIZE / RXPKT_SIZE(sc);
sc->p_rda = (caddr_t) p;
sc->v_rda = SONIC_GETDMA(p);
p = pp + NBPG;
p = pp + PAGE_SIZE;
for (i = 0; i < NRBA; i++) {
sc->rbuf[i] = (caddr_t)p;
p += NBPG;
p += PAGE_SIZE;
}
pp = p;
@ -834,8 +834,8 @@ initialise_rra(sc)
v = SONIC_GETDMA(sc->rbuf[i]);
SWO(bitmode, sc->p_rra[i], RXRSRC_PTRHI, UPPER(v));
SWO(bitmode, sc->p_rra[i], RXRSRC_PTRLO, LOWER(v));
SWO(bitmode, sc->p_rra[i], RXRSRC_WCHI, UPPER(NBPG/2));
SWO(bitmode, sc->p_rra[i], RXRSRC_WCLO, LOWER(NBPG/2));
SWO(bitmode, sc->p_rra[i], RXRSRC_WCHI, UPPER(PAGE_SIZE/2));
SWO(bitmode, sc->p_rra[i], RXRSRC_WCLO, LOWER(PAGE_SIZE/2));
}
sc->sc_rramark = NRBA;
NIC_PUT(sc, SNR_RWP, LOWER(sc->v_rra[sc->sc_rramark]));


@ -1,4 +1,4 @@
/* $NetBSD: sc_wrap.c,v 1.20 2002/10/02 04:27:52 thorpej Exp $ */
/* $NetBSD: sc_wrap.c,v 1.21 2003/04/02 04:17:51 thorpej Exp $ */
/*
* This driver is slow! Need to rewrite.
@ -13,6 +13,8 @@
#include <sys/buf.h>
#include <sys/malloc.h>
#include <uvm/uvm_extern.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
@ -319,14 +321,14 @@ start:
va = (vaddr_t)xs->data;
offset = va & PGOFSET;
pages = (offset + xs->datalen + NBPG -1 ) >> PGSHIFT;
pages = (offset + xs->datalen + PAGE_SIZE -1 ) >> PGSHIFT;
if (pages >= NSCMAP)
panic("sc_map: Too many pages");
for (i = 0; i < pages; i++) {
pn = kvtophys(va) >> PGSHIFT;
sc->sc_map[chan].mp_addr[i] = pn;
va += NBPG;
va += PAGE_SIZE;
}
sc->sc_map[chan].mp_offset = offset;


@ -1,4 +1,4 @@
/* $NetBSD: scsi_1185.c,v 1.9 2002/05/31 21:43:49 thorpej Exp $ */
/* $NetBSD: scsi_1185.c,v 1.10 2003/04/02 04:17:52 thorpej Exp $ */
/*
* Copyright (c) 1992, 1993
@ -60,6 +60,8 @@
#include <sys/systm.h>
#include <sys/device.h>
#include <uvm/uvm_extern.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
@ -1802,7 +1804,7 @@ clean_k2dcache(scb)
for (i = 0; i < pages; i++) {
pa = sc_map->mp_addr[i] << PGSHIFT;
mips_dcache_wbinv_range_index(MIPS_PHYS_TO_KSEG0(pa),
NBPG);
PAGE_SIZE);
}
}
}


@ -1,4 +1,4 @@
/* $NetBSD: bus.c,v 1.10 2002/06/02 14:44:40 drochner Exp $ */
/* $NetBSD: bus.c,v 1.11 2003/04/02 04:17:52 thorpej Exp $ */
/*
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -251,7 +251,7 @@ _bus_dmamap_load_buffer(map, buf, buflen, p, flags,
/*
* Compute the segment size, and adjust counts.
*/
sgsize = NBPG - ((u_long)vaddr & PGOFSET);
sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
if (buflen < sgsize)
sgsize = buflen;
@ -883,7 +883,7 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
for (curseg = 0; curseg < nsegs; curseg++) {
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += NBPG, va += NBPG, size -= NBPG) {
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.65 2003/01/18 06:07:04 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.66 2003/04/02 04:17:52 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -43,7 +43,7 @@
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.65 2003/01/18 06:07:04 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.66 2003/04/02 04:17:52 thorpej Exp $");
/* from: Utah Hdr: machdep.c 1.63 91/04/24 */
@ -463,7 +463,7 @@ cpu_startup()
* "base" pages for the rest.
*/
curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base+1) : base);
curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
@ -501,7 +501,7 @@ cpu_startup()
#endif
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
printf("avail memory = %s\n", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
/*


@ -1,4 +1,4 @@
/* $NetBSD: bus_dma.c,v 1.3 2002/06/02 14:44:46 drochner Exp $ */
/* $NetBSD: bus_dma.c,v 1.4 2003/04/02 04:18:40 thorpej Exp $ */
/*
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -158,7 +158,7 @@ _bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
/*
* Compute the segment size, and adjust counts.
*/
sgsize = NBPG - ((u_long)vaddr & PGOFSET);
sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
if (buflen < sgsize)
sgsize = buflen;
@ -594,7 +594,7 @@ _bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
for (curseg = 0; curseg < nsegs; curseg++) {
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += NBPG, va += NBPG, size -= NBPG) {
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.7 2003/01/18 06:13:00 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.8 2003/04/02 04:18:40 thorpej Exp $ */
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
@ -227,7 +227,7 @@ cpu_startup()
* "base" pages for the rest.
*/
curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base + 1) : base);
curbufsize = PAGE_SIZE * ((i < residual) ? (base + 1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
@ -262,7 +262,7 @@ cpu_startup()
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
printf(", %s free", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf(", %s in %u buffers\n", pbuf, nbuf);
/*


@ -1,4 +1,4 @@
/* $NetBSD: px.c,v 1.46 2003/02/20 22:16:06 atatat Exp $ */
/* $NetBSD: px.c,v 1.47 2003/04/02 04:19:49 thorpej Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@ -43,7 +43,7 @@
#endif
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: px.c,v 1.46 2003/02/20 22:16:06 atatat Exp $");
__KERNEL_RCSID(0, "$NetBSD: px.c,v 1.47 2003/04/02 04:19:49 thorpej Exp $");
/*
* px.c: driver for the DEC TURBOchannel 2D and 3D accelerated framebuffers
@ -230,7 +230,7 @@ static const u_char px_shuffle[256] = {
0xab, 0xeb, 0xbb, 0xfb, 0xaf, 0xef, 0xbf, 0xff,
};
#define PXMAP_INFO_SIZE (NBPG)
#define PXMAP_INFO_SIZE (PAGE_SIZE)
#define PXMAP_RBUF_SIZE (4096 * 16 + 8192 * 2)
/* Need alignment to 8KB here... */
@ -1954,9 +1954,9 @@ pxmmap(dev, off, prot)
/*
* STIC control registers
*/
if (off < NBPG)
if (off < PAGE_SIZE)
return mips_btop(MIPS_KSEG1_TO_PHYS(pxi->pxi_stic) + off);
off -= NBPG;
off -= PAGE_SIZE;
/*
* STIC poll registers


@ -1,4 +1,4 @@
/* $NetBSD: bus_dma.c,v 1.34 2002/06/02 14:44:38 drochner Exp $ */
/* $NetBSD: bus_dma.c,v 1.35 2003/04/02 04:19:50 thorpej Exp $ */
/*-
* Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
@ -192,7 +192,7 @@ _bus_dmamap_load_buffer(map, buf, buflen, p, flags,
/*
* Compute the segment size, and adjust counts.
*/
sgsize = NBPG - ((u_long)vaddr & PGOFSET);
sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
if (buflen < sgsize)
sgsize = buflen;
@ -824,7 +824,7 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
for (curseg = 0; curseg < nsegs; curseg++) {
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += NBPG, va += NBPG, size -= NBPG) {
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.202 2003/01/18 06:15:24 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.203 2003/04/02 04:19:50 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -43,7 +43,7 @@
*/
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.202 2003/01/18 06:15:24 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.203 2003/04/02 04:19:50 thorpej Exp $");
#include "fs_mfs.h"
#include "opt_ddb.h"
@ -463,7 +463,7 @@ cpu_startup()
* "base" pages for the rest.
*/
curbuf = (vaddr_t)buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base+1) : base);
curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
@ -502,7 +502,7 @@ cpu_startup()
#endif
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
printf("avail memory = %s\n", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
/*
@ -627,7 +627,7 @@ memsize_scan(first)
break;
*(int *)cp = i;
((int *)cp)[4] = j;
cp += NBPG;
cp += PAGE_SIZE;
mem++;
}


@ -1,4 +1,4 @@
/* $NetBSD: asc_ioasic.c,v 1.14 2002/10/02 04:15:10 thorpej Exp $ */
/* $NetBSD: asc_ioasic.c,v 1.15 2003/04/02 04:20:32 thorpej Exp $ */
/*-
* Copyright (c) 2000 The NetBSD Foundation, Inc.
@ -37,7 +37,7 @@
*/
#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
__KERNEL_RCSID(0, "$NetBSD: asc_ioasic.c,v 1.14 2002/10/02 04:15:10 thorpej Exp $");
__KERNEL_RCSID(0, "$NetBSD: asc_ioasic.c,v 1.15 2003/04/02 04:20:32 thorpej Exp $");
#include <sys/types.h>
#include <sys/param.h>
@ -45,6 +45,8 @@ __KERNEL_RCSID(0, "$NetBSD: asc_ioasic.c,v 1.14 2002/10/02 04:15:10 thorpej Exp
#include <sys/device.h>
#include <sys/buf.h>
#include <uvm/uvm_extern.h>
#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>
@ -141,8 +143,9 @@ asc_ioasic_attach(parent, self, aux)
return;
}
asc->sc_dmat = ((struct ioasic_softc *)parent)->sc_dmat;
if (bus_dmamap_create(asc->sc_dmat, NBPG * 2,
2, NBPG, NBPG, BUS_DMA_NOWAIT, &asc->sc_dmamap)) {
if (bus_dmamap_create(asc->sc_dmat, PAGE_SIZE * 2,
2, PAGE_SIZE, PAGE_SIZE, BUS_DMA_NOWAIT,
&asc->sc_dmamap)) {
printf(": failed to create DMA map\n");
return;
}
@ -211,7 +214,7 @@ asc_ioasic_reset(sc)
asc->sc_flags &= ~(ASC_DMAACTIVE|ASC_MAPLOADED);
}
#define TWOPAGE(a) (NBPG*2 - ((a) & (NBPG-1)))
#define TWOPAGE(a) (PAGE_SIZE*2 - ((a) & (PAGE_SIZE-1)))
int
asc_ioasic_setup(sc, addr, len, ispullup, dmasize)


@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.12 2003/03/11 10:40:16 hannken Exp $ */
/* $NetBSD: cpu.c,v 1.13 2003/04/02 04:22:03 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@ -40,6 +40,8 @@
#include <sys/device.h>
#include <sys/properties.h>
#include <uvm/uvm_extern.h>
#include <machine/cpu.h>
#include <powerpc/ibm4xx/dev/plbvar.h>
@ -235,7 +237,8 @@ dcache_flush_page(vaddr_t va)
int i;
if (curcpu()->ci_ci.dcache_line_size)
for (i = 0; i < NBPG; i += curcpu()->ci_ci.dcache_line_size)
for (i = 0; i < PAGE_SIZE;
i += curcpu()->ci_ci.dcache_line_size)
asm volatile("dcbf %0,%1" : : "r" (va), "r" (i));
asm volatile("sync;isync" : : );
}
@ -246,7 +249,8 @@ icache_flush_page(vaddr_t va)
int i;
if (curcpu()->ci_ci.icache_line_size)
for (i = 0; i < NBPG; i += curcpu()->ci_ci.icache_line_size)
for (i = 0; i < PAGE_SIZE;
i += curcpu()->ci_ci.icache_line_size)
asm volatile("icbi %0,%1" : : "r" (va), "r" (i));
asm volatile("sync;isync" : : );
}


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.18 2003/03/11 10:40:16 hannken Exp $ */
/* $NetBSD: pmap.c,v 1.19 2003/04/02 04:22:03 thorpej Exp $ */
/*
* Copyright 2001 Wasabi Systems, Inc.
@ -88,7 +88,7 @@
* kernmap is an array of PTEs large enough to map in
* 4GB. At 16KB/page it is 256K entries or 2MB.
*/
#define KERNMAP_SIZE ((0xffffffffU/NBPG)+1)
#define KERNMAP_SIZE ((0xffffffffU/PAGE_SIZE)+1)
caddr_t kernmap;
#define MINCTX 2
@ -207,7 +207,8 @@ pte_enter(struct pmap *pm, vaddr_t va, u_int pte)
if (!pte) return (1);
/* Allocate a page XXXX this will sleep! */
pa = 0;
pm->pm_ptbl[seg] = (uint *)uvm_km_alloc1(kernel_map, NBPG, 1);
pm->pm_ptbl[seg] =
(uint *)uvm_km_alloc1(kernel_map, PAGE_SIZE, 1);
}
pm->pm_ptbl[seg][ptn] = pte;
@ -637,7 +638,8 @@ pmap_release(struct pmap *pm)
for (i = 0; i < STSZ; i++)
if (pm->pm_ptbl[i]) {
uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i], NBPG);
uvm_km_free(kernel_map, (vaddr_t)pm->pm_ptbl[i],
PAGE_SIZE);
pm->pm_ptbl[i] = NULL;
}
if (pm->pm_ctx) ctx_free(pm);
@ -686,11 +688,11 @@ pmap_zero_page(paddr_t pa)
{
#ifdef PPC_4XX_NOCACHE
memset((caddr_t)pa, 0, NBPG);
memset((caddr_t)pa, 0, PAGE_SIZE);
#else
int i;
for (i = NBPG/CACHELINESIZE; i > 0; i--) {
for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
__asm __volatile ("dcbz 0,%0" :: "r"(pa));
pa += CACHELINESIZE;
}
@ -704,7 +706,7 @@ void
pmap_copy_page(paddr_t src, paddr_t dst)
{
memcpy((caddr_t)dst, (caddr_t)src, NBPG);
memcpy((caddr_t)dst, (caddr_t)src, PAGE_SIZE);
dcache_flush_page(dst);
}
@ -802,7 +804,7 @@ pmap_enter(struct pmap *pm, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
/*
* Have to remove any existing mapping first.
*/
pmap_remove(pm, va, va + NBPG);
pmap_remove(pm, va, va + PAGE_SIZE);
if (flags & PMAP_WIRED) flags |= prot;
@ -1008,7 +1010,7 @@ pmap_remove(struct pmap *pm, vaddr_t va, vaddr_t endva)
ppc4xx_tlb_flush(va, pm->pm_ctx);
pm->pm_stats.resident_count--;
}
va += NBPG;
va += PAGE_SIZE;
}
splx(s);
@ -1051,7 +1053,7 @@ pmap_protect(struct pmap *pm, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
*ptp &= ~TTE_WR;
ppc4xx_tlb_flush(sva, pm->pm_ctx);
}
sva += NBPG;
sva += PAGE_SIZE;
}
splx(s);
return;
@ -1109,14 +1111,14 @@ pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
pm = pv->pv_pm;
va = pv->pv_va;
pmap_protect(pm, va, va+NBPG, prot);
pmap_protect(pm, va, va+PAGE_SIZE, prot);
}
/* Now check the head pv */
if (pvh->pv_pm) {
pv = pvh;
pm = pv->pv_pm;
va = pv->pv_va;
pmap_protect(pm, va, va+NBPG, prot);
pmap_protect(pm, va, va+PAGE_SIZE, prot);
}
}
@ -1547,7 +1549,7 @@ pmap_testout()
int ref, mod;
/* Allocate a page */
va = (vaddr_t)uvm_km_alloc1(kernel_map, NBPG, 1);
va = (vaddr_t)uvm_km_alloc1(kernel_map, PAGE_SIZE, 1);
loc = (int*)va;
pmap_extract(pmap_kernel(), va, &pa);
@ -1803,6 +1805,6 @@ pmap_testout()
pmap_enter(pmap_kernel(), va, pa, VM_PROT_ALL,
VM_PROT_ALL|PMAP_WIRED);
uvm_km_free(kernel_map, (vaddr_t)va, NBPG);
uvm_km_free(kernel_map, (vaddr_t)va, PAGE_SIZE);
}
#endif


@ -1,4 +1,4 @@
/* $NetBSD: isadma_machdep.c,v 1.5 2001/07/22 14:58:20 wiz Exp $ */
/* $NetBSD: isadma_machdep.c,v 1.6 2003/04/02 04:27:17 thorpej Exp $ */
#define ISA_DMA_STATS
@ -212,7 +212,7 @@ _isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
* 32-bit DMA, and indicate that here.
*
* ...or, there is an opposite case. The most segments
* a transfer will require is (maxxfer / NBPG) + 1. If
* a transfer will require is (maxxfer / PAGE_SIZE) + 1. If
* the caller can't handle that many segments (e.g. the
* ISA DMA controller), we may have to bounce it as well.
*/
@ -223,7 +223,7 @@ _isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
}
cookieflags = 0;
if (map->_dm_bounce_thresh != 0 ||
((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
cookieflags |= ID_MIGHT_NEED_BOUNCE;
cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
}
@ -657,7 +657,7 @@ _isa_dma_alloc_bouncebuf(t, map, size, flags)
cookie->id_bouncebuflen = round_page(size);
error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
NBPG, map->_dm_boundary, cookie->id_bouncesegs,
PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
if (error)
goto out;


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.46 2003/03/18 16:40:24 matt Exp $ */
/* $NetBSD: machdep.c,v 1.47 2003/04/02 04:27:18 thorpej Exp $ */
/*
* Copyright (C) 1995, 1996 Wolfgang Solfrank.
@ -238,7 +238,7 @@ cpu_startup()
/*
* Mapping PReP interrupt vector register.
*/
prep_intr_reg = (vaddr_t) mapiodev(PREP_INTR_REG, NBPG);
prep_intr_reg = (vaddr_t) mapiodev(PREP_INTR_REG, PAGE_SIZE);
if (!prep_intr_reg)
panic("startup: no room for interrupt register");


@ -1,4 +1,4 @@
/* $NetBSD: isadma_machdep.c,v 1.3 2001/07/22 15:04:00 wiz Exp $ */
/* $NetBSD: isadma_machdep.c,v 1.4 2003/04/02 04:27:18 thorpej Exp $ */
#define ISA_DMA_STATS
@ -213,7 +213,7 @@ _isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
* 32-bit DMA, and indicate that here.
*
* ...or, there is an opposite case. The most segments
* a transfer will require is (maxxfer / NBPG) + 1. If
* a transfer will require is (maxxfer / PAGE_SIZE) + 1. If
* the caller can't handle that many segments (e.g. the
* ISA DMA controller), we may have to bounce it as well.
*/
@ -224,7 +224,7 @@ _isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
}
cookieflags = 0;
if (map->_dm_bounce_thresh != 0 ||
((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
cookieflags |= ID_MIGHT_NEED_BOUNCE;
cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
}
@ -658,7 +658,7 @@ _isa_dma_alloc_bouncebuf(t, map, size, flags)
cookie->id_bouncebuflen = round_page(size);
error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
NBPG, map->_dm_boundary, cookie->id_bouncesegs,
PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
if (error)
goto out;


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.17 2003/02/07 17:46:12 cgd Exp $ */
/* $NetBSD: machdep.c,v 1.18 2003/04/02 04:27:19 thorpej Exp $ */
/*
* Copyright 2000, 2001
@ -395,7 +395,7 @@ cpu_startup(void)
* "base" pages for the rest.
*/
curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base + 1) : base);
curbufsize = PAGE_SIZE * ((i < residual) ? (base + 1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
@ -430,7 +430,7 @@ cpu_startup(void)
format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
printf(", %s free", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf(", %s in %u buffers\n", pbuf, nbuf);
/*


@ -1,4 +1,4 @@
/* $NetBSD: hpcdma.c,v 1.5 2002/11/09 18:49:02 thorpej Exp $ */
/* $NetBSD: hpcdma.c,v 1.6 2003/04/02 04:27:19 thorpej Exp $ */
/*
* Copyright (c) 2001 Wayne Knowles
@ -48,6 +48,8 @@
#include <sys/device.h>
#include <sys/buf.h>
#include <uvm/uvm_extern.h>
#include <machine/bus.h>
#include <sgimips/hpc/hpcvar.h>
@ -76,10 +78,10 @@ hpcdma_init(struct hpc_attach_args *haa, struct hpc_dma_softc *sc, int ndesc)
/* Alloc 1 additional descriptor - needed for DMA bug fix */
allocsz = sizeof(struct hpc_dma_desc) * (ndesc + 1);
KASSERT(allocsz <= NBPG);
KASSERT(allocsz <= PAGE_SIZE);
if (bus_dmamap_create(sc->sc_dmat, NBPG, 1 /*seg*/,
NBPG, 0, BUS_DMA_WAITOK,
if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1 /*seg*/,
PAGE_SIZE, 0, BUS_DMA_WAITOK,
&sc->sc_dmamap) != 0) {
printf(": failed to create dmamap\n");
return;


@ -1,4 +1,4 @@
/* $NetBSD: bus.c,v 1.16 2002/12/23 20:41:49 pooka Exp $ */
/* $NetBSD: bus.c,v 1.17 2003/04/02 04:27:19 thorpej Exp $ */
/*
* Copyright (c) 1998 The NetBSD Foundation, Inc.
@ -371,7 +371,7 @@ _bus_dmamap_load_buffer(map, buf, buflen, p, flags, lastaddrp, segp, first)
/*
* Compute the segment size, and adjust counts.
*/
sgsize = NBPG - ((u_long)vaddr & PGOFSET);
sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
if (buflen < sgsize)
sgsize = buflen;
@ -849,7 +849,7 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
for (curseg = 0; curseg < nsegs; curseg++) {
for (addr = segs[curseg].ds_addr;
addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
addr += NBPG, va += NBPG, size -= NBPG) {
addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
if (size == 0)
panic("_bus_dmamem_map: size botch");
pmap_enter(pmap_kernel(), va, addr,


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.52 2003/03/31 00:56:53 rafal Exp $ */
/* $NetBSD: machdep.c,v 1.53 2003/04/02 04:27:19 thorpej Exp $ */
/*
* Copyright (c) 2000 Soren S. Jorvang
@ -626,7 +626,7 @@ cpu_startup()
* "base" pages for the rest.
*/
curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
curbufsize = NBPG * ((i < residual) ? (base + 1) : base);
curbufsize = PAGE_SIZE * ((i < residual) ? (base + 1) : base);
while (curbufsize) {
pg = uvm_pagealloc(NULL, 0, NULL, 0);
@ -663,7 +663,7 @@ cpu_startup()
printf(", %s free", pbuf);
format_bytes(pbuf, sizeof(pbuf), ctob(arcsmem));
printf(", %s for ARCS", pbuf);
format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
printf(", %s in %u buffers\n", pbuf, nbuf);
/*


@ -1,4 +1,4 @@
/* $NetBSD: isadma_machdep.c,v 1.3 2002/08/17 20:46:29 thorpej Exp $ */
/* $NetBSD: isadma_machdep.c,v 1.4 2003/04/02 04:27:20 thorpej Exp $ */
#define ISA_DMA_STATS
@ -193,7 +193,7 @@ _isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
* 32-bit DMA, and indicate that here.
*
* ...or, there is an opposite case. The most segments
* a transfer will require is (maxxfer / NBPG) + 1. If
* a transfer will require is (maxxfer / PAGE_SIZE) + 1. If
* the caller can't handle that many segments (e.g. the
* ISA DMA controller), we may have to bounce it as well.
*
@ -632,7 +632,7 @@ _isa_dma_alloc_bouncebuf(t, map, size, flags)
cookie->id_bouncebuflen = round_page(size);
error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
NBPG, map->_dm_boundary, cookie->id_bouncesegs,
PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
if (error)
goto out;


@ -1,4 +1,4 @@
/* $NetBSD: ofrom.c,v 1.9 2002/10/23 09:12:00 jdolecek Exp $ */
/* $NetBSD: ofrom.c,v 1.10 2003/04/02 04:27:20 thorpej Exp $ */
/*
* Copyright 1998
@ -184,10 +184,10 @@ ofromrw(dev, uio, flags)
VM_PROT_READ : VM_PROT_WRITE, PMAP_WIRED);
pmap_update(pmap_kernel());
o = uio->uio_offset & PGOFSET;
c = min(uio->uio_resid, (int)(NBPG - o));
c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
error = uiomove((caddr_t)memhook + o, c, uio);
pmap_remove(pmap_kernel(), (vm_offset_t)memhook,
(vm_offset_t)memhook + NBPG);
(vm_offset_t)memhook + PAGE_SIZE);
pmap_update(pmap_kernel());
}


@ -1,4 +1,4 @@
/* $NetBSD: ofw.c,v 1.22 2003/01/18 06:37:05 thorpej Exp $ */
/* $NetBSD: ofw.c,v 1.23 2003/04/02 04:27:20 thorpej Exp $ */
/*
* Copyright 1997
@ -875,7 +875,7 @@ ofw_configmem(void)
/* XXX Please kill this code dead. */
for (i = 0; i < bootconfig.dramblocks; i++) {
paddr_t start = (paddr_t)bootconfig.dram[i].address;
paddr_t end = start + (bootconfig.dram[i].pages * NBPG);
paddr_t end = start + (bootconfig.dram[i].pages * PAGE_SIZE);
#if NISADMA > 0
paddr_t istart, isize;
#endif
@ -1050,7 +1050,7 @@ ofw_callbackhandler(v)
int npages = size >> PGSHIFT;
ap_bits >>= 10;
for (; npages > 0; pte++, pa += NBPG, npages--)
for (; npages > 0; pte++, pa += PAGE_SIZE, npages--)
*pte = (pa | L2_AP(ap_bits) | L2_TYPE_S |
cb_bits);
PTE_SYNC_RANGE(vtopte(va), size >> PGSHIFT);
@ -1221,7 +1221,7 @@ ofw_callbackhandler(v)
/* Allocate size bytes with specified alignment. */
size = (vm_size_t)args_n_results[0];
align = (vm_offset_t)args_n_results[1];
if (align % NBPG != 0) {
if (align % PAGE_SIZE != 0) {
args_n_results[nargs + 1] = -1;
args->nreturns = 2;
return;
@ -1285,7 +1285,7 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
/* Set-up the system page. */
KASSERT(vector_page == 0); /* XXX for now */
systempage.pv_va = ofw_claimvirt(vector_page, NBPG, 0);
systempage.pv_va = ofw_claimvirt(vector_page, PAGE_SIZE, 0);
if (systempage.pv_va == -1) {
/* Something was already mapped to vector_page's VA. */
systempage.pv_va = vector_page;
@ -1295,17 +1295,17 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
} else {
/* We were just allocated the page-length range at VA 0. */
if (systempage.pv_va != vector_page)
panic("bogus result from claimvirt(vector_page, NBPG, 0)");
panic("bogus result from claimvirt(vector_page, PAGE_SIZE, 0)");
/* Now allocate a physical page, and establish the mapping. */
systempage.pv_pa = ofw_claimphys(0, NBPG, NBPG);
systempage.pv_pa = ofw_claimphys(0, PAGE_SIZE, PAGE_SIZE);
if (systempage.pv_pa == -1)
panic("bogus result from claimphys(0, NBPG, NBPG)");
panic("bogus result from claimphys(0, PAGE_SIZE, PAGE_SIZE)");
ofw_settranslation(systempage.pv_va, systempage.pv_pa,
NBPG, -1); /* XXX - mode? -JJK */
PAGE_SIZE, -1); /* XXX - mode? -JJK */
/* Zero the memory. */
bzero((char *)systempage.pv_va, NBPG);
bzero((char *)systempage.pv_va, PAGE_SIZE);
}
/* Allocate/initialize space for the proc0, NetBSD-managed */
@ -1324,11 +1324,11 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
/* Allocate/initialize space for stacks. */
#ifndef OFWGENCFG
ofw_claimpages(&virt_freeptr, &irqstack, NBPG);
ofw_claimpages(&virt_freeptr, &irqstack, PAGE_SIZE);
#endif
ofw_claimpages(&virt_freeptr, &undstack, NBPG);
ofw_claimpages(&virt_freeptr, &abtstack, NBPG);
ofw_claimpages(&virt_freeptr, &kernelstack, UPAGES * NBPG);
ofw_claimpages(&virt_freeptr, &undstack, PAGE_SIZE);
ofw_claimpages(&virt_freeptr, &abtstack, PAGE_SIZE);
ofw_claimpages(&virt_freeptr, &kernelstack, UPAGES * PAGE_SIZE);
/* Allocate/initialize space for msgbuf area. */
ofw_claimpages(&virt_freeptr, &msgbuf, MSGBUFSIZE);
@ -1365,15 +1365,15 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
oft++, tp++) {
vm_offset_t va, pa;
int npages = tp->size / NBPG;
int npages = tp->size / PAGE_SIZE;
/* Size must be an integral number of pages. */
if (npages == 0 || tp->size % NBPG != 0)
if (npages == 0 || tp->size % PAGE_SIZE != 0)
panic("illegal ofw translation (size)");
/* Make an entry for each page in the appropriate table. */
for (va = tp->virt, pa = tp->phys; npages > 0;
va += NBPG, pa += NBPG, npages--) {
va += PAGE_SIZE, pa += PAGE_SIZE, npages--) {
/*
* Map the top bits to the appropriate L2 pagetable.
* The only allowable regions are page0, the
@ -1451,10 +1451,10 @@ ofw_construct_proc0_addrspace(proc0_ttbbase, proc0_ptpt)
*/
pmap_map_entry(L1pagetable, proc0_pt_pte.pv_va,
proc0_pt_pte.pv_pa, VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
for (i = 0; i < (L1_TABLE_SIZE / NBPG); ++i)
for (i = 0; i < (L1_TABLE_SIZE / PAGE_SIZE); ++i)
pmap_map_entry(L1pagetable,
proc0_pagedir.pv_va + NBPG * i,
proc0_pagedir.pv_pa + NBPG * i,
proc0_pagedir.pv_va + PAGE_SIZE * i,
proc0_pagedir.pv_pa + PAGE_SIZE * i,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
/*
@ -1570,9 +1570,9 @@ ofw_getphysmeminfo()
struct mem_region *mp1;
/* Page-align start of the block. */
s = mp->start % NBPG;
s = mp->start % PAGE_SIZE;
if (s != 0) {
s = (NBPG - s);
s = (PAGE_SIZE - s);
if (mp->size >= s) {
mp->start += s;
@ -1581,7 +1581,7 @@ ofw_getphysmeminfo()
}
/* Page-align the size. */
mp->size -= mp->size % NBPG;
mp->size -= mp->size % PAGE_SIZE;
/* Handle empty block. */
if (mp->size == 0) {
@ -1919,7 +1919,7 @@ ofw_malloc(size)
(*ppLeftover)->size = newSize;
}
} else {
claim_size = (size + NBPG - 1) & ~(NBPG - 1);
claim_size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
ofw_claimpages(&virt_freeptr, &new, claim_size);
if ((size + sizeof(LEFTOVER)) <= claim_size) {
pLeft = (PLEFTOVER)(new.pv_va + size);
@ -1953,7 +1953,7 @@ ofw_free(addr, size)
#endif
/*
* Allocate and zero round(size)/NBPG pages of memory.
* Allocate and zero round(size)/PAGE_SIZE pages of memory.
* We guarantee that the allocated memory will be
* aligned to a boundary equal to the smallest power of
* 2 greater than or equal to size.
@ -1970,7 +1970,7 @@ ofw_claimpages(free_pp, pv_p, size)
vm_size_t size;
{
/* round-up to page boundary */
vm_size_t alloc_size = (size + NBPG - 1) & ~(NBPG - 1);
vm_size_t alloc_size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
vm_size_t aligned_size;
vm_offset_t va, pa;
@ -2010,14 +2010,14 @@ ofw_discardmappings(L2pagetable, va, size)
vm_size_t size;
{
/* round-up to page boundary */
vm_size_t alloc_size = (size + NBPG - 1) & ~(NBPG - 1);
int npages = alloc_size / NBPG;
vm_size_t alloc_size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
int npages = alloc_size / PAGE_SIZE;
if (npages == 0)
panic("ofw_discardmappings zero");
/* Discard each mapping. */
for (; npages > 0; va += NBPG, npages--) {
for (; npages > 0; va += PAGE_SIZE, npages--) {
/* Sanity. The current entry should be non-null. */
if (ReadWord(L2pagetable + ((va >> 10) & 0x00000FFC)) == 0)
panic("ofw_discardmappings zero entry");


@ -1,4 +1,4 @@
/* $NetBSD: shark_machdep.c,v 1.11 2003/01/20 01:44:10 toddpw Exp $ */
/* $NetBSD: shark_machdep.c,v 1.12 2003/04/02 04:27:21 thorpej Exp $ */
/*
* Copyright 1997
@ -251,9 +251,9 @@ initarm(ofw_handle)
* The kernel stack for SVC mode will be updated on return
* from this routine.
*/
set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + NBPG);
set_stackptr(PSR_UND32_MODE, undstack.pv_va + NBPG);
set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + NBPG);
set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + PAGE_SIZE);
set_stackptr(PSR_UND32_MODE, undstack.pv_va + PAGE_SIZE);
set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + PAGE_SIZE);
/* Set-up exception handlers. */