Use PAGE_SIZE rather than NBPG.

thorpej 2003-04-02 03:51:33 +00:00
parent dd9bc74460
commit fd53a1c386
20 changed files with 122 additions and 109 deletions
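
The change is mechanical: each use of the machine-dependent NBPG constant is replaced by the machine-independent PAGE_SIZE macro. A minimal sketch of the pattern, using the cpu_startup() buffer-sizing line that recurs in several files below (on ports where the page size is not a compile-time constant, PAGE_SIZE is supplied through the UVM headers, which appears to be why a few files also gain #include <uvm/uvm_extern.h>):

	/* before: machine-dependent page size constant */
	curbufsize = NBPG * ((i < residual) ? (base + 1) : base);

	/* after: machine-independent page size macro */
	curbufsize = PAGE_SIZE * ((i < residual) ? (base + 1) : base);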

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.6 2003/04/01 17:35:45 hpeyerl Exp $ */
+/* $NetBSD: machdep.c,v 1.7 2003/04/02 03:51:33 thorpej Exp $ */
 /*
 * Copyright (c) 1988 University of Utah.
@@ -43,7 +43,7 @@
 */
 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.6 2003/04/01 17:35:45 hpeyerl Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.7 2003/04/02 03:51:33 thorpej Exp $");
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -239,7 +239,7 @@ mach_init(int argc, char **argv, yamon_env_var *envp, u_long memsize)
 printf("Memory size: 0x%08lx\n", memsize);
 physmem = btoc(memsize);
-mem_clusters[mem_cluster_cnt].start = NBPG;
+mem_clusters[mem_cluster_cnt].start = PAGE_SIZE;
 mem_clusters[mem_cluster_cnt].size =
 memsize - mem_clusters[mem_cluster_cnt].start;
 mem_cluster_cnt++;
@@ -384,7 +384,7 @@ cpu_startup(void)
 * "base" pages for the rest.
 */
 curbuf = (vaddr_t)buffers + (i * MAXBSIZE);
-curbufsize = NBPG * ((i < residual) ? (base+1) : base);
+curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
 while (curbufsize) {
 pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -423,7 +423,7 @@ cpu_startup(void)
 #endif
 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 printf("avail memory = %s\n", pbuf);
-format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
+format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
 /*

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.8 2003/01/17 22:47:10 thorpej Exp $ */
+/* $NetBSD: machdep.c,v 1.9 2003/04/02 03:51:34 thorpej Exp $ */
 /*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
@@ -370,7 +370,7 @@ cpu_startup()
 * "base" pages for the rest.
 */
 curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
-curbufsize = NBPG * ((i < residual) ? (base + 1) : base);
+curbufsize = PAGE_SIZE * ((i < residual) ? (base + 1) : base);
 while (curbufsize) {
 pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -405,7 +405,7 @@ cpu_startup()
 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 printf(", %s free", pbuf);
-format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
+format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 printf(", %s in %u buffers\n", pbuf, nbuf);
 /*

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.9 2003/03/27 07:19:11 matt Exp $ */
+/* $NetBSD: machdep.c,v 1.10 2003/04/02 03:52:23 thorpej Exp $ */
 /*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -211,7 +211,7 @@ initppc(startkernel, endkernel, args, btinfo)
 DELAY(100000);
 gt_bus_space_init();
-gt_find_memory(&gt_mem_bs_tag, gt_memh, roundup(endkernel, NBPG));
+gt_find_memory(&gt_mem_bs_tag, gt_memh, roundup(endkernel, PAGE_SIZE));
 gt_halt(&gt_mem_bs_tag, gt_memh);
 /*

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.7 2003/03/11 10:40:17 hannken Exp $ */
+/* $NetBSD: machdep.c,v 1.8 2003/04/02 03:52:58 thorpej Exp $ */
 /*
 * Copyright 2001, 2002 Wasabi Systems, Inc.
@@ -378,8 +378,8 @@ cpu_startup(void)
 if (!(msgbuf_vaddr = uvm_km_alloc(kernel_map, round_page(MSGBUFSIZE))))
 panic("startup: no room for message buffer");
 for (i = 0; i < btoc(MSGBUFSIZE); i++)
-pmap_kenter_pa(msgbuf_vaddr + i * NBPG,
-msgbuf_paddr + i * NBPG, VM_PROT_READ|VM_PROT_WRITE);
+pmap_kenter_pa(msgbuf_vaddr + i * PAGE_SIZE,
+msgbuf_paddr + i * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE);
 initmsgbuf((caddr_t)msgbuf_vaddr, round_page(MSGBUFSIZE));
 #else
 initmsgbuf((caddr_t)msgbuf, round_page(MSGBUFSIZE));
@@ -427,7 +427,7 @@ cpu_startup(void)
 struct vm_page *pg;
 curbuf = (vaddr_t)buffers + i * MAXBSIZE;
-curbufsize = NBPG * (i < residual ? base + 1 : base);
+curbufsize = PAGE_SIZE * (i < residual ? base + 1 : base);
 while (curbufsize) {
 pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -462,7 +462,7 @@ cpu_startup(void)
 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 printf("avail memory = %s\n", pbuf);
-format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
+format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
 /*

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.44 2003/01/17 22:49:27 thorpej Exp $ */
+/* $NetBSD: machdep.c,v 1.45 2003/04/02 03:53:51 thorpej Exp $ */
 /*-
 * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -466,7 +466,7 @@ shpcmcia_mem_add_mapping(bpa, size, type, bshp)
 }
 #undef MODE
-for (; pa < endpa; pa += NBPG, va += NBPG) {
+for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
 pte = __pmap_kpte_lookup(va);
 KDASSERT(pte);

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.13 2003/03/13 13:44:20 scw Exp $ */
+/* $NetBSD: machdep.c,v 1.14 2003/04/02 03:54:27 thorpej Exp $ */
 /*
 * Copyright 2002 Wasabi Systems, Inc.
@@ -358,7 +358,7 @@ cpu_startup(void)
 * "base" pages for the rest.
 */
 curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
-curbufsize = NBPG * ((i < residual) ? (base+1) : base);
+curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
 while (curbufsize) {
 pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -400,7 +400,7 @@ cpu_startup(void)
 printf("total memory = %s\n", pbuf);
 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 printf("avail memory = %s\n", pbuf);
-format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
+format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 printf("using %u buffers containing %s bytes of memory\n", nbuf, pbuf);
 /*

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: hpc_machdep.c,v 1.55 2002/10/05 17:12:09 chs Exp $ */
+/* $NetBSD: hpc_machdep.c,v 1.56 2003/04/02 03:55:53 thorpej Exp $ */
 /*
 * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -337,7 +337,7 @@ initarm(argc, argv, bi)
 printf("kernsize=0x%x\n", kerneldatasize);
 kerneldatasize += symbolsize;
-kerneldatasize = ((kerneldatasize - 1) & ~(NBPG * 4 - 1)) + NBPG * 8;
+kerneldatasize = ((kerneldatasize - 1) & ~(PAGE_SIZE * 4 - 1)) + PAGE_SIZE * 8;
 /* parse kernel args */
 boot_file[0] = '\0';
@@ -390,7 +390,7 @@ initarm(argc, argv, bi)
 physical_freestart = physical_start
 + (KERNEL_TEXT_BASE - KERNEL_BASE) + kerneldatasize;
 physical_end = bootconfig.dram[bootconfig.dramblocks - 1].address
-+ bootconfig.dram[bootconfig.dramblocks - 1].pages * NBPG;
++ bootconfig.dram[bootconfig.dramblocks - 1].pages * PAGE_SIZE;
 physical_freeend = physical_end;
 /* free_pages = bootconfig.drampages;*/
@@ -435,15 +435,15 @@ initarm(argc, argv, bi)
 /* Define a macro to simplify memory allocation */
 #define valloc_pages(var, np) \
 (var).pv_pa = (var).pv_va = freemempos; \
-freemempos += (np) * NBPG;
+freemempos += (np) * PAGE_SIZE;
 #define alloc_pages(var, np) \
 (var) = freemempos; \
-freemempos += (np) * NBPG;
+freemempos += (np) * PAGE_SIZE;
-valloc_pages(kernel_l1pt, L1_TABLE_SIZE / NBPG);
+valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
 for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
-alloc_pages(kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE / NBPG);
+alloc_pages(kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE / PAGE_SIZE);
 kernel_pt_table[loop].pv_va = kernel_pt_table[loop].pv_pa;
 }
@@ -455,7 +455,7 @@ initarm(argc, argv, bi)
 valloc_pages(systempage, 1);
 /* Allocate a page for the page table to map kernel page tables*/
-valloc_pages(kernel_ptpt, L2_TABLE_SIZE / NBPG);
+valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
 /* Allocate stacks for all modes */
 valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -470,7 +470,7 @@ initarm(argc, argv, bi)
 printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa, kernelstack.pv_va);
 #endif
-alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / NBPG);
+alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);
 /*
 * XXX Actually, we only need virtual space and don't need
@@ -488,7 +488,7 @@ initarm(argc, argv, bi)
 }
 {
 vaddr_t dummy;
-alloc_pages(dummy, CPU_SA110_CACHE_CLEAN_SIZE / NBPG - 1);
+alloc_pages(dummy, CPU_SA110_CACHE_CLEAN_SIZE / PAGE_SIZE - 1);
 }
 sa1_cache_clean_addr = sa1_cc_base;
 sa1_cache_clean_size = CPU_SA110_CACHE_CLEAN_SIZE / 2;
@@ -561,13 +561,13 @@ initarm(argc, argv, bi)
 /* Map the stack pages */
 pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
-IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
-ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
-UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
 L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@@ -637,9 +637,12 @@ initarm(argc, argv, bi)
 */
 printf("init subsystems: stacks ");
-set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * NBPG);
-set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * NBPG);
-set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * NBPG);
+set_stackptr(PSR_IRQ32_MODE,
+irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
+set_stackptr(PSR_ABT32_MODE,
+abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
+set_stackptr(PSR_UND32_MODE,
+undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
 #ifdef PMAP_DEBUG
 if (pmap_debug_level >= 0)
 printf("kstack V%08lx P%08lx\n", kernelstack.pv_va,
@@ -669,7 +672,7 @@ initarm(argc, argv, bi)
 setttb(kernel_l1pt.pv_pa);
 #ifdef BOOT_DUMP
-dumppages((char *)0xc0000000, 16 * NBPG);
+dumppages((char *)0xc0000000, 16 * PAGE_SIZE);
 dumppages((char *)0xb0100000, 64); /* XXX */
 #endif
 /* Enable MMU, I-cache, D-cache, write buffer. */
@@ -688,7 +691,7 @@ initarm(argc, argv, bi)
 uvm_setpagesize(); /* initialize PAGE_SIZE-dependent variables */
 for (loop = 0; loop < bootconfig.dramblocks; loop++) {
 paddr_t start = (paddr_t)bootconfig.dram[loop].address;
-paddr_t end = start + (bootconfig.dram[loop].pages * NBPG);
+paddr_t end = start + (bootconfig.dram[loop].pages * PAGE_SIZE);
 if (start < physical_freestart)
 start = physical_freestart;
@@ -795,7 +798,7 @@ rpc_sa110_cc_setup(void)
 pt_entry_t *pte;
 (void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
-for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += NBPG) {
+for (loop = 0; loop < CPU_SA110_CACHE_CLEAN_SIZE; loop += PAGE_SIZE) {
 pte = vtopte(sa1_cc_base + loop);
 *pte = L2_S_PROTO | kaddr |
 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: stubs.c,v 1.14 2003/03/25 10:41:39 chris Exp $ */
+/* $NetBSD: stubs.c,v 1.15 2003/04/02 03:55:53 thorpej Exp $ */
 /*
 * Copyright (c) 1994-1998 Mark Brinicombe.
@@ -167,19 +167,19 @@ dumpsys()
 for (block = 0; block < bootconfig.dramblocks && error == 0; ++block) {
 addr = bootconfig.dram[block].address;
 for (;addr < (bootconfig.dram[block].address
-+ (bootconfig.dram[block].pages * NBPG)); addr += NBPG) {
++ (bootconfig.dram[block].pages * PAGE_SIZE)); addr += PAGE_SIZE) {
 if ((len % (1024*1024)) == 0)
 printf("%d ", len / (1024*1024));
 pmap_kenter_pa(dumpspace, addr, VM_PROT_READ);
 pmap_update(pmap_kernel());
 error = (*bdev->d_dump)(dumpdev,
-blkno, (caddr_t) dumpspace, NBPG);
-pmap_kremove(dumpspace, NBPG);
+blkno, (caddr_t) dumpspace, PAGE_SIZE);
+pmap_kremove(dumpspace, PAGE_SIZE);
 pmap_update(pmap_kernel());
 if (error) break;
-blkno += btodb(NBPG);
-len += NBPG;
+blkno += btodb(PAGE_SIZE);
+len += PAGE_SIZE;
 }
 }

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: bus_dma.c,v 1.16 2002/06/02 14:44:42 drochner Exp $ */
+/* $NetBSD: bus_dma.c,v 1.17 2003/04/02 03:58:11 thorpej Exp $ */
 /*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
@@ -166,7 +166,7 @@ _hpcmips_bd_map_load_buffer(bus_dmamap_t mapx, void *buf, bus_size_t buflen,
 /*
 * Compute the segment size, and adjust counts.
 */
-sgsize = NBPG - ((u_long)vaddr & PGOFSET);
+sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
 if (buflen < sgsize)
 sgsize = buflen;
@@ -649,7 +649,7 @@ _hpcmips_bd_mem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
 for (curseg = 0; curseg < nsegs; curseg++) {
 for (addr = segs[curseg].ds_addr;
 addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
-addr += NBPG, va += NBPG, size -= NBPG) {
+addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
 if (size == 0)
 panic("_hpcmips_bd_mem_map: size botch");
 pmap_enter(pmap_kernel(), va, addr,

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: bus_space.c,v 1.20 2002/09/27 15:36:05 provos Exp $ */
+/* $NetBSD: bus_space.c,v 1.21 2003/04/02 03:58:11 thorpej Exp $ */
 /*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
@@ -211,7 +211,7 @@ hpcmips_init_bus_space(struct bus_space_tag_hpcmips *t,
 (unsigned int)t->base, (unsigned int)va, t->size));
 t->base = va; /* kseg2 addr */
-for (; pa < endpa; pa += NBPG, va += NBPG) {
+for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
 pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
 }
 pmap_update(pmap_kernel());
@@ -241,7 +241,7 @@ __hpcmips_cacheable(struct bus_space_tag_hpcmips *t, bus_addr_t bpa,
 mips_dcache_wbinv_range(va, endva - va);
-for (; va < endva; va += NBPG) {
+for (; va < endva; va += PAGE_SIZE) {
 pte = kvtopte(va);
 opte = pte->pt_entry;
 if (cacheable) {

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.79 2003/01/17 22:58:53 thorpej Exp $ */
+/* $NetBSD: machdep.c,v 1.80 2003/04/02 03:58:12 thorpej Exp $ */
 /*-
 * Copyright (c) 1999 Shin Takemura, All rights reserved.
@@ -73,7 +73,7 @@
 */
 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.79 2003/01/17 22:58:53 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.80 2003/04/02 03:58:12 thorpej Exp $");
 #include "opt_vr41xx.h"
 #include "opt_tx39xx.h"
@@ -572,7 +572,7 @@ cpu_startup()
 * "base" pages for the rest.
 */
 curbuf = (vaddr_t)buffers + (i * MAXBSIZE);
-curbufsize = NBPG * ((i < residual) ? (base+1) : base);
+curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
 while (curbufsize) {
 pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -611,7 +611,7 @@ cpu_startup()
 #endif
 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 printf("avail memory = %s\n", pbuf);
-format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
+format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
 /*

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: tx39.c,v 1.31 2002/01/31 17:56:35 uch Exp $ */
+/* $NetBSD: tx39.c,v 1.32 2003/04/02 03:58:12 thorpej Exp $ */
 /*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -44,12 +44,13 @@
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <uvm/uvm_extern.h>
 #include <mips/cache.h>
 #include <machine/locore.h> /* cpu_id */
 #include <machine/bootinfo.h> /* bootinfo */
 #include <machine/sysconf.h> /* platform */
 #include <machine/vmparam.h> /* mem_cluster */
 #include <machine/platid.h>
 #include <machine/platid_mask.h>
@@ -206,7 +207,7 @@ tx_find_dram(paddr_t start, paddr_t end)
 if (MAGIC0 != magic0 || MAGIC1 != magic1)
 return;
-for (page += NBPG; page < endaddr; page += NBPG) {
+for (page += PAGE_SIZE; page < endaddr; page += PAGE_SIZE) {
 if (badaddr(page, 4))
 return;
 if (MAGIC0 == magic0 &&
@@ -216,7 +217,7 @@ tx_find_dram(paddr_t start, paddr_t end)
 }
 /* check for 32MByte memory */
-page -= NBPG;
+page -= PAGE_SIZE;
 MAGIC0 = magic0;
 MAGIC1 = magic1;
 wbflush();

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: vr.c,v 1.40 2002/11/24 06:02:24 shin Exp $ */
+/* $NetBSD: vr.c,v 1.41 2003/04/02 03:58:13 thorpej Exp $ */
 /*-
 * Copyright (c) 1999-2002
@@ -350,7 +350,7 @@ vr_find_dram(paddr_t addr, paddr_t end)
 end = VR_FIND_DRAMLIM;
 #endif /* VR_FIND_DRAMLIM */
 n = mem_cluster_cnt;
-for (; addr < end; addr += NBPG) {
+for (; addr < end; addr += PAGE_SIZE) {
 page = (void *)MIPS_PHYS_TO_KSEG1(addr);
 if (badaddr(page, 4))
@@ -374,25 +374,25 @@ vr_find_dram(paddr_t addr, paddr_t end)
 #ifdef NARLY_MEMORY_PROBE
 x = random();
-for (i = 0; i < NBPG; i += 4)
+for (i = 0; i < PAGE_SIZE; i += 4)
 *(volatile int *)(page+i) = (x ^ i);
 wbflush();
-for (i = 0; i < NBPG; i += 4)
+for (i = 0; i < PAGE_SIZE; i += 4)
 if (*(volatile int *)(page+i) != (x ^ i))
 goto bad;
 x = random();
-for (i = 0; i < NBPG; i += 4)
+for (i = 0; i < PAGE_SIZE; i += 4)
 *(volatile int *)(page+i) = (x ^ i);
 wbflush();
-for (i = 0; i < NBPG; i += 4)
+for (i = 0; i < PAGE_SIZE; i += 4)
 if (*(volatile int *)(page+i) != (x ^ i))
 goto bad;
 #endif /* NARLY_MEMORY_PROBE */
 if (!mem_clusters[n].size)
 mem_clusters[n].start = addr;
-mem_clusters[n].size += NBPG;
+mem_clusters[n].size += PAGE_SIZE;
 continue;
 bad:

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.39 2003/01/18 23:20:24 thorpej Exp $ */
+/* $NetBSD: machdep.c,v 1.40 2003/04/02 03:59:23 thorpej Exp $ */
 /*-
 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
@@ -54,6 +54,8 @@
 #include <sys/kcore.h>
 #include <sys/boot_flag.h>
+#include <uvm/uvm_extern.h>
 #include <ufs/mfs/mfs_extern.h> /* mfs_initminiroot() */
 #include <sh3/cpu.h>
@@ -488,14 +490,14 @@ __find_dram_shadow(paddr_t start, paddr_t end)
 *(volatile int *)(page + 4) != ~x)
 return;
-for (page += NBPG; page < endaddr; page += NBPG) {
+for (page += PAGE_SIZE; page < endaddr; page += PAGE_SIZE) {
 if (*(volatile int *)(page + 0) == x &&
 *(volatile int *)(page + 4) == ~x) {
 goto memend_found;
 }
 }
-page -= NBPG;
+page -= PAGE_SIZE;
 *(volatile int *)(page + 0) = x;
 *(volatile int *)(page + 4) = ~x;
@@ -524,18 +526,18 @@ __check_dram(paddr_t start, paddr_t end)
 int i, x;
 _DPRINTF(" checking...");
-for (; start < end; start += NBPG) {
+for (; start < end; start += PAGE_SIZE) {
 page = (u_int8_t *)SH3_PHYS_TO_P2SEG (start);
 x = random();
-for (i = 0; i < NBPG; i += 4)
+for (i = 0; i < PAGE_SIZE; i += 4)
 *(volatile int *)(page + i) = (x ^ i);
-for (i = 0; i < NBPG; i += 4)
+for (i = 0; i < PAGE_SIZE; i += 4)
 if (*(volatile int *)(page + i) != (x ^ i))
 goto bad;
 x = random();
-for (i = 0; i < NBPG; i += 4)
+for (i = 0; i < PAGE_SIZE; i += 4)
 *(volatile int *)(page + i) = (x ^ i);
-for (i = 0; i < NBPG; i += 4)
+for (i = 0; i < PAGE_SIZE; i += 4)
 if (*(volatile int *)(page + i) != (x ^ i))
 goto bad;
 }

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: bus_dma.c,v 1.9 2002/06/02 14:44:43 drochner Exp $ */
+/* $NetBSD: bus_dma.c,v 1.10 2003/04/02 04:00:45 thorpej Exp $ */
 /*-
 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
@@ -182,7 +182,7 @@ _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, lastaddrp, segp, first)
 /*
 * Compute the segment size, and adjust counts.
 */
-sgsize = NBPG - ((u_long)vaddr & PGOFSET);
+sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
 if (buflen < sgsize)
 sgsize = buflen;
@@ -688,7 +688,7 @@ _bus_dmamem_map(t, segs, nsegs, size, kvap, flags)
 segs[curseg]._ds_vaddr = va;
 for (addr = segs[curseg]._ds_paddr;
 addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
-addr += NBPG, va += NBPG, size -= NBPG) {
+addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
 if (size == 0)
 panic("_bus_dmamem_map: size botch");
 pmap_enter(pmap_kernel(), va, addr,

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.36 2003/01/17 23:38:08 thorpej Exp $ */
+/* $NetBSD: machdep.c,v 1.37 2003/04/02 04:00:45 thorpej Exp $ */
 /*
 * Copyright (c) 1988 University of Utah.
@@ -43,7 +43,7 @@
 #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.36 2003/01/17 23:38:08 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.37 2003/04/02 04:00:45 thorpej Exp $");
 /* from: Utah Hdr: machdep.c 1.63 91/04/24 */
@@ -431,7 +431,7 @@ cpu_startup()
 * "base" pages for the rest.
 */
 curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
-curbufsize = NBPG * ((i < residual) ? (base+1) : base);
+curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
 while (curbufsize) {
 pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -469,7 +469,7 @@ cpu_startup()
 #endif
 format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 printf("avail memory = %s\n", pbuf);
-format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
+format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
 /*
@@ -713,7 +713,7 @@ memsize_scan(first)
 if (*vp != PATTERN2)
 break;
 *vp = tmp;
-vp += NBPG/sizeof(int);
+vp += PAGE_SIZE/sizeof(int);
 mem++;
 }
 *vp0 = tmp0;

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: asc.c,v 1.12 2002/10/02 05:38:10 thorpej Exp $ */
+/* $NetBSD: asc.c,v 1.13 2003/04/02 04:00:46 thorpej Exp $ */
 /*-
 * Copyright (c) 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
@@ -44,6 +44,8 @@
 #include <sys/buf.h>
 #include <sys/malloc.h>
+#include <uvm/uvm_extern.h>
 #include <dev/scsipi/scsi_all.h>
 #include <dev/scsipi/scsipi_all.h>
 #include <dev/scsipi/scsiconf.h>
@@ -116,7 +118,7 @@ static int asc_intr (void *);
 #define MAX_SCSI_XFER (64*1024)
 #define MAX_DMA_SZ MAX_SCSI_XFER
-#define DMA_SEGS (MAX_DMA_SZ/NBPG)
+#define DMA_SEGS (MAX_DMA_SZ/PAGE_SIZE)
 static int
 ascmatch(struct device *parent, struct cfdata *cf, void *aux)

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: isadma_machdep.c,v 1.1 2002/02/27 21:02:22 scw Exp $ */
+/* $NetBSD: isadma_machdep.c,v 1.2 2003/04/02 04:01:33 thorpej Exp $ */
 #define ISA_DMA_STATS
@@ -212,7 +212,7 @@ _isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
 * 32-bit DMA, and indicate that here.
 *
 * ...or, there is an opposite case. The most segments
-* a transfer will require is (maxxfer / NBPG) + 1. If
+* a transfer will require is (maxxfer / PAGE_SIZE) + 1. If
 * the caller can't handle that many segments (e.g. the
 * ISA DMA controller), we may have to bounce it as well.
 */
@@ -223,7 +223,7 @@ _isa_bus_dmamap_create(t, size, nsegments, maxsegsz, boundary, flags, dmamp)
 }
 cookieflags = 0;
 if (map->_dm_bounce_thresh != 0 ||
-((map->_dm_size / NBPG) + 1) > map->_dm_segcnt) {
+((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) {
 cookieflags |= ID_MIGHT_NEED_BOUNCE;
 cookiesize += (sizeof(bus_dma_segment_t) * map->_dm_segcnt);
 }
@@ -657,7 +657,7 @@ _isa_dma_alloc_bouncebuf(t, map, size, flags)
 cookie->id_bouncebuflen = round_page(size);
 error = _isa_bus_dmamem_alloc(t, cookie->id_bouncebuflen,
-NBPG, map->_dm_boundary, cookie->id_bouncesegs,
+PAGE_SIZE, map->_dm_boundary, cookie->id_bouncesegs,
 map->_dm_segcnt, &cookie->id_nbouncesegs, flags);
 if (error)
 goto out;

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.10 2003/03/18 16:40:21 matt Exp $ */
+/* $NetBSD: machdep.c,v 1.11 2003/04/02 04:01:33 thorpej Exp $ */
 /*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -230,7 +230,7 @@ cpu_startup()
 /*
 * Mapping PReP-compatible interrput vector register.
 */
-mvmeppc_intr_reg = (vaddr_t) mapiodev(MVMEPPC_INTR_REG, NBPG);
+mvmeppc_intr_reg = (vaddr_t) mapiodev(MVMEPPC_INTR_REG, PAGE_SIZE);
 if (!mvmeppc_intr_reg)
 panic("startup: no room for interrupt register");

View File

@@ -1,4 +1,4 @@
-/* $NetBSD: netwinder_machdep.c,v 1.43 2003/03/28 10:45:40 he Exp $ */
+/* $NetBSD: netwinder_machdep.c,v 1.44 2003/04/02 04:04:00 thorpej Exp $ */
 /*
 * Copyright (c) 1997,1998 Mark Brinicombe.
@@ -54,6 +54,8 @@
 #include <sys/reboot.h>
 #include <sys/termios.h>
+#include <uvm/uvm_extern.h>
 #include <dev/cons.h>
 #include <machine/db_machdep.h>
@@ -429,19 +431,19 @@ initarm(void *arg)
 * is free. We start there and allocate upwards.
 */
 physical_start = bootconfig.dram[0].address;
-physical_end = physical_start + (bootconfig.dram[0].pages * NBPG);
+physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);
 physical_freestart = ((((vaddr_t) _end) + PGOFSET) & ~PGOFSET) -
 KERNEL_BASE;
 physical_freeend = physical_end;
-free_pages = (physical_freeend - physical_freestart) / NBPG;
+free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;
 #ifdef VERBOSE_INIT_ARM
 printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
 physical_freestart, free_pages, free_pages);
 #endif
-physmem = (physical_end - physical_start) / NBPG;
+physmem = (physical_end - physical_start) / PAGE_SIZE;
 /* Tell the user about the memory */
 printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
@@ -472,9 +474,9 @@ initarm(void *arg)
 #define alloc_pages(var, np) \
 (var) = physical_freestart; \
-physical_freestart += ((np) * NBPG); \
+physical_freestart += ((np) * PAGE_SIZE);\
 free_pages -= (np); \
-memset((char *)(var), 0, ((np) * NBPG));
+memset((char *)(var), 0, ((np) * PAGE_SIZE));
 loop1 = 0;
 kernel_l1pt.pv_pa = 0;
@@ -482,10 +484,10 @@ initarm(void *arg)
 /* Are we 16KB aligned for an L1 ? */
 if ((physical_freestart & (L1_TABLE_SIZE - 1)) == 0
 && kernel_l1pt.pv_pa == 0) {
-valloc_pages(kernel_l1pt, L1_TABLE_SIZE / NBPG);
+valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
 } else {
 alloc_pages(kernel_pt_table[loop1].pv_pa,
-L2_TABLE_SIZE / NBPG);
+L2_TABLE_SIZE / PAGE_SIZE);
 kernel_pt_table[loop1].pv_va =
 kernel_pt_table[loop1].pv_pa;
 ++loop1;
@@ -504,7 +506,7 @@ initarm(void *arg)
 alloc_pages(systempage.pv_pa, 1);
 /* Allocate a page for the page table to map kernel page tables*/
-valloc_pages(kernel_ptpt, L2_TABLE_SIZE / NBPG);
+valloc_pages(kernel_ptpt, L2_TABLE_SIZE / PAGE_SIZE);
 /* Allocate stacks for all modes */
 valloc_pages(irqstack, IRQ_STACK_SIZE);
@@ -523,7 +525,7 @@ initarm(void *arg)
 kernelstack.pv_va);
 #endif
-alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / NBPG);
+alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);
 /*
 * Ok we have allocated physical pages for the primary kernel
@@ -594,13 +596,13 @@ initarm(void *arg)
 /* Map the stack pages */
 pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
-IRQ_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
-ABT_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
-UND_STACK_SIZE * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
-UPAGES * NBPG, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
 pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
 L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
@@ -699,9 +701,12 @@ initarm(void *arg)
 */
 printf("init subsystems: stacks ");
-set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + IRQ_STACK_SIZE * NBPG);
-set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ABT_STACK_SIZE * NBPG);
-set_stackptr(PSR_UND32_MODE, undstack.pv_va + UND_STACK_SIZE * NBPG);
+set_stackptr(PSR_IRQ32_MODE,
+irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
+set_stackptr(PSR_ABT32_MODE,
+abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
+set_stackptr(PSR_UND32_MODE,
+undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);
 /*
 * Well we should set a data abort handler.
@@ -728,7 +733,7 @@ initarm(void *arg)
 /* XXX Always one RAM block -- nuke the loop. */
 for (loop = 0; loop < bootconfig.dramblocks; loop++) {
 paddr_t start = (paddr_t)bootconfig.dram[loop].address;
-paddr_t end = start + (bootconfig.dram[loop].pages * NBPG);
+paddr_t end = start + (bootconfig.dram[loop].pages * PAGE_SIZE);
 #if NISADMA > 0
 paddr_t istart, isize;
 extern struct arm32_dma_range *footbridge_isa_dma_ranges;