Use PAGE_SIZE rather than NBPG.
commit 747aa1e024
parent d3f30fc625
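The change is mechanical: every use of the machine-dependent NBPG constant becomes the machine-independent PAGE_SIZE macro. That is also why sbus.c switches its include from <uvm/uvm_prot.h> to <uvm/uvm_extern.h> (presumably to pick up the PAGE_SIZE definition from the MI VM headers) and why pmap.c can drop its PAGE_SIZE != NBPG sanity check. A minimal sketch of the pattern, assuming the usual NetBSD definitions (NBPG from <machine/param.h>, PAGE_SIZE supplied by the MI VM headers); the helper below is illustrative only, not code from this commit:

	/* Illustrative sketch only -- not part of the commit. */
	#include <sys/param.h>
	#include <uvm/uvm_extern.h>	/* the MI VM headers supply PAGE_SIZE */

	/* Before: size = NBPG << tsbsize;  (machine-dependent constant)  */
	/* After:  the machine-independent macro is used instead.         */
	static size_t
	example_tsb_bytes(int tsbsize)	/* hypothetical helper */
	{
		return ((size_t)PAGE_SIZE << tsbsize);
	}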
--- iommu.c
+++ iommu.c
@@ -1,4 +1,4 @@
-/* $NetBSD: iommu.c,v 1.63 2003/01/16 21:55:52 petrov Exp $ */
+/* $NetBSD: iommu.c,v 1.64 2003/04/01 16:34:58 thorpej Exp $ */
 
 /*
  * Copyright (c) 2001, 2002 Eduardo Horvath
@@ -121,9 +121,9 @@ iommu_init(name, is, tsbsize, iovabase)
 	 * contiguous.
 	 */
 
-	size = NBPG << is->is_tsbsize;
+	size = PAGE_SIZE << is->is_tsbsize;
 	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
-		(paddr_t)NBPG, (paddr_t)0, &pglist, 1, 0) != 0)
+		(paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
 		panic("iommu_init: no memory");
 
 	va = uvm_km_valloc(kernel_map, size);
@@ -137,7 +137,7 @@ iommu_init(name, is, tsbsize, iovabase)
 	TAILQ_FOREACH(pg, &pglist, pageq) {
 		pa = VM_PAGE_TO_PHYS(pg);
 		pmap_kenter_pa(va, pa | PMAP_NVC, VM_PROT_READ | VM_PROT_WRITE);
-		va += NBPG;
+		va += PAGE_SIZE;
 	}
 	pmap_update(pmap_kernel());
 	memset(is->is_tsb, 0, size);
@@ -182,8 +182,8 @@ iommu_init(name, is, tsbsize, iovabase)
 	    (unsigned long long)is->is_ptsb,
 	    (unsigned long long)(is->is_ptsb + size));
 	is->is_dvmamap = extent_create(name,
-	    is->is_dvmabase, is->is_dvmaend - NBPG,
-	    M_DEVBUF, 0, 0, EX_NOWAIT);
+	    is->is_dvmabase, is->is_dvmaend - PAGE_SIZE,
+	    M_DEVBUF, 0, 0, EX_NOWAIT);
 }
 
 /*
@@ -326,16 +326,16 @@ iommu_remove(is, va, len)
 		    "for va %p size %lx\n",
 		    (int)IOTSBSLOT(va,is->is_tsbsize), (void *)(u_long)va,
 		    (u_long)len));
-		if (len <= NBPG)
+		if (len <= PAGE_SIZE)
 			len = 0;
 		else
-			len -= NBPG;
+			len -= PAGE_SIZE;
 
 		/* XXX Zero-ing the entry would not require RMW */
 		is->is_tsb[IOTSBSLOT(va,is->is_tsbsize)] &= ~IOTTE_V;
 		bus_space_write_8(is->is_bustag, is->is_iommu,
 		    IOMMUREG(iommu_flush), va);
-		va += NBPG;
+		va += PAGE_SIZE;
 	}
 }
 
@@ -457,7 +457,7 @@ iommu_dvmamap_load(t, sb, map, buf, buflen, p, flags)
 	 */
 	if ((boundary = (map->dm_segs[0]._ds_boundary)) == 0)
 		boundary = map->_dm_boundary;
-	align = max(map->dm_segs[0]._ds_align, NBPG);
+	align = max(map->dm_segs[0]._ds_align, PAGE_SIZE);
 
 	/*
 	 * If our segment size is larger than the boundary we need to
@@ -548,7 +548,7 @@ iommu_dvmamap_load(t, sb, map, buf, buflen, p, flags)
 		/*
 		 * Compute the segment size, and adjust counts.
 		 */
-		sgsize = NBPG - ((u_long)vaddr & PGOFSET);
+		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
 		if (buflen < sgsize)
 			sgsize = buflen;
 
@@ -556,7 +556,7 @@ iommu_dvmamap_load(t, sb, map, buf, buflen, p, flags)
 		    ("iommu_dvmamap_load: map %p loading va %p "
 		    "dva %lx at pa %lx\n",
 		    map, (void *)vaddr, (long)dvmaddr,
-		    (long)(curaddr & ~(NBPG-1))));
+		    (long)(curaddr & ~(PAGE_SIZE-1))));
 		iommu_enter(sb, trunc_page(dvmaddr), trunc_page(curaddr),
 		    flags|0x4000);
 
@@ -940,7 +940,7 @@ iommu_dvmamap_sync(t, sb, map, offset, len, ops)
 		if ((tte & IOTTE_STREAM) && sb->sb_flush) {
 			vaend = (va + len + PGOFSET) & ~PGOFSET;
 
-			for (va &= ~PGOFSET; va <= vaend; va += NBPG) {
+			for (va &= ~PGOFSET; va <= vaend; va += PAGE_SIZE) {
 				DPRINTF(IDB_BUSDMA,
 				    ("iommu_dvmamap_sync: flushing va %p\n",
 				    (void *)(u_long)va));
--- psycho.c
+++ psycho.c
@@ -1,4 +1,4 @@
-/* $NetBSD: psycho.c,v 1.58 2003/03/22 06:33:09 nakayama Exp $ */
+/* $NetBSD: psycho.c,v 1.59 2003/04/01 16:34:58 thorpej Exp $ */
 
 /*
  * Copyright (c) 2001, 2002 Eduardo E. Horvath
@@ -765,7 +765,7 @@ psycho_ue(arg)
 	struct psychoreg *regs = sc->sc_regs;
 	long long afsr = regs->psy_ue_afsr;
 	long long afar = regs->psy_ue_afar;
-	long size = NBPG<<(sc->sc_is->is_tsbsize);
+	long size = PAGE_SIZE<<(sc->sc_is->is_tsbsize);
 	struct iommu_state *is = sc->sc_is;
 	char bits[128];
 
@@ -781,7 +781,7 @@ psycho_ue(arg)
 	/* Sometimes the AFAR points to an IOTSB entry */
 	if (afar >= is->is_ptsb && afar < is->is_ptsb + size) {
 		printf("IOVA %llx IOTTE %llx\n",
-		    (long long)((afar - is->is_ptsb) * NBPG + is->is_dvmabase),
+		    (long long)((afar - is->is_ptsb) * PAGE_SIZE + is->is_dvmabase),
 		    (long long)ldxa(afar, ASI_PHYS_CACHED));
 	}
 #ifdef DDB
--- sbus.c
+++ sbus.c
@@ -1,4 +1,4 @@
-/* $NetBSD: sbus.c,v 1.58 2003/01/01 02:22:56 thorpej Exp $ */
+/* $NetBSD: sbus.c,v 1.59 2003/04/01 16:34:58 thorpej Exp $ */
 
 /*
  * Copyright (c) 1999-2002 Eduardo Horvath
@@ -50,7 +50,7 @@
 #include <sparc64/dev/sbusreg.h>
 #include <dev/sbus/sbusvar.h>
 
-#include <uvm/uvm_prot.h>
+#include <uvm/uvm_extern.h>
 
 #include <machine/autoconf.h>
 #include <machine/cpu.h>
@@ -290,8 +290,9 @@ sbus_attach(parent, self, aux)
 		u_long dummy;
 
 		if (extent_alloc_subregion(sc->sc_is.is_dvmamap,
-		    sc->sc_is.is_dvmabase, sc->sc_is.is_dvmabase + NBPG, NBPG,
-		    NBPG, 0, EX_NOWAIT|EX_BOUNDZERO, (u_long *)&dummy) != 0)
+		    sc->sc_is.is_dvmabase, sc->sc_is.is_dvmabase + PAGE_SIZE,
+		    PAGE_SIZE, PAGE_SIZE, 0, EX_NOWAIT|EX_BOUNDZERO,
+		    (u_long *)&dummy) != 0)
 			panic("sbus iommu: can't toss first dvma page");
 	}
 
--- clock.c
+++ clock.c
@@ -1,4 +1,4 @@
-/* $NetBSD: clock.c,v 1.55 2003/02/05 12:06:52 nakayama Exp $ */
+/* $NetBSD: clock.c,v 1.56 2003/04/01 16:34:59 thorpej Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -255,7 +255,7 @@ clockattach_sbus(parent, self, aux)
 
 	if (sbus_bus_map(bt,
 			 sa->sa_slot,
-			 (sa->sa_offset & ~NBPG),
+			 (sa->sa_offset & ~PAGE_SIZE),
 			 sz,
 			 BUS_SPACE_MAP_LINEAR|BUS_SPACE_MAP_READONLY,
 			 &ci.ci_bh) != 0) {
@@ -298,7 +298,8 @@ clock_wenable(handle, onoff)
 		    (vaddr_t)bus_space_vaddr(ci->ci_bt, ci->ci_bh);
 
 		if (vaddr)
-			pmap_protect(pmap_kernel(), vaddr, vaddr+NBPG, prot);
+			pmap_protect(pmap_kernel(), vaddr, vaddr+PAGE_SIZE,
+			    prot);
 		else
 			printf("clock_wenable: WARNING -- cannot get va\n");
 	}
--- cpu.c
+++ cpu.c
@@ -1,4 +1,4 @@
-/* $NetBSD: cpu.c,v 1.28 2003/02/05 12:06:52 nakayama Exp $ */
+/* $NetBSD: cpu.c,v 1.29 2003/04/01 16:34:59 thorpej Exp $ */
 
 /*
  * Copyright (c) 1996
@@ -139,7 +139,7 @@ cpu_init(pa, cpu_num)
 	struct pglist pglist;
 	int error;
 
-	size = NBPG; /* XXXX 8K, 64K, 512K, or 4MB */
+	size = PAGE_SIZE; /* XXXX 8K, 64K, 512K, or 4MB */
 	if ((error = uvm_pglistalloc(size, (paddr_t)0, (paddr_t)-1,
 		(paddr_t)size, (paddr_t)0, &pglist, 1, 0)) != 0)
 		panic("cpu_start: no memory, error %d", error);
@@ -165,7 +165,7 @@ cpu_init(pa, cpu_num)
 		pa = VM_PAGE_TO_PHYS(pg);
 		pmap_zero_page(pa);
 		pmap_kenter_pa(va, pa | PMAP_NVC, VM_PROT_READ | VM_PROT_WRITE);
-		va += NBPG;
+		va += PAGE_SIZE;
 	}
 	pmap_update(pmap_kernel());
 
--- kgdb_stub.c
+++ kgdb_stub.c
@@ -1,4 +1,4 @@
-/* $NetBSD: kgdb_stub.c,v 1.10 2003/01/21 20:42:02 martin Exp $ */
+/* $NetBSD: kgdb_stub.c,v 1.11 2003/04/01 16:34:59 thorpej Exp $ */
 
 /*
  * Copyright (c) 1992, 1993
@@ -636,7 +636,7 @@ kgdb_acc(addr, len, rw, usertoo)
 			return (1);
 	}
 	addr = (caddr_t)((int)addr & ~PGOFSET);
-	for (; len > 0; len -= NBPG, addr += NBPG) {
+	for (; len > 0; len -= PAGE_SIZE, addr += PAGE_SIZE) {
 		if (((int)addr >> PG_VSHIFT) != 0 &&
 		    ((int)addr >> PG_VSHIFT) != -1)
 			return (0);
@@ -669,7 +669,7 @@ kdb_mkwrite(addr, len)
 	}
 
 	addr = (caddr_t)((int)addr & ~PGOFSET);
-	for (; len > 0; len -= NBPG, addr += NBPG)
+	for (; len > 0; len -= PAGE_SIZE, addr += PAGE_SIZE)
 #if defined(SUN4M)
 		if (CPU_ISSUN4M)
 			setpte4m((vaddr_t)addr,
--- machdep.c
+++ machdep.c
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.139 2003/02/09 19:44:20 martin Exp $ */
+/* $NetBSD: machdep.c,v 1.140 2003/04/01 16:34:59 thorpej Exp $ */
 
 /*-
  * Copyright (c) 1996, 1997, 1998 The NetBSD Foundation, Inc.
@@ -240,7 +240,7 @@ cpu_startup()
 		 * "base" pages for the rest.
 		 */
 		curbuf = (vaddr_t) buffers + (i * MAXBSIZE);
-		curbufsize = NBPG * ((i < residual) ? (base+1) : base);
+		curbufsize = PAGE_SIZE * ((i < residual) ? (base+1) : base);
 
 		while (curbufsize) {
 			pg = uvm_pagealloc(NULL, 0, NULL, 0);
@@ -273,7 +273,7 @@ cpu_startup()
 #endif
 	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
 	printf("avail memory = %s\n", pbuf);
-	format_bytes(pbuf, sizeof(pbuf), bufpages * NBPG);
+	format_bytes(pbuf, sizeof(pbuf), bufpages * PAGE_SIZE);
 	printf("using %u buffers containing %s of memory\n", nbuf, pbuf);
 
 	/*
@@ -980,7 +980,7 @@ cpu_dumpconf()
 		dumpsize = physmem;
 }
 
-#define BYTES_PER_DUMP (NBPG) /* must be a multiple of pagesize */
+#define BYTES_PER_DUMP (PAGE_SIZE) /* must be a multiple of pagesize */
 static vaddr_t dumpspace;
 
 caddr_t
@@ -1054,9 +1054,9 @@ printf("starting dump, blkno %lld\n", (long long)blkno);
 	/* Remind me: why don't we dump page 0 ? */
 	if (maddr == 0) {
 		/* Skip first page at physical address 0 */
-		maddr += NBPG;
-		i += NBPG;
-		blkno += btodb(NBPG);
+		maddr += PAGE_SIZE;
+		i += PAGE_SIZE;
+		blkno += btodb(PAGE_SIZE);
 	}
 #endif
 	for (; i < mp->size; i += n) {
@@ -1301,7 +1301,7 @@ _bus_dmamap_load(t, map, buf, buflen, p, flags)
 	map->dm_segs[i].ds_addr = NULL;
 	map->dm_segs[i].ds_len = 0;
 
-	incr = NBPG - (vaddr & PGOFSET);
+	incr = PAGE_SIZE - (vaddr & PGOFSET);
 	while (sgsize > 0) {
 		paddr_t pa;
 
@@ -1316,13 +1316,13 @@ _bus_dmamap_load(t, map, buf, buflen, p, flags)
 		    && ((map->dm_segs[i].ds_len + incr) <= map->_dm_maxsegsz)) {
 			/* Hey, waddyaknow, they're contiguous */
 			map->dm_segs[i].ds_len += incr;
-			incr = NBPG;
+			incr = PAGE_SIZE;
 			continue;
 		}
 		if (++i >= map->_dm_segcnt)
 			return (E2BIG);
 		map->dm_segs[i].ds_addr = pa;
-		map->dm_segs[i].ds_len = incr = NBPG;
+		map->dm_segs[i].ds_len = incr = PAGE_SIZE;
 	}
 	map->dm_nsegs = i + 1;
 	/* Mapping is bus dependent */
@@ -1358,7 +1358,7 @@ _bus_dmamap_load_mbuf(t, map, m, flags)
 		paddr_t pa;
 		long incr;
 
-		incr = NBPG - (vaddr & PGOFSET);
+		incr = PAGE_SIZE - (vaddr & PGOFSET);
 		incr = min(buflen, incr);
 
 		(void) pmap_extract(pmap_kernel(), vaddr, &pa);
@@ -1494,7 +1494,7 @@ _bus_dmamap_load_uio(t, map, uio, flags)
 			paddr_t pa;
 			long incr;
 
-			incr = min(buflen, NBPG);
+			incr = min(buflen, PAGE_SIZE);
 			(void) pmap_extract(pm, vaddr, &pa);
 			buflen -= incr;
 			vaddr += incr;
@@ -1624,9 +1624,9 @@ _bus_dmamap_sync(t, map, offset, len, ops)
 			continue;
 		TAILQ_FOREACH(pg, pglist, pageq) {
 			paddr_t start;
-			psize_t size = NBPG;
+			psize_t size = PAGE_SIZE;
 
-			if (offset < NBPG) {
+			if (offset < PAGE_SIZE) {
 				start = VM_PAGE_TO_PHYS(pg) + offset;
 				if (size > len)
 					size = len;
@@ -1937,7 +1937,7 @@ sparc_bus_map(t, addr, size, flags, unused, hp)
 
 	if (!(flags & BUS_SPACE_MAP_CACHEABLE)) pm_flags |= PMAP_NC;
 
-	if ((err = extent_alloc(io_space, size, NBPG,
+	if ((err = extent_alloc(io_space, size, PAGE_SIZE,
 		0, EX_NOWAIT|EX_BOUNDZERO, (u_long *)&v)))
 			panic("sparc_bus_map: cannot allocate io_space: %d", err);
 
--- mem.c
+++ mem.c
@@ -1,4 +1,4 @@
-/* $NetBSD: mem.c,v 1.24 2003/01/18 06:55:25 thorpej Exp $ */
+/* $NetBSD: mem.c,v 1.25 2003/04/01 16:34:59 thorpej Exp $ */
 
 /*
  * Copyright (c) 1988 University of Utah.
@@ -126,10 +126,10 @@ mmrw(dev, uio, flags)
 			    trunc_page(v), prot, prot|PMAP_WIRED);
 			pmap_update(pmap_kernel());
 			o = uio->uio_offset & PGOFSET;
-			c = min(uio->uio_resid, (int)(NBPG - o));
+			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
 			error = uiomove((caddr_t)vmmap + o, c, uio);
 			pmap_remove(pmap_kernel(), (vaddr_t)vmmap,
-			    (vaddr_t)vmmap + NBPG);
+			    (vaddr_t)vmmap + PAGE_SIZE);
 			pmap_update(pmap_kernel());
 			break;
 #else
@@ -140,7 +140,7 @@ mmrw(dev, uio, flags)
 				goto unlock;
 			}
 			o = uio->uio_offset & PGOFSET;
-			c = min(uio->uio_resid, (int)(NBPG - o));
+			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
 			/* However, we do need to partially re-implement uiomove() */
 			if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
 				panic("mmrw: uio mode");
@@ -231,10 +231,10 @@ mmrw(dev, uio, flags)
 			}
 			if (zeropage == NULL) {
 				zeropage = (caddr_t)
-				    malloc(NBPG, M_TEMP, M_WAITOK);
-				bzero(zeropage, NBPG);
+				    malloc(PAGE_SIZE, M_TEMP, M_WAITOK);
+				bzero(zeropage, PAGE_SIZE);
 			}
-			c = min(iov->iov_len, NBPG);
+			c = min(iov->iov_len, PAGE_SIZE);
 			error = uiomove(zeropage, c, uio);
 			break;
 
--- pmap.c
+++ pmap.c
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.137 2003/03/30 00:28:19 thorpej Exp $ */
+/* $NetBSD: pmap.c,v 1.138 2003/04/01 16:34:59 thorpej Exp $ */
 #undef NO_VCACHE /* Don't forget the locked TLB in dostart */
 #define HWREF
 /*
@@ -110,7 +110,7 @@ extern int pseg_set __P((struct pmap *, vaddr_t, int64_t, paddr_t));
 #define PV_NC 0x10LL
 #define PV_WE 0x20LL /* Debug -- this page was writable somtime */
 #define PV_MASK (0x03fLL)
-#define PV_VAMASK (~(NBPG - 1))
+#define PV_VAMASK (~(PAGE_SIZE - 1))
 #define PV_MATCH(pv,va) (!(((pv)->pv_va ^ (va)) & PV_VAMASK))
 #define PV_SETVA(pv,va) ((pv)->pv_va = (((va) & PV_VAMASK) | \
 					(((pv)->pv_va) & PV_MASK)))
@@ -411,7 +411,7 @@ pmap_bootdebug()
 
 /*
  * Calculate the correct number of page colors to use. This should be the
- * size of the E$/NBPG. However, different CPUs can have different sized
+ * size of the E$/PAGE_SIZE. However, different CPUs can have different sized
  * E$, so we need to take the GCM of the E$ size.
 */
 static int pmap_calculate_colors __P((void));
@@ -436,7 +436,7 @@ pmap_calculate_colors() {
 		    sizeof(assoc)) != sizeof(assoc))
 			/* Fake asociativity of 1 */
 			assoc = 1;
		color = size/assoc/NBPG;
-		color = size/assoc/NBPG;
+		color = size/assoc/PAGE_SIZE;
 		if (color > maxcolor)
 			maxcolor = color;
 	}
@@ -526,7 +526,7 @@ pmap_bootstrap(kernelstart, kernelend, maxctx)
 	 */
 	msgbufp = (struct kern_msgbuf *)(vaddr_t)MSGBUF_VA;
 	/* XXXXX -- increase msgbufsiz for uvmhist printing */
-	msgbufsiz = 4*NBPG /* round_page(sizeof(struct msgbuf)) */;
+	msgbufsiz = 4*PAGE_SIZE /* round_page(sizeof(struct msgbuf)) */;
 	BDPRINTF(PDB_BOOT, ("Trying to allocate msgbuf at %lx, size %lx\r\n",
 	    (long)msgbufp, (long)msgbufsiz));
 	if ((long)msgbufp !=
@@ -896,7 +896,7 @@ remap_data:
 	/*
 	 * Allocate a 64MB page for the cpu_info structure now.
 	 */
-	if ((cpu0paddr = prom_alloc_phys(8*NBPG, 8*NBPG)) == 0 ) {
+	if ((cpu0paddr = prom_alloc_phys(8*PAGE_SIZE, 8*PAGE_SIZE)) == 0 ) {
 		prom_printf("Cannot allocate new cpu_info\r\n");
 		OF_exit();
 	}
@@ -948,7 +948,7 @@ remap_data:
 	    (u_long)firstaddr));
 	firstaddr = ((firstaddr + TSBSIZE - 1) & ~(TSBSIZE-1));
 #ifdef DEBUG
-	i = (firstaddr + (NBPG-1)) & ~(NBPG-1); /* First, page align */
+	i = (firstaddr + (PAGE_SIZE-1)) & ~(PAGE_SIZE-1); /* First, page align */
 	if ((int)firstaddr < i) {
 		prom_printf("TSB alloc fixup failed\r\n");
 		prom_printf("frobbed i, firstaddr before TSB=%x, %lx\r\n",
@@ -1024,8 +1024,8 @@ remap_data:
 
 	/* Throw away page zero if we have it. */
 	if (avail->start == 0) {
-		avail->start += NBPG;
-		avail->size -= NBPG;
+		avail->start += PAGE_SIZE;
+		avail->size -= PAGE_SIZE;
 	}
 	/*
 	 * Now we need to remove the area we valloc'ed from the available
@@ -1063,7 +1063,7 @@ remap_data:
 		/*
 		 * Now page align the start of the region.
 		 */
-		s = mp->start % NBPG;
+		s = mp->start % PAGE_SIZE;
 		if (mp->size >= s) {
 			mp->size -= s;
 			mp->start += s;
@@ -1071,7 +1071,7 @@ remap_data:
 		/*
 		 * And now align the size of the region.
 		 */
-		mp->size -= mp->size % NBPG;
+		mp->size -= mp->size % PAGE_SIZE;
 		/*
 		 * Check whether some memory is left here.
 		 */
@@ -1104,7 +1104,8 @@ remap_data:
 #if 0
 		{
 			paddr_t p;
-			for (p = mp->start; p < mp->start+mp->size; p += NBPG)
+			for (p = mp->start; p < mp->start+mp->size;
+			     p += PAGE_SIZE)
 				pmap_zero_page(p);
 		}
 #endif
@@ -1194,17 +1195,17 @@ remap_data:
 		    0 /* IE */);
 		do {
 			pmap_enter_kpage(va, data);
-			va += NBPG;
-			msgbufsiz -= NBPG;
-			phys_msgbuf += NBPG;
-		} while (psize-=NBPG);
+			va += PAGE_SIZE;
+			msgbufsiz -= PAGE_SIZE;
+			phys_msgbuf += PAGE_SIZE;
+		} while (psize-=PAGE_SIZE);
 	}
 	BDPRINTF(PDB_BOOT1, ("Done inserting mesgbuf into pmap_kernel()\r\n"));
 
 	BDPRINTF(PDB_BOOT1, ("Inserting PROM mappings into pmap_kernel()\r\n"));
 	for (i = 0; i < prom_map_size; i++)
 		if (prom_map[i].vstart && ((prom_map[i].vstart>>32) == 0))
-			for (j = 0; j < prom_map[i].vsize; j += NBPG) {
+			for (j = 0; j < prom_map[i].vsize; j += PAGE_SIZE) {
 				int k;
 
 				for (k = 0; page_size_map[k].mask; k++) {
@@ -1230,7 +1231,7 @@ remap_data:
 	 */
 	vmmap = (vaddr_t)roundup(ekdata, 4*MEG);
 	/* Let's keep 1 page of redzone after the kernel */
-	vmmap += NBPG;
+	vmmap += PAGE_SIZE;
 	{
 		extern vaddr_t u0[2];
 		extern struct pcb* proc0paddr;
@@ -1252,7 +1253,7 @@ remap_data:
 		int64_t data;
 
 		pmap_get_page(&pa);
-		prom_map_phys(pa, NBPG, vmmap, -1);
+		prom_map_phys(pa, PAGE_SIZE, vmmap, -1);
 		data = TSB_DATA(0 /* global */,
 			PGSZ_8K,
 			pa,
@@ -1263,17 +1264,17 @@ remap_data:
 			1 /* valid */,
 			0 /* IE */);
 		pmap_enter_kpage(vmmap, data);
-		vmmap += NBPG;
+		vmmap += PAGE_SIZE;
 	}
 	BDPRINTF(PDB_BOOT1,
 	    ("Done inserting stack 0 into pmap_kernel()\r\n"));
 
 	/* Now map in and initialize our cpu_info structure */
 #ifdef DIAGNOSTIC
-	vmmap += NBPG; /* redzone -- XXXX do we need one? */
+	vmmap += PAGE_SIZE; /* redzone -- XXXX do we need one? */
 #endif
 	if ((vmmap ^ INTSTACK) & VA_ALIAS_MASK)
-		vmmap += NBPG; /* Matchup virtual color for D$ */
+		vmmap += PAGE_SIZE; /* Matchup virtual color for D$ */
 	intstk = vmmap;
 	cpus = (struct cpu_info *)(intstk+CPUINFO_VA-INTSTACK);
 
@@ -1304,13 +1305,13 @@ remap_data:
 			1 /* valid */,
 			0 /* IE */);
 		pmap_enter_kpage(vmmap, data);
-		vmmap += NBPG;
-		pa += NBPG;
+		vmmap += PAGE_SIZE;
+		pa += PAGE_SIZE;
 	}
 	BDPRINTF(PDB_BOOT1, ("Initializing cpu_info\r\n"));
 
 	/* Initialize our cpu_info structure */
-	bzero((void *)intstk, 8*NBPG);
+	bzero((void *)intstk, 8*PAGE_SIZE);
 	cpus->ci_next = NULL; /* Redundant, I know. */
 	cpus->ci_curlwp = &lwp0;
 	cpus->ci_cpcb = (struct pcb *)u0[0]; /* Need better source */
@@ -1352,12 +1353,10 @@ pmap_init()
 	vaddr_t va;
 
 	BDPRINTF(PDB_BOOT1, ("pmap_init()\r\n"));
-	if (PAGE_SIZE != NBPG)
-		panic("pmap_init: PAGE_SIZE!=NBPG");
 
 	size = sizeof(struct pv_entry) * physmem;
 	if (uvm_pglistalloc((psize_t)size, (paddr_t)0, (paddr_t)-1,
-		(paddr_t)NBPG, (paddr_t)0, &pglist, 1, 0) != 0)
+		(paddr_t)PAGE_SIZE, (paddr_t)0, &pglist, 1, 0) != 0)
 		panic("cpu_start: no memory");
 
 	va = uvm_km_valloc(kernel_map, size);
@@ -1378,7 +1377,7 @@ pmap_init()
 			1 /* valid */,
 			0 /* IE */);
 		pmap_enter_kpage(va, data);
-		va += NBPG;
+		va += PAGE_SIZE;
 	}
 
 	/*
@@ -1406,7 +1405,7 @@ pmap_virtual_space(start, end)
 	 * Reserve one segment for kernel virtual memory
 	 */
	/* Reserve two pages for pmap_copy_page && /dev/mem */
-	*start = kbreak = (vaddr_t)(vmmap + 2*NBPG);
+	*start = kbreak = (vaddr_t)(vmmap + 2*PAGE_SIZE);
 	*end = VM_MAX_KERNEL_ADDRESS;
 	BDPRINTF(PDB_BOOT1, ("pmap_virtual_space: %x-%x\r\n", *start, *end));
 }
@@ -1756,7 +1755,7 @@ pmap_kremove(va, size)
 	KASSERT(va < kdata || va > ekdata);
 
 	DPRINTF(PDB_DEMAP, ("pmap_kremove: start 0x%lx size %lx\n", va, size));
-	for (; size >= NBPG; va += NBPG, size -= NBPG) {
+	for (; size >= PAGE_SIZE; va += PAGE_SIZE, size -= PAGE_SIZE) {
 
#ifdef DIAGNOSTIC
 		/*
@@ -2089,7 +2088,7 @@ pmap_remove(pm, va, endva)
 	REMOVE_STAT(calls);
 
 	/* Now do the real work */
-	for (; va < endva; va += NBPG) {
+	for (; va < endva; va += PAGE_SIZE) {
 #ifdef DIAGNOSTIC
 		/*
 		 * Is this part of the permanent 4MB mapping?
@@ -2186,7 +2185,7 @@ pmap_protect(pm, sva, eva, prot)
 
 	simple_lock(&pm->pm_lock);
 	sva = sva & ~PGOFSET;
-	for (; sva < eva; sva += NBPG) {
+	for (; sva < eva; sva += PAGE_SIZE) {
 #ifdef DEBUG
 		/*
 		 * Is this part of the permanent 4MB mapping?
@@ -3523,7 +3522,7 @@ pmap_testout()
 	int ref, mod;
 
 	/* Allocate a page */
-	va = (vaddr_t)(vmmap - NBPG);
+	va = (vaddr_t)(vmmap - PAGE_SIZE);
 	KASSERT(va != NULL);
 	loc = (int*)va;
 