Refer to KERNBASE instead of its current numerical value in comments
and in some early debugging code.
This commit is contained in:
pk 1997-09-14 19:20:48 +00:00
parent dc4f93c2a1
commit 383448522d

View File

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.95 1997/08/31 21:08:03 pk Exp $ */
/* $NetBSD: pmap.c,v 1.96 1997/09/14 19:20:48 pk Exp $ */
/*
* Copyright (c) 1996
@@ -989,8 +989,8 @@ mmu_reservemon4_4c(nrp, nsp)
/*
* mmu_reservemon4m(): Copies the existing (ROM) page tables to kernel space,
* converting any L1/L2 PTEs to L3 PTEs. Does *not* copy the L1 entry mapping
* the kernel at KERNBASE (0xf8000000) since we don't want to map 16M of
* physical memory for the kernel. Thus the kernel must be installed later!
* the kernel at KERNBASE since we don't want to map 16M of physical
* memory for the kernel. Thus the kernel must be installed later!
* Also installs ROM mappings into the kernel pmap.
* NOTE: This also revokes all user-mode access to the mapped regions.
*/
@@ -1067,10 +1067,11 @@ mmu_setup4m_L1(regtblptd, kpmap)
/*
* Here we scan the region table to copy any entries which appear.
* We are only concerned with regions in kernel space and above
* (i.e. regions 0xf8 to 0xff). We also ignore region 0xf8, since
* that is the 16MB L1 mapping that the ROM used to map the kernel
* in initially. Later, we will rebuild a new L3 mapping for the
* kernel and install it before switching to the new pagetables.
* (i.e. regions VA_VREG(KERNBASE)+1 to 0xff). We ignore the first
* region (at VA_VREG(KERNBASE)), since that is the 16MB L1 mapping
* that the ROM used to map the kernel in initially. Later, we will
* rebuild a new L3 mapping for the kernel and install it before
* switching to the new pagetables.
*/
regtblrover =
((regtblptd & ~SRMMU_TETYPE) << SRMMU_PPNPASHIFT) +
@@ -3072,7 +3073,7 @@ pmap_bootstrap4m(void)
/*
* Reserve memory for segment and page tables needed to map the entire
* kernel (from regions 0xf8 -> 0xff). This takes 130k of space, but
* kernel. This takes (2k + NKREG * 16k) of space, but
* unfortunately is necessary since pmap_enk *must* be able to enter
* a kernel mapping without resorting to malloc, or else the
* possibility of deadlock arises (pmap_enk4m is called to enter a
@@ -3123,28 +3124,27 @@ pmap_bootstrap4m(void)
/* XXX:rethink - Store pointer to region table address */
cpuinfo.L1_ptps = pmap_kernel()->pm_reg_ptps;
for (reg = VA_VREG(KERNBASE); reg < NKREG+VA_VREG(KERNBASE); reg++) {
for (reg = 0; reg < NKREG; reg++) {
struct regmap *rp;
caddr_t kphyssegtbl;
/*
* Entering new region; install & build segtbl
*/
int kregnum = reg - VA_VREG(KERNBASE);
rp = &pmap_kernel()->pm_regmap[reg];
rp = &pmap_kernel()->pm_regmap[reg + VA_VREG(KERNBASE)];
kphyssegtbl = (caddr_t)
&kernel_segtable_store[kregnum * SRMMU_L2SIZE];
&kernel_segtable_store[reg * SRMMU_L2SIZE];
setpgt4m(&pmap_kernel()->pm_reg_ptps[reg],
setpgt4m(&pmap_kernel()->pm_reg_ptps[reg + VA_VREG(KERNBASE)],
(VA2PA(kphyssegtbl) >> SRMMU_PPNPASHIFT) | SRMMU_TEPTD);
rp->rg_seg_ptps = (int *)kphyssegtbl;
if (rp->rg_segmap == NULL) {
printf("rp->rg_segmap == NULL!\n");
rp->rg_segmap = &kernel_segmap_store[kregnum * NSEGRG];
rp->rg_segmap = &kernel_segmap_store[reg * NSEGRG];
}
for (seg = 0; seg < NSEGRG; seg++) {
@@ -3156,7 +3156,7 @@ pmap_bootstrap4m(void)
sp = &rp->rg_segmap[seg];
kphyspagtbl = (caddr_t)
&kernel_pagtable_store
[((kregnum * NSEGRG) + seg) * SRMMU_L3SIZE];
[((reg * NSEGRG) + seg) * SRMMU_L3SIZE];
setpgt4m(&rp->rg_seg_ptps[seg],
(VA2PA(kphyspagtbl) >> SRMMU_PPNPASHIFT) |
@@ -6688,8 +6688,8 @@ debug_pagetables()
test_region(0xfe,0,16*1024*1024);
printf("Testing region 0xff: ");
test_region(0xff,0,16*1024*1024);
printf("Testing kernel region 0xf8: ");
test_region(0xf8, 4096, avail_start);
printf("Testing kernel region 0x%x: ", VA_VREG(KERNBASE));
test_region(VA_VREG(KERNBASE), 4096, avail_start);
cngetc();
for (i = 0; i < SRMMU_L1SIZE; i++) {
@@ -6736,7 +6736,7 @@ VA2PAsw(ctx, addr, pte)
return 0;
}
/* L1 */
curtbl = ((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT); /* correct for krn*/
curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
*pte = curpte = curtbl[VA_VREG(addr)];
#ifdef EXTREME_EXTREME_DEBUG
printf("L1 table at 0x%x.\nGot L1 pte 0x%x\n",curtbl,curpte);
@@ -6750,7 +6750,7 @@ VA2PAsw(ctx, addr, pte)
return 0;
}
/* L2 */
curtbl = ((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT); /* correct for krn*/
curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
*pte = curpte = curtbl[VA_VSEG(addr)];
#ifdef EXTREME_EXTREME_DEBUG
printf("L2 table at 0x%x.\nGot L2 pte 0x%x\n",curtbl,curpte);
@@ -6764,7 +6764,7 @@ VA2PAsw(ctx, addr, pte)
return 0;
}
/* L3 */
curtbl = ((curpte & ~0x3) << 4) | (0xf8 << RGSHIFT); /* correct for krn*/
curtbl = ((curpte & ~0x3) << 4) | KERNBASE; /* correct for krn*/
*pte = curpte = curtbl[VA_VPG(addr)];
#ifdef EXTREME_EXTREME_DEBUG
printf("L3 table at 0x%x.\nGot L3 pte 0x%x\n",curtbl,curpte);
@@ -6805,8 +6805,9 @@ void test_region(reg, start, stop)
printf("Mismatch at address 0x%x.\n",addr);
if (cngetc()=='q') break;
}
if (reg == 0xf8) /* kernel permissions are different */
continue;
if (reg == VA_VREG(KERNBASE))
/* kernel permissions are different */
continue;
if ((pte&SRMMU_PROT_MASK)!=(ptesw&SRMMU_PROT_MASK)) {
printf("Mismatched protections at address "
"0x%x; pte=0x%x, ptesw=0x%x\n",