'040 changes from Amiga.

This commit is contained in:
briggs 1994-04-21 23:30:45 +00:00
parent c0a4759ca8
commit c731281873
1 changed file with 99 additions and 18 deletions

@@ -65,7 +65,7 @@
*/
/*
* from: @(#)pmap.c 7.5 (Berkeley) 5/10/91
* $Id: pmap.c,v 1.7 1994/04/18 03:02:16 briggs Exp $
* $Id: pmap.c,v 1.8 1994/04/21 23:30:45 briggs Exp $
*/
/*
@@ -186,7 +186,7 @@ extern vm_offset_t pager_sva, pager_eva;
/*
* Get STEs and PTEs for user/kernel address space
*/
#define pmap_ste(m, v) (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
#define pmap_ste(m, v) (&((m)->pm_stab[(vm_offset_t)(v) >> pmap_ishift]))
#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
#define pmap_pte_pa(pte) (*(int *)(pte) & PG_FRAME)
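The macro change above is the heart of the 68030/68040 split: the segment-table index is now computed with a run-time shift instead of the compile-time SG_ISHIFT, because the '040 tree has a different index width. For reference, a standalone sketch of the computation (the shift values are placeholders, not the real SG_ISHIFT/SG_040ISHIFT):

    /* Sketch of the index computation behind pmap_ste(); shift values
     * here are hypothetical, not the real mac68k definitions. */
    #include <stdio.h>

    #define SG_ISHIFT    24   /* hypothetical 68030-style index shift */
    #define SG_040ISHIFT 18   /* hypothetical 68040-style index shift */

    int cpu040 = 1;           /* pretend bootstrap probed a 68040 */
    static int pmap_ishift;   /* chosen once, used by pmap_ste() */

    int
    main(void)
    {
        unsigned long va = 0x00c41000UL;

        pmap_ishift = cpu040 ? SG_040ISHIFT : SG_ISHIFT;
        printf("VA %#lx -> segment table index %lu\n",
            va, va >> pmap_ishift);
        return 0;
    }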
@@ -229,6 +229,7 @@ struct kpt_page *kpt_pages;
* Segtabzero is an empty segment table which all processes share until they
* reference something.
*/
st_entry_t *Sysseg1; /* root segment for 68040 */
st_entry_t *Sysseg;
pt_entry_t *Sysmap, *Sysptmap;
st_entry_t *Segtabzero;
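Sysseg1 gives the kernel pmap a level-1 (root) table on the '040, whose MMU walks a deeper table tree than the two-level layout used for the 68851/68030. Illustratively, the per-pmap table pointers after this change look roughly like this (a sketch, not the real struct pmap):

    /* Illustrative shape of the per-pmap table pointers; the real
     * struct pmap in the mac68k headers has many more fields. */
    typedef int st_entry_t;   /* root/segment table entry (stand-in) */
    typedef int pt_entry_t;   /* page table entry (stand-in) */

    struct pmap_sketch {
        st_entry_t *pm_rtab;  /* 68040 only: root (level-1) table */
        st_entry_t *pm_stab;  /* segment (level-2) table */
        pt_entry_t *pm_ptab;  /* page tables */
    };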
@@ -253,6 +254,8 @@ int macpagesperpage; /* PAGE_SIZE / MAC_PAGE_SIZE */
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
int pmap_aliasmask; /* separation at which VA aliasing ok */
char *pmap_attributes; /* reference and modify bits */
static int pmap_ishift; /* segment table index shift */
extern int cpu040;
boolean_t pmap_testbit();
void pmap_enter_ptpage();
@@ -303,7 +306,7 @@ pmap_bootstrap(firstaddr, loadaddr)
mem_size = physmem << PGSHIFT;
virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
virtual_end = VM_MAX_KERNEL_ADDRESS;
macpagesperpage = 1; /* MAC_PAGE_SIZE / PAGE_SIZE */
macpagesperpage = 1;
/* BARF HELP ME! DAYSTAR CACHE fails */
@@ -344,9 +347,11 @@ pmap_bootstrap(firstaddr, loadaddr)
* Kernel page/segment table allocated in locore,
* just initialize pointers.
*/
if (cpu040)
kernel_pmap->pm_rtab = Sysseg1;
kernel_pmap->pm_stab = Sysseg;
kernel_pmap->pm_ptab = Sysmap;
pmap_ishift = cpu040 ? SG_040ISHIFT : SG_ISHIFT;
simple_lock_init(&kernel_pmap->pm_lock);
kernel_pmap->pm_count = 1;
@@ -486,11 +491,12 @@ bogons:
* initial segment table, pv_head_table and pmap_attributes.
*/
npg = atop(phys_end - phys_start);
s = (vm_size_t) (MAC_STSIZE + sizeof(struct pv_entry) * npg + npg);
s = (vm_size_t) ((cpu040 ? MAC_040STSIZE*128 : MAC_STSIZE) +
sizeof(struct pv_entry) * npg + npg);
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
Segtabzero = (st_entry_t *) addr;
addr += MAC_STSIZE;
addr += cpu040 ? MAC_040STSIZE*128 : MAC_STSIZE;
pv_table = (pv_entry_t) addr;
addr += sizeof(struct pv_entry) * npg;
pmap_attributes = (char *) addr;
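The sizing above packs three arrays into one allocation: the shared empty segment table (now 128 times larger on the '040), one pv_entry per managed page, and one attribute byte per page, carved apart with pointer arithmetic. A user-space sketch of the same carve-up (sizes invented, malloc standing in for kmem_alloc):

    /* Sketch of carving one allocation into three arrays, as above. */
    #include <stdlib.h>

    struct pv_entry { struct pv_entry *pv_next; /* ... */ };

    void
    carve(size_t stsize, size_t npg)
    {
        char *addr = malloc(stsize + sizeof(struct pv_entry) * npg + npg);
        char *segtabzero = addr;                 /* empty segment table */
        struct pv_entry *pv_table =
            (struct pv_entry *)(addr + stsize);  /* one pv head per page */
        char *pmap_attributes =
            (char *)(pv_table + npg);            /* one attr byte per page */
        (void)segtabzero; (void)pmap_attributes;
    }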
@@ -654,6 +660,8 @@ pmap_pinit(pmap)
* "null" segment table. On the first pmap_enter, a real
* segment table will be allocated.
*/
if (cpu040)
pmap->pm_rtab = Segtabzero;
pmap->pm_stab = Segtabzero;
pmap->pm_stchanged = TRUE;
pmap->pm_count = 1;
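pmap_pinit points the new '040 root-table field at the shared Segtabzero too, keeping the lazy-allocation scheme: nothing private is allocated until the first pmap_enter needs a PT page. A minimal sketch of the pattern (names illustrative):

    /* Lazy-allocation sketch: every pmap starts on the shared empty
     * table; private tables arrive on the first PT-page fault. */
    typedef int st_entry_t;
    struct pmap040 { st_entry_t *pm_rtab, *pm_stab; int pm_stchanged; };
    static st_entry_t segtabzero_storage;
    st_entry_t *Segtabzero = &segtabzero_storage;

    void
    pinit_sketch(struct pmap040 *pmap, int cpu040)
    {
        if (cpu040)
            pmap->pm_rtab = Segtabzero; /* shared, all-invalid */
        pmap->pm_stab = Segtabzero;     /* replaced on first pmap_enter */
        pmap->pm_stchanged = 1;         /* tell cpu_switch to reload MMU */
    }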
@@ -711,7 +719,11 @@ pmap_release(pmap)
kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
MAC_MAX_PTSIZE);
if (pmap->pm_stab != Segtabzero)
kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, MAC_STSIZE);
if (cpu040) {
kmem_free(kernel_map, (vm_offset_t) pmap->pm_rtab, MAC_040RTSIZE);
kmem_free(kernel_map, (vm_offset_t) pmap->pm_stab, MAC_040STSIZE*128);
} else
kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, MAC_STSIZE);
}
/*
@@ -927,7 +939,23 @@ pmap_remove(pmap, sva, eva)
*(int *)&opte, pmap_pte(pmap, va));
}
#endif
*ste = SG_NV;
if (cpu040) {
/*
* On the 68040, the PT page contains 64 page tables,
* so we need to remove all the associated segment
* table entries
* (This may be incorrect: if a single page table is
* being removed, the whole page should not be removed.)
*/
for (ix = 0; ix < 64; ++ix)
*ste++ = SG_NV;
ste -= 64;
#ifdef DEBUG
if (pmapdebug & (PDB_REMOVE|PDB_SEGTAB|0x10000))
printf("pmap_remove: PT at %x removed\n", va);
#endif
} else
*ste = SG_NV;
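As the comment above concedes, this may over-invalidate: because one VM page holds 64 of the small '040 page tables, tearing the page down clears 64 consecutive STEs even if only one table was in use. A sketch of that invalidation (the SG_NV value is a stand-in):

    /* One VM page holds 64 little 68040 page tables, so tearing the
     * page down invalidates all 64 consecutive STEs. */
    #define SG_NV 0                  /* "segment not valid" stand-in */
    #define TABLES_PER_PAGE 64

    void
    invalidate_pt_page(int *ste /* first of the 64 STEs */)
    {
        int ix;
        for (ix = 0; ix < TABLES_PER_PAGE; ix++)
            ste[ix] = SG_NV;
    }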
/*
* If it was a user PT page, we decrement the
* reference count on the segment table as well,
@@ -949,6 +977,16 @@ pmap_remove(pmap, sva, eva)
printf("remove: free stab %x\n",
ptpmap->pm_stab);
#endif
if (cpu040) {
kmem_free(kernel_map,
(vm_offset_t)ptpmap->pm_rtab,
MAC_040RTSIZE);
kmem_free(kernel_map,
(vm_offset_t)ptpmap->pm_stab,
MAC_040STSIZE*128);
ptpmap->pm_rtab = Segtabzero;
}
else
kmem_free(kernel_map,
(vm_offset_t)ptpmap->pm_stab,
MAC_STSIZE);
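The teardown here mirrors allocation: on the '040 both the root table (MAC_040RTSIZE) and the enlarged segment table (MAC_040STSIZE*128) go back to the kernel map, otherwise just the single MAC_STSIZE segment table. A sketch of the pairing (my_free stands in for kmem_free, sizes are placeholders):

    #define MAC_040RTSIZE 512           /* placeholder sizes */
    #define MAC_040STSIZE 512
    #define MAC_STSIZE    4096

    struct tables { int *pm_rtab, *pm_stab; };
    extern void my_free(void *p, unsigned long sz); /* ~ kmem_free() */

    void
    release_tables(struct tables *p, int cpu040)
    {
        if (cpu040) {
            my_free(p->pm_rtab, MAC_040RTSIZE);       /* level-1 root */
            my_free(p->pm_stab, MAC_040STSIZE * 128); /* level-2 tables */
        } else
            my_free(p->pm_stab, MAC_STSIZE);          /* segment table */
    }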
@@ -1160,7 +1198,7 @@ pmap_enter(pmap, va, pa, prot, wired)
vm_offset_t opa;
boolean_t cacheable = TRUE;
boolean_t checkpv = TRUE;
extern int dbg_flg;
extern u_int cache_copyback;
#ifdef DEBUG
if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
@@ -1168,8 +1206,6 @@ pmap_enter(pmap, va, pa, prot, wired)
pmap, va, pa, prot, wired);
#endif
if (dbg_flg) printf("pmap_enter(%x, %x, %x, %x, %x)\n", pmap, va, pa, prot, wired);
if (pmap == NULL)
return;
@@ -1388,20 +1424,24 @@ validate:
/*
* Flush VAC to ensure we get correct state of HW bits
* so we don't clobber them.
*/
if (pmap_aliasmask)
DCIS();
*/
/*
* Now validate mapping with desired protection/wiring.
* Assume uniform modified and referenced status for all
* HP pages in a MACH page.
* MAC pages in a MACH page.
*/
if (cpu040 && pmap == kernel_pmap && va >= MAC_PTBASE)
cacheable = FALSE; /* Don't cache user page tables */
npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
npte |= (*(int *)pte & (PG_M|PG_U));
if (wired)
npte |= PG_W;
if (!checkpv && !cacheable)
npte |= PG_CI;
else if (cpu040)
npte |= cache_copyback;
#ifdef DEBUG
if (pmapdebug & PDB_ENTER)
printf("enter: new pte value %x\n", npte);
@@ -2084,6 +2124,7 @@ pmap_enter_ptpage(pmap, va)
register pv_entry_t pv;
st_entry_t *ste;
int s;
u_int sg_proto, *sg;
#ifdef DEBUG
if (pmapdebug & (PDB_FOLLOW|PDB_ENTER|PDB_PTPAGE))
@@ -2098,8 +2139,28 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
pmap->pm_stab = (st_entry_t *)
kmem_alloc(kernel_map, MAC_STSIZE);
if (cpu040) {
pmap->pm_rtab = (st_entry_t *)
kmem_alloc(kernel_map, MAC_040RTSIZE);
pmap->pm_stab = (st_entry_t *)
kmem_alloc(kernel_map, MAC_040STSIZE*128);
/* initialize root table entries */
sg = (u_int *) pmap->pm_rtab;
sg_proto = pmap_extract(kernel_pmap, (vm_offset_t) pmap->pm_stab) |
SG_RW | SG_V;
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf ("pmap_enter_ptpage: ROOT TABLE SETUP %x %x\n",
pmap->pm_rtab, sg_proto);
#endif
while (sg < (u_int *) ((u_int) pmap->pm_rtab + MAC_040RTSIZE)) {
*sg++ = sg_proto;
sg_proto += MAC_040STSIZE;
}
}
else
pmap->pm_stab = (st_entry_t *)
kmem_alloc(kernel_map, MAC_STSIZE);
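When a real segment table replaces Segtabzero on the '040, every root-table entry is pointed at the next MAC_040STSIZE-byte slice of the physically contiguous segment table. A sketch of that fill loop (kvtop stands in for pmap_extract, sizes are placeholders):

    #define MAC_040RTSIZE 512          /* placeholder sizes */
    #define MAC_040STSIZE 512
    #define SG_RW 0x2
    #define SG_V  0x1

    extern unsigned int kvtop(void *va); /* VA -> PA, ~ pmap_extract() */

    void
    fill_root_table(unsigned int *rtab, void *stab)
    {
        unsigned int *sg = rtab;
        unsigned int sg_proto = kvtop(stab) | SG_RW | SG_V;

        while (sg < rtab + MAC_040RTSIZE / sizeof(*sg)) {
            *sg++ = sg_proto;          /* point entry at current slice */
            sg_proto += MAC_040STSIZE; /* advance to the next slice */
        }
    }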
pmap->pm_stchanged = TRUE;
/*
* XXX may have changed segment table pointer for current
@@ -2114,8 +2175,18 @@ pmap_enter_ptpage(pmap, va)
#endif
}
ste = pmap_ste(pmap, va);
va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
/*
* On the 68040, a page will hold 64 page tables, so the segment
* table will have to have 64 entries set up. First get the ste
* for the page mapped by the first PT entry.
*/
if (cpu040) {
ste = pmap_ste(pmap, va & (SG_040IMASK << 6));
va = trunc_page((vm_offset_t)pmap_pte(pmap, va & (SG_040IMASK << 6)));
} else {
ste = pmap_ste(pmap, va);
va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
}
/*
* In the kernel we allocate a page from the kernel PT page
@@ -2206,7 +2277,17 @@ pmap_enter_ptpage(pmap, va)
* it would be difficult to identify ST pages in pmap_pageable to
* release them. We also avoid the overhead of vm_map_pageable.
*/
*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
if (cpu040) {
/* On the 68040, a PT page holds 64 page tables, so map all 64 */
sg = (u_int *) ste;
sg_proto = (ptpa & SG_FRAME) | SG_RW | SG_V;
while (sg < (u_int *) (ste + 64)) {
*sg++ = sg_proto;
sg_proto += MAC_040PTSIZE;
}
}
else
*(int *)ste = (ptpa & SG_FRAME) | SG_RW | SG_V;
if (pmap != kernel_pmap) {
pmap->pm_sref++;
#ifdef DEBUG
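The STE fill in the last hunk is the mapping-side twin of the 64-entry invalidation in pmap_remove: one physical PT page supplies 64 consecutive page tables, so 64 STEs are written, each pointing MAC_040PTSIZE bytes further into the page. A sketch (constants are placeholders):

    /* Sketch of the 64-way STE fill when a PT page is wired in on
     * the 68040; constants are stand-ins for the mac68k values. */
    #define SG_FRAME      0xfffff000
    #define SG_RW         0x2
    #define SG_V          0x1
    #define MAC_040PTSIZE 64           /* bytes per little 040 table */

    void
    map_pt_page(unsigned int *ste, unsigned int ptpa)
    {
        unsigned int *sg = ste;
        unsigned int sg_proto = (ptpa & SG_FRAME) | SG_RW | SG_V;

        while (sg < ste + 64) {        /* 64 page tables per VM page */
            *sg++ = sg_proto;
            sg_proto += MAC_040PTSIZE; /* next table within the page */
        }
    }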