Add UVM support to amiga.
Use a second free memory list for 16-bit Zorro II memory in segments other than the first memory segment.
parent dc9188e3e1
commit 7b04d96d02
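The change is almost entirely mechanical: each Mach VM entry point gains a UVM counterpart behind #if defined(UVM). Every pair in this summary appears in the hunks below:

    Mach VM                           UVM
    --------------------------------  ----------------------------------
    vm_page_physload() (4 args)       uvm_page_physload(..., free_list)
    vm_map_find() / vm_map_remove()   uvm_map() / uvm_unmap()
    kmem_alloc()                      uvm_km_zalloc()
    kmem_free()                       uvm_km_free()
    kmem_alloc_wait()                 uvm_km_valloc_wait()
    kmem_free_wakeup()                uvm_km_free_wakeup()
    kmem_suballoc() (modified copy)   uvm_km_suballoc()
    vm_map_pageable()                 uvm_map_pageable()
    vm_fault()                        uvm_fault()
    vm_map_lookup_entry()             uvm_map_lookup_entry()
    pager_sva / pager_eva externs     uvm.pager_sva / uvm.pager_eva

The one piece of genuinely new behavior is the free-list argument to uvm_page_physload(): 16-bit Zorro II RAM goes on VM_FREELIST_ZORROII, presumably so that ordinary allocations are satisfied from full-speed 32-bit RAM on VM_FREELIST_DEFAULT first.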
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.51 1998/05/24 20:55:01 is Exp $	*/
+/*	$NetBSD: pmap.c,v 1.52 1998/07/10 20:26:35 mhitch Exp $	*/
 
 /*
  * Copyright (c) 1991 Regents of the University of California.
@@ -75,6 +75,8 @@
  * and to when physical maps must be made correct.
  */
 
+#include "opt_uvm.h"
+
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/proc.h>
@@ -83,6 +85,11 @@
 #include <sys/user.h>
 #include <vm/vm_kern.h>
 #include <vm/vm_page.h>
+
+#if defined(UVM)
+#include <uvm/uvm.h>
+#endif
+
 #include <machine/pte.h>
 #include <machine/cpu.h>
 #include <machine/vmparam.h>
@@ -233,7 +240,10 @@ u_int	*Segtabzero, *Segtabzeropa;
 vm_size_t	Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
 
 struct pmap	kernel_pmap_store;
-vm_map_t	pt_map;
+vm_map_t	st_map, pt_map;
+#if defined(UVM)
+struct vm_map	st_map_store, pt_map_store;
+#endif
 
 vm_size_t	mem_size;	/* memory size in bytes */
 vm_offset_t	virtual_avail;	/* VA of first avail page (after kernel bss) */
@@ -330,8 +340,16 @@ pmap_bootstrap(firstaddr, loadaddr)
 	 * for loading the kernel into.
 	 */
 
+#if defined(UVM)
+	/*
+	 * May want to check if first segment is Zorro-II?
+	 */
+	uvm_page_physload(atop(fromads), atop(toads),
+	    atop(fromads), atop(toads), VM_FREELIST_DEFAULT);
+#else
 	vm_page_physload(atop(fromads), atop(toads),
 	    atop(fromads), atop(toads));
+#endif
 
 	sp = memlist->m_seg;
 	esp = sp + memlist->m_nseg;
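uvm_page_physload() takes a fifth argument naming the free list the segment belongs to (the first four are the segment's start/end and available start/end page frames; argument names inferred from the call sites); Mach's vm_page_physload() had no such notion. The XXX comment records that this first segment is put on VM_FREELIST_DEFAULT unconditionally even though it, too, could in principle be Zorro II RAM.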
@@ -371,8 +389,14 @@ pmap_bootstrap(firstaddr, loadaddr)
 		if ((fromads <= z2mem_start) && (toads > z2mem_start))
 			toads = z2mem_start;
 
+#if defined(UVM)
+		uvm_page_physload(atop(fromads), atop(toads),
+		    atop(fromads), atop(toads), (fromads & 0xff000000) ?
+		    VM_FREELIST_DEFAULT : VM_FREELIST_ZORROII);
+#else
 		vm_page_physload(atop(fromads), atop(toads),
 		    atop(fromads), atop(toads));
+#endif
 		physmem += (toads - fromads) / NBPG;
 		++i;
 		if (noncontig_enable == 1)
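Why `fromads & 0xff000000' picks the list: Zorro II devices live in the 68000's 24-bit address space, below 16 MB, so a segment whose start address has any of the top eight bits set cannot be Zorro II RAM. A standalone sketch of the predicate (the numeric values of the two VM_FREELIST_* constants are made up for the demo):

    #include <stdio.h>

    #define VM_FREELIST_DEFAULT	0	/* assumed value, illustration only */
    #define VM_FREELIST_ZORROII	1	/* assumed value, illustration only */

    /* Segments starting at or above 16 MB cannot be 16-bit Zorro II RAM. */
    static int
    segment_freelist(unsigned long fromads)
    {
    	return (fromads & 0xff000000UL) ?
    	    VM_FREELIST_DEFAULT : VM_FREELIST_ZORROII;
    }

    int
    main(void)
    {
    	/* Zorro II fast RAM sits at 0x00200000-0x009FFFFF. */
    	printf("0x00200000 -> list %d\n", segment_freelist(0x00200000UL));
    	/* Zorro III / motherboard RAM lives above 16 MB. */
    	printf("0x07400000 -> list %d\n", segment_freelist(0x07400000UL));
    	return 0;
    }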
@@ -454,6 +478,30 @@ pmap_init()
 	 * unavailable regions which we have mapped in locore.
 	 * XXX in pmap_bootstrap() ???
 	 */
+#if defined(UVM)
+	addr = (vm_offset_t) amigahwaddr;
+	if (uvm_map(kernel_map, &addr,
+		    ptoa(namigahwpg),
+		    NULL, UVM_UNKNOWN_OFFSET,
+		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+				UVM_INH_NONE, UVM_ADV_RANDOM,
+				UVM_FLAG_FIXED)) != KERN_SUCCESS)
+		goto bogons;
+	addr = (vm_offset_t) Sysmap;
+	if (uvm_map(kernel_map, &addr, AMIGA_KPTSIZE,
+		    NULL, UVM_UNKNOWN_OFFSET,
+		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
+				UVM_INH_NONE, UVM_ADV_RANDOM,
+				UVM_FLAG_FIXED)) != KERN_SUCCESS) {
+		/*
+		 * If this fails, it is probably because the static
+		 * portion of the kernel page table isn't big enough
+		 * and we overran the page table map.
+		 */
+ bogons:
+		panic("pmap_init: bogons in the VM system!\n");
+	}
+#else
 	addr = amigahwaddr;
 	(void)vm_map_find(kernel_map, NULL, 0, &addr, ptoa(namigahwpg), FALSE);
 	if (addr != amigahwaddr)
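Two things happen in the UVM branch. First, mapping the hardware-register and Sysmap ranges with a NULL backing object and UVM_PROT_NONE does not make them usable; it merely reserves the virtual addresses (already mapped in locore) so nothing else is placed there. Second, UVM_FLAG_FIXED makes uvm_map() fail outright if the requested address is taken, which is why the Mach-era pattern of calling vm_map_find() and then comparing the returned addr against the wanted one disappears on this path. UVM_MAPFLAG() simply packs protection, maximum protection, inheritance, advice and control flags into the single flags argument; the error convention is still the Mach KERN_SUCCESS one at this stage of UVM.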
@@ -470,6 +518,7 @@ pmap_init()
 	 */
 	if (addr != (vm_offset_t)Sysmap)
 		panic("pmap_init: bogons in the VM system!\n");
+#endif
 #ifdef DEBUG
 	if (pmapdebug & PDB_INIT) {
 		printf("pmap_init: Sysseg %p, Sysmap %p, Sysptmap %p\n",
@@ -497,7 +546,13 @@ pmap_init()
 	s += page_cnt * sizeof(char);	/* attribute table */
 	s = round_page(s);
 
+#if defined(UVM)
+	addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
+	if (addr == 0)
+		panic("pmap_init: can't allocate data structures");
+#else
 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+#endif
 	Segtabzero = (u_int *) addr;
 	Segtabzeropa = (u_int *) pmap_extract(pmap_kernel(), addr);
 	addr += AMIGA_STSIZE;
@@ -549,17 +604,35 @@ pmap_init()
 	 * Verify that space will be allocated in region for which
 	 * we already have kernel PT pages.
 	 */
+#if defined(UVM)
+	addr = 0;
+	rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET,
+		     UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE,
+				 UVM_ADV_RANDOM, UVM_FLAG_NOMERGE));
+	if (rv != KERN_SUCCESS || (addr + s) >= (vm_offset_t)Sysmap)
+		panic("pmap_init: kernel PT too small");
+	rv = uvm_unmap(kernel_map, addr, addr + s, FALSE);
+	if (rv != KERN_SUCCESS)
+		panic("pmap_init: uvm_unmap failed");
+#else
 	addr = 0;
 	rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE);
 	if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap)
 		panic("pmap_init: kernel PT too small");
 	vm_map_remove(kernel_map, addr, addr + s);
+#endif
 
 	/*
 	 * Now allocate the space and link the pages together to
 	 * form the KPT free list.
 	 */
+#if defined(UVM)
+	addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s);
+	if (addr == 0)
+		panic("pmap_init: cannot allocate KPT free list");
+#else
 	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
+#endif
 	s = ptoa(npages);
 	addr2 = addr + s;
 	kpt_pages = &((struct kpt_page *)addr2)[npages];
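The verification step keeps its Mach shape: grab s bytes anywhere in kernel_map, check that the chosen address still lands below Sysmap (that is, inside the region covered by the statically allocated kernel PT pages), then give the space back. UVM_FLAG_NOMERGE is presumably there so the probe entry is not coalesced with a neighboring map entry, keeping the follow-up uvm_unmap() of exactly [addr, addr + s) well-defined; note that uvm_unmap()'s return value is checked where vm_map_remove()'s was not.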
@@ -579,6 +652,29 @@ pmap_init()
 		    atop(s), addr, addr + s);
 #endif
 
+#if defined(UVM)
+	/*
+	 * Allocate the segment table map and the page table map.
+	 */
+	s = maxproc * AMIGA_STSIZE;
+	st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE,
+				 FALSE, &st_map_store);
+
+	addr = AMIGA_UPTBASE;
+	if ((AMIGA_UPTMAXSIZE / AMIGA_MAX_PTSIZE) < maxproc) {
+		s = AMIGA_UPTMAXSIZE;
+		/*
+		 * XXX We don't want to hang when we run out of
+		 * page tables, so we lower maxproc so that fork()
+		 * will fail instead.  Note that root could still raise
+		 * this value via sysctl(2).
+		 */
+		maxproc = (AMIGA_UPTMAXSIZE / AMIGA_MAX_PTSIZE);
+	} else
+		s = (maxproc * AMIGA_MAX_PTSIZE);
+	pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE,
+				 TRUE, &pt_map_store);
+#else
 	/*
 	 * Slightly modified version of kmem_suballoc() to get page table
 	 * map where we want it.
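Reading the uvm_km_suballoc() calls against their arguments, the TRUE/FALSE pair after the size appear to be pageable and fixed-address flags: st_map may land anywhere in kernel_map, while pt_map is pinned at AMIGA_UPTBASE, and &st_map_store/&pt_map_store supply the statically declared struct vm_map objects added near the top of the file. The clamp arithmetic, with made-up numbers for illustration: if AMIGA_UPTMAXSIZE were 16 MB and AMIGA_MAX_PTSIZE 1 MB, pt_map could hold at most 16 full-size user page tables, so a maxproc of 64 would be lowered to 16 and the 17th concurrent fork() would fail cleanly rather than sleep forever waiting for page-table space.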
@@ -600,6 +696,7 @@ pmap_init()
 	if (pmapdebug & PDB_INIT)
 		printf("pmap_init: pt_map [%lx - %lx)\n", addr, addr2);
 #endif
+#endif /* UVM */
 
 #if defined(M68040) || defined(M68060)
 	if (mmutype == MMU_68040) {
@@ -648,9 +745,15 @@ pmap_alloc_pv()
 	int i;
 
 	if (pv_nfree == 0) {
+#if defined(UVM)
+		pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG);
+		if (pvp == 0)
+			panic("pmap_alloc_pv: uvm_km_zalloc() failed");
+#else
 		pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG);
 		if (pvp == 0)
 			panic("pmap_alloc_pv: kmem_alloc() failed");
+#endif
 		pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1];
 		for (i = NPVPPG - 2; i; i--, pv++)
 			pv->pv_next = pv + 1;
@@ -692,7 +795,11 @@ pmap_free_pv(pv)
 	case NPVPPG:
 		pv_nfree -= NPVPPG - 1;
 		TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list);
+#if defined(UVM)
+		uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#else
 		kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
 		break;
 	}
 }
@@ -751,7 +858,11 @@ pmap_collect_pv()
 
 	for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) {
 		npvp = pvp->pvp_pgi.pgi_list.tqe_next;
+#if defined(UVM)
+		uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#else
 		kmem_free(kernel_map, (vm_offset_t)pvp, NBPG);
+#endif
 	}
 }
 #endif
@@ -898,11 +1009,21 @@ pmap_release(pmap)
 		panic("pmap_release count");
 #endif
 	if (pmap->pm_ptab)
+#if defined(UVM)
+		uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
+				   AMIGA_UPTSIZE);
+#else
 		kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
 				 AMIGA_UPTSIZE);
+#endif
 	if (pmap->pm_stab != Segtabzero)
+#if defined(UVM)
+		uvm_km_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab,
+				   AMIGA_STSIZE);
+#else
 		kmem_free_wakeup(kernel_map, (vm_offset_t)pmap->pm_stab,
 				 AMIGA_STSIZE);
+#endif
 }
 
 /*
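Note the asymmetry being fixed in passing: on the Mach path, user segment tables were carved straight out of kernel_map, while the UVM path frees them to st_map, the dedicated submap created in pmap_init() above (sized maxproc * AMIGA_STSIZE). The allocation side moves the same way in pmap_enter_ptpage() further down; page tables keep using pt_map on both paths.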
@@ -1006,8 +1127,13 @@ pmap_remove(pmap, sva, eva)
 	 */
 	if (pmap != pmap_kernel()) {
 		pte = pmap_pte(pmap, va);
+#if defined(UVM)
+		(void) uvm_map_pageable(pt_map, trunc_page(pte),
+		    round_page(pte+1), TRUE);
+#else
 		vm_map_pageable(pt_map, trunc_page(pte),
 		    round_page(pte+1), TRUE);
+#endif
 #ifdef DEBUG
 		if (pmapdebug & PDB_WIRING)
 			pmap_check_wiring("remove", trunc_page(pte));
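This call and its mirror image in pmap_enter() (a few hunks down) implement the PT-page wiring convention: the last argument of uvm_map_pageable() is new_pageable, so TRUE unwires and FALSE wires. A side-by-side sketch, assuming kernel context (not compilable on its own):

    /* pmap_remove(): the last valid PTE in the PT page went away,
     * so let the page become pageable again (new_pageable = TRUE). */
    (void) uvm_map_pageable(pt_map, trunc_page(pte), round_page(pte + 1), TRUE);

    /* pmap_enter(): a PTE in the PT page just became valid,
     * so wire the page down (new_pageable = FALSE). */
    (void) uvm_map_pageable(pt_map, trunc_page(pte), round_page(pte + 1), FALSE);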
@@ -1123,9 +1249,15 @@ pmap_remove(pmap, sva, eva)
 					printf("remove: free stab %p\n",
 					    ptpmap->pm_stab);
 #endif
+#if defined(UVM)
+				uvm_km_free_wakeup(st_map,
+				    (vm_offset_t)ptpmap->pm_stab,
+				    AMIGA_STSIZE);
+#else
 				kmem_free_wakeup(kernel_map,
 				    (vm_offset_t)ptpmap->pm_stab,
 				    AMIGA_STSIZE);
+#endif
 				ptpmap->pm_stab = Segtabzero;
 				ptpmap->pm_stpa = Segtabzeropa;
 #if defined(M68040) || defined(M68060)
@@ -1341,8 +1473,13 @@ pmap_enter(pmap, va, pa, prot, wired)
 	 * For user mapping, allocate kernel VM resources if necessary.
 	 */
 	if (pmap->pm_ptab == NULL)
+#if defined(UVM)
+		pmap->pm_ptab = (pt_entry_t *)
+		    uvm_km_valloc_wait(pt_map, AMIGA_UPTSIZE);
+#else
 		pmap->pm_ptab = (u_int *)
 		    kmem_alloc_wait(pt_map, AMIGA_UPTSIZE);
+#endif
 
 	/*
 	 * Segment table entry not valid, we need a new PT page
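uvm_km_valloc_wait() is the analogue of kmem_alloc_wait(): it hands back virtual space in pt_map with no physical pages behind it, sleeping until the submap has room. The actual PT pages materialize one at a time through the uvm_fault() call in pmap_enter_ptpage() (near the end of this diff) and are then wired with uvm_map_pageable(). The cast also tightens from u_int * to the proper pt_entry_t * while here.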
@@ -1413,8 +1550,13 @@ pmap_enter(pmap, va, pa, prot, wired)
 	 * is a valid mapping in the page.
 	 */
 	if (pmap != pmap_kernel())
+#if defined(UVM)
+		(void) uvm_map_pageable(pt_map, trunc_page(pte),
+		    round_page(pte+1), FALSE);
+#else
 		vm_map_pageable(pt_map, trunc_page(pte),
 		    round_page(pte+1), FALSE);
+#endif
 
 	/*
 	 * Enter on the PV list if part of our managed memory
@@ -2176,10 +2318,15 @@ pmap_changebit(pa, bit, setem)
 		 * XXX don't write protect pager mappings
 		 */
 		if (bit == PG_RO) {
+#if defined(UVM)
+			if (va >= uvm.pager_sva && va < uvm.pager_eva)
+				continue;
+#else
 			extern vm_offset_t pager_sva, pager_eva;
 
 			if (va >= pager_sva && va < pager_eva)
 				continue;
+#endif
 		}
 
 		pte = (int *) pmap_pte(pv->pv_pmap, va);
@@ -2236,8 +2383,13 @@ pmap_enter_ptpage(pmap, va)
 	 * reference count drops to zero.
 	 */
 	if (pmap->pm_stab == Segtabzero) {
+#if defined(UVM)
+		pmap->pm_stab = (st_entry_t *)
+		    uvm_km_zalloc(st_map, AMIGA_STSIZE);
+#else
 		pmap->pm_stab = (u_int *)
 		    kmem_alloc(kernel_map, AMIGA_STSIZE);
+#endif
 		pmap->pm_stpa = (u_int *) pmap_extract(
 		    pmap_kernel(), (vm_offset_t)pmap->pm_stab);
 #if defined(M68040) || defined(M68060)
@@ -2382,12 +2534,27 @@ pmap_enter_ptpage(pmap, va)
 	if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
 		printf("enter_pt: about to fault UPT pg at %lx\n", va);
 #endif
+#if defined(UVM)
+		if (uvm_fault(pt_map, va, 0, VM_PROT_READ|VM_PROT_WRITE)
+		    != KERN_SUCCESS) {
+			panic("pmap_enter: uvm_fault failed");
+		}
+#else
 		if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
 		    != KERN_SUCCESS)
 			panic("pmap_enter: vm_fault failed");
+#endif
 		ptpa = pmap_extract(pmap_kernel(), va);
+		/*
+		 * Mark the page clean now to avoid its pageout (and
+		 * hence creation of a pager) between now and when it
+		 * is wired; i.e. while it is on a paging queue.
+		 */
+		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN;
+#if !defined(UVM)
 #ifdef DEBUG
 		PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE;
 #endif
+#endif
 	}
 
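uvm_fault(map, va, fault_type, access_type) splits what vm_fault() expressed as (map, va, access_type, change_wiring): the third argument (0 here) is a fault-type code that vm_fault() did not have, and the old change_wiring boolean drops out because mappings that must stay resident are wired separately with uvm_map_pageable(), as in the pmap_enter() hunk above. The new comment and PG_CLEAN store keep the freshly faulted PT page from being paged out between the fault and the wiring, and the PG_PTPAGE debug marking is simply compiled out under UVM.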
@@ -2491,10 +2658,17 @@ pmap_check_wiring(str, va)
 	    !pmap_pte_v(pmap_pte(pmap_kernel(), va)))
 		return;
 
+#if defined(UVM)
+	if (!uvm_map_lookup_entry(pt_map, va, &entry)) {
+		printf("wired_check: entry for %lx not found\n", va);
+		return;
+	}
+#else
 	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
 		printf("wired_check: entry for %lx not found\n", va);
 		return;
 	}
+#endif
 	count = 0;
 	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
 		if (*pte)