Implement MACHINE_NEW_NONCONTIG and switch the hp300 port to use it.

thorpej 1998-02-08 18:37:55 +00:00
parent 39f8b8c99b
commit 2f55a48f5c
4 changed files with 206 additions and 46 deletions
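
In outline, a MACHINE_NEW_NONCONTIG port supplies three pieces: it registers each physical memory segment with vm_page_physload() during early bootstrap, reports its kernel virtual address range through pmap_virtual_space(), and finds per-page data by translating a physical address into a (bank, offset) pair with vm_physseg_find(). A condensed sketch of the hp300 versions, all taken from the diffs below (single segment, no error handling):

	/* 1. Bootstrap: describe physical memory to the VM system. */
	vm_page_physload(atop(avail_start), atop(avail_end),
	    atop(avail_start), atop(avail_end));

	/* 2. Report the kernel virtual address space still available. */
	void
	pmap_virtual_space(vstartp, vendp)
		vm_offset_t *vstartp, *vendp;
	{
		*vstartp = virtual_avail;
		*vendp = virtual_end;
	}

	/* 3. Look up a managed page's pv entries through its segment. */
	bank = vm_physseg_find(atop(pa), &pg);
	pv = &vm_physmem[bank].pmseg.pvent[pg];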

machdep.c

@@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.103 1998/01/24 16:46:33 mycroft Exp $ */
/* $NetBSD: machdep.c,v 1.104 1998/02/08 18:37:55 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -192,6 +192,17 @@ hp300_init()
{
int i;
#if defined(MACHINE_NEW_NONCONTIG)
extern vm_offset_t avail_start, avail_end;
/*
* Tell the VM system about available physical memory. The
* hp300 only has one segment.
*/
vm_page_physload(atop(avail_start), atop(avail_end),
atop(avail_start), atop(avail_end));
#endif
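/*
 * Aside (a sketch, not part of this change): vm_page_physload()
 * takes page frame numbers, hence the atop() conversions.  The
 * first pair of arguments bounds the segment, the second pair the
 * pages actually free for allocation; a port that also wanted the
 * VM system to know about the pages occupied by the kernel could
 * register, e.g.:
 *
 *	vm_page_physload(atop(lowram), atop(avail_end),
 *	    atop(avail_start), atop(avail_end));
 */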
/* Initialize the interrupt handlers. */
intr_init();
@@ -823,16 +834,10 @@ cpu_init_kcore_hdr()
m->relocend = (u_int32_t)end;
/*
* hp300 has one contiguous memory segment. Note,
* RAM size is physmem + 1 to account for the msgbuf
* page.
*
* XXX There's actually one more page... the last one mapped
* XXX va == pa. Should we dump it? It's not really used
* XXX for anything except to reboot and the MMU trampoline.
* hp300 has one contiguous memory segment.
*/
m->ram_segs[0].start = lowram;
m->ram_segs[0].size = ctob(physmem + 1);
m->ram_segs[0].size = ctob(physmem);
}
/*

pmap.c

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.40 1998/01/31 01:32:59 ross Exp $ */
/* $NetBSD: pmap.c,v 1.41 1998/02/08 18:37:56 thorpej Exp $ */
/*
* Copyright (c) 1991, 1993
@@ -273,9 +273,7 @@ vm_offset_t avail_end; /* PA of last available physical page */
vm_size_t mem_size; /* memory size in bytes */
vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/
vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
vm_offset_t vm_first_phys; /* PA of first managed page */
vm_offset_t vm_last_phys; /* PA just past last managed page */
int npages;
int page_cnt; /* number of pages managed by VM system */
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
struct pv_entry *pv_table;
@@ -297,6 +295,36 @@ void pmap_collect_pv __P((void));
int pmap_mapmulti __P((pmap_t, vm_offset_t));
#endif /* COMPAT_HPUX */
#if !defined(MACHINE_NEW_NONCONTIG)
vm_offset_t vm_first_phys; /* PA of first managed page */
vm_offset_t vm_last_phys; /* PA just past last managed page */
#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
(pa) >= vm_first_phys && (pa) < vm_last_phys)
#define pa_to_pvh(pa) (&pv_table[pmap_page_index((pa))])
#define pa_to_attribute(pa) (&pmap_attributes[pmap_page_index((pa))])
#else
#define PAGE_IS_MANAGED(pa) (pmap_initialized && \
vm_physseg_find(atop((pa)), NULL) != -1)
#define pa_to_pvh(pa) \
({ \
int bank_, pg_; \
\
bank_ = vm_physseg_find(atop((pa)), &pg_); \
&vm_physmem[bank_].pmseg.pvent[pg_]; \
})
#define pa_to_attribute(pa) \
({ \
int bank_, pg_; \
\
bank_ = vm_physseg_find(atop((pa)), &pg_); \
&vm_physmem[bank_].pmseg.attrs[pg_]; \
})
#endif /* MACHINE_NEW_NONCONTIG */
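/*
 * Note: the ({ ... }) macro bodies above are GNU C statement
 * expressions (a GCC extension); they let each macro declare the
 * bank_/pg_ temporaries that the vm_physseg_find() lookup needs
 * while still yielding a value.
 */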
/*
* Internal routines
*/
@@ -304,6 +332,7 @@ void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int));
boolean_t pmap_testbit __P((vm_offset_t, int));
void pmap_changebit __P((vm_offset_t, int, boolean_t));
void pmap_enter_ptpage __P((pmap_t, vm_offset_t));
void pmap_collect1 __P((pmap_t, vm_offset_t, vm_offset_t));
#ifdef DEBUG
void pmap_pvdump __P((vm_offset_t));
@@ -314,6 +343,7 @@ void pmap_check_wiring __P((char *, vm_offset_t));
#define PRM_TFLUSH 1
#define PRM_CFLUSH 2
#if !defined(MACHINE_NEW_NONCONTIG)
/*
* Bootstrap memory allocator. This function allows for early dynamic
* memory allocation until the virtual memory system has been bootstrapped.
@@ -344,27 +374,65 @@ pmap_bootstrap_alloc(size)
bzero ((caddr_t) val, size);
return ((void *) val);
}
#endif /* ! MACHINE_NEW_NONCONTIG */
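/*
 * With MACHINE_NEW_NONCONTIG defined, early allocations are made
 * instead by the machine-independent vm_bootstrap_steal_memory()
 * (see pmap_virtual_space() below), which is why the old bootstrap
 * allocator is compiled out.
 */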
#if defined(MACHINE_NEW_NONCONTIG)
/*
* Routine: pmap_virtual_space
*
* Function:
* Report the range of available kernel virtual address
* space to the VM system during bootstrap. Called by
* vm_bootstrap_steal_memory().
*/
void
pmap_virtual_space(vstartp, vendp)
vm_offset_t *vstartp, *vendp;
{
*vstartp = virtual_avail;
*vendp = virtual_end;
}
#endif /* MACHINE_NEW_NONCONTIG */
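/*
 * Sketch of the caller's shape (assumed here, not part of this
 * commit): vm_bootstrap_steal_memory() obtains the usable VA range
 * from the pmap and carves early allocations out of its bottom
 * end, roughly:
 *
 *	vm_offset_t va, end;
 *
 *	pmap_virtual_space(&va, &end);
 *	... steal pages from vm_physmem[] and map them at va ...
 */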
/*
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
* Routine: pmap_init
*
* Function:
* Initialize the pmap module.
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
*/
#if defined(MACHINE_NEW_NONCONTIG)
void
pmap_init()
#else
void
pmap_init(phys_start, phys_end)
vm_offset_t phys_start, phys_end;
#endif
{
vm_offset_t addr, addr2;
vm_size_t s;
int rv;
int npages;
#if defined(MACHINE_NEW_NONCONTIG)
struct pv_entry *pv;
char *attr;
int bank;
#endif
#ifdef DEBUG
if (pmapdebug & PDB_FOLLOW)
#if defined(MACHINE_NEW_NONCONTIG)
printf("pmap_init()\n");
#else
printf("pmap_init(%lx, %lx)\n", phys_start, phys_end);
#endif
#endif
/*
* Now that kernel map has been allocated, we can mark as
* unavailable regions which we have mapped in locore.
* unavailable regions which we have mapped in pmap_bootstrap().
*/
addr = (vm_offset_t) intiobase;
(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
@@ -378,7 +446,7 @@ pmap_init(phys_start, phys_end)
/*
* If this fails it is probably because the static portion of
* the kernel page table isn't big enough and we overran the
* page table map. Need to adjust pmap_size() in hp300_init.c.
* page table map.
*/
if (addr != (vm_offset_t)Sysmap)
bogons:
@@ -397,23 +465,51 @@ bogons:
* Allocate memory for random pmap data structures. Includes the
* initial segment table, pv_head_table and pmap_attributes.
*/
npages = atop(phys_end - phys_start);
s = (vm_size_t) (HP_STSIZE + sizeof(struct pv_entry) * npages + npages);
#if defined(MACHINE_NEW_NONCONTIG)
for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++)
page_cnt += vm_physmem[bank].end - vm_physmem[bank].start;
#else
page_cnt = atop(phys_end - phys_start);
#endif
s = HP_STSIZE; /* Segtabzero */
s += page_cnt * sizeof(struct pv_entry); /* pv table */
s += page_cnt * sizeof(char); /* attribute table */
s = round_page(s);
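/*
 * Worked example (hypothetical numbers): with 32 MB of managed
 * memory and 4 KB pages, page_cnt is 8192; at, say, 20 bytes per
 * pv_entry that is 160 KB of pv table plus an 8 KB attribute
 * array, all rounded up to whole pages on top of HP_STSIZE.
 */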
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
Segtabzero = (st_entry_t *) addr;
Segtabzeropa = (st_entry_t *) pmap_extract(pmap_kernel(), addr);
addr += HP_STSIZE;
pv_table = (struct pv_entry *) addr;
addr += sizeof(struct pv_entry) * npages;
addr += page_cnt * sizeof(struct pv_entry);
pmap_attributes = (char *) addr;
#ifdef DEBUG
if (pmapdebug & PDB_INIT)
printf("pmap_init: %lx bytes: npages %x s0 %p(%p) tbl %p atr %p\n",
s, npages, Segtabzero, Segtabzeropa,
printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) "
"tbl %p atr %p\n",
s, page_cnt, Segtabzero, Segtabzeropa,
pv_table, pmap_attributes);
#endif
#if defined(MACHINE_NEW_NONCONTIG)
/*
* Now that the pv and attribute tables have been allocated,
* assign them to the memory segments.
*/
pv = pv_table;
attr = pmap_attributes;
for (bank = 0; bank < vm_nphysseg; bank++) {
npages = vm_physmem[bank].end - vm_physmem[bank].start;
vm_physmem[bank].pmseg.pvent = pv;
vm_physmem[bank].pmseg.attrs = attr;
pv += npages;
attr += npages;
}
#endif
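/*
 * On the hp300 vm_nphysseg is 1, so this loop just points
 * pmseg.pvent at pv_table and pmseg.attrs at pmap_attributes;
 * the loop form stays correct for any VM_PHYSSEG_MAX.
 */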
/*
* Allocate physical memory for kernel PT pages and their management.
* We need 1 PT page per possible task plus some slop.
@@ -505,8 +601,10 @@ bogons:
/*
* Now it is safe to enable pv_table recording.
*/
#if !defined(MACHINE_NEW_NONCONTIG)
vm_first_phys = phys_start;
vm_last_phys = phys_end;
#endif
pmap_initialized = TRUE;
}
@@ -592,7 +690,7 @@ pmap_collect_pv()
if (pv_page_collectlist.tqh_first == 0)
return;
for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) {
for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) {
if (ph->pv_pmap == 0)
continue;
s = splimp();
@@ -963,7 +1061,7 @@ pmap_page_protect(pa, prot)
(prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
printf("pmap_page_protect(%lx, %x)\n", pa, prot);
#endif
if (pa < vm_first_phys || pa >= vm_last_phys)
if (PAGE_IS_MANAGED(pa) == 0)
return;
switch (prot) {
@@ -1250,7 +1348,7 @@ pmap_enter(pmap, va, pa, prot, wired)
* Note that we raise IPL while manipulating pv_table
* since pmap_enter can be called at interrupt time.
*/
if (pa >= vm_first_phys && pa < vm_last_phys) {
if (PAGE_IS_MANAGED(pa)) {
struct pv_entry *pv, *npv;
int s;
@@ -1590,16 +1688,12 @@ void
pmap_collect(pmap)
pmap_t pmap;
{
vm_offset_t pa;
struct pv_entry *pv;
pt_entry_t *pte;
vm_offset_t kpa;
#if defined(MACHINE_NEW_NONCONTIG)
int bank, s;
#else
int s;
#endif /* MACHINE_NEW_NONCONTIG */
#ifdef DEBUG
st_entry_t *ste;
int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
#endif
if (pmap != pmap_kernel())
return;
@@ -1611,7 +1705,43 @@ pmap_collect(pmap)
kpt_stats.collectscans++;
#endif
s = splimp();
for (pa = vm_first_phys; pa < vm_last_phys; pa += NBPG) {
#if defined(MACHINE_NEW_NONCONTIG)
for (bank = 0; bank < vm_nphysseg; bank++)
pmap_collect1(pmap, ptoa(vm_physmem[bank].start),
ptoa(vm_physmem[bank].end));
#else
pmap_collect1(pmap, vm_first_phys, vm_last_phys);
#endif /* MACHINE_NEW_NONCONTIG */
splx(s);
#ifdef notyet
/* Go compact and garbage-collect the pv_table. */
pmap_collect_pv();
#endif
}
/*
* Routine: pmap_collect1()
*
* Function:
* Helper function for pmap_collect(). Do the actual
garbage-collection of a range of physical addresses.
*/
void
pmap_collect1(pmap, startpa, endpa)
pmap_t pmap;
vm_offset_t startpa, endpa;
{
vm_offset_t pa;
struct pv_entry *pv;
pt_entry_t *pte;
vm_offset_t kpa;
#ifdef DEBUG
st_entry_t *ste;
int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */;
#endif
for (pa = startpa; pa < endpa; pa += NBPG) {
struct kpt_page *kpt, **pkpt;
/*
@@ -1698,7 +1828,6 @@ ok:
ste, *ste);
#endif
}
splx(s);
}
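/*
 * Note that pmap_collect() now takes splimp() around the loop over
 * banks, so pmap_collect1() runs entirely at splimp; the splx()
 * that closed the old in-line loop (removed above) moved out with
 * it.
 */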
/*
@@ -1809,7 +1938,7 @@ pmap_pageable(pmap, sva, eva, pageable)
if (!pmap_ste_v(pmap, sva))
return;
pa = pmap_pte_pa(pmap_pte(pmap, sva));
if (pa < vm_first_phys || pa >= vm_last_phys)
if (PAGE_IS_MANAGED(pa) == 0)
return;
pv = pa_to_pvh(pa);
if (pv->pv_ptste == NULL)
@@ -2061,7 +2190,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
/*
* If this isn't a managed page, we are all done.
*/
if (pa < vm_first_phys || pa >= vm_last_phys)
if (PAGE_IS_MANAGED(pa) == 0)
return;
/*
* Otherwise remove it from the PV table
@@ -2210,7 +2339,7 @@ pmap_remove_mapping(pmap, va, pte, flags)
/*
* Update saved attributes for managed page
*/
pmap_attributes[pmap_page_index(pa)] |= bits;
*pa_to_attribute(pa) |= bits;
splx(s);
}
@@ -2224,7 +2353,7 @@ pmap_testbit(pa, bit)
pt_entry_t *pte;
int s;
if (pa < vm_first_phys || pa >= vm_last_phys)
if (PAGE_IS_MANAGED(pa) == 0)
return(FALSE);
pv = pa_to_pvh(pa);
@@ -2232,7 +2361,7 @@ pmap_testbit(pa, bit)
/*
* Check saved info first
*/
if (pmap_attributes[pmap_page_index(pa)] & bit) {
if (*pa_to_attribute(pa) & bit) {
splx(s);
return(TRUE);
}
@@ -2283,7 +2412,7 @@ pmap_changebit(pa, bit, setem)
printf("pmap_changebit(%lx, %x, %s)\n",
pa, bit, setem ? "set" : "clear");
#endif
if (pa < vm_first_phys || pa >= vm_last_phys)
if (PAGE_IS_MANAGED(pa) == 0)
return;
#ifdef PMAPSTATS
@@ -2299,7 +2428,7 @@ pmap_changebit(pa, bit, setem)
* Clear saved attributes (modify, reference)
*/
if (!setem)
pmap_attributes[pmap_page_index(pa)] &= ~bit;
*pa_to_attribute(pa) &= ~bit;
/*
* Loop over all current mappings setting/clearing as appropriate.
* If setting RO do we need to clear the VAC?

pmap.h

@@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.18 1998/01/06 07:03:06 thorpej Exp $ */
/* $NetBSD: pmap.h,v 1.19 1998/02/08 18:37:58 thorpej Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@@ -144,7 +144,6 @@ extern struct pmap kernel_pmap_store;
extern struct pv_entry *pv_table; /* array of entries, one per page */
#define pmap_page_index(pa) atop(pa - vm_first_phys)
#define pa_to_pvh(pa) (&pv_table[pmap_page_index(pa)])
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)

vmparam.h

@@ -1,4 +1,4 @@
/* $NetBSD: vmparam.h,v 1.12 1997/07/12 16:18:58 perry Exp $ */
/* $NetBSD: vmparam.h,v 1.13 1998/02/08 18:37:59 thorpej Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@@ -42,9 +42,13 @@
* @(#)vmparam.h 8.2 (Berkeley) 4/19/94
*/
#ifndef _HP300_VMPARAM_H_
#define _HP300_VMPARAM_H_
/*
* Machine dependent constants for HP300
*/
/*
* USRTEXT is the start of the user text/data space, while USRSTACK
* is the top (end) of the user stack. LOWPAGES and HIGHPAGES are
@@ -241,3 +245,26 @@
/* pcb base */
#define pcbb(p) ((u_int)(p)->p_addr)
/* Use new VM page bootstrap interface. */
#define MACHINE_NEW_NONCONTIG
#if defined(MACHINE_NEW_NONCONTIG)
/*
* Constants which control the way the VM system deals with memory segments.
* The hp300 only has one physical memory segment.
*/
#define VM_PHYSSEG_MAX 1
#define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
#define VM_PHYSSEG_NOADD
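/*
 * With a single segment the lookup strategy is largely moot;
 * VM_PHYSSEG_NOADD additionally promises that no further physical
 * memory will be loaded after initial bootstrap.
 */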
/*
* pmap-specific data stored in the vm_physmem[] array.
*/
struct pmap_physseg {
struct pv_entry *pvent; /* pv table for this seg */
char *attrs; /* page attributes for this seg */
};
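/*
 * The VM system embeds this structure in each vm_physmem[] entry
 * as its pmseg member; pmap.c reaches it as
 * vm_physmem[bank].pmseg.pvent[pg] and vm_physmem[bank].pmseg.attrs[pg].
 */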
#endif /* MACHINE_NEW_NONCONTIG */
#endif /* _HP300_VMPARAM_H_ */