Support & require MACHINE_NONCONTIG
parent e6c81430d2
commit 80bf490a56
Index: atari_init.c
@@ -1,4 +1,4 @@
-/* $NetBSD: atari_init.c,v 1.6 1995/05/28 19:10:17 leo Exp $ */
+/* $NetBSD: atari_init.c,v 1.7 1995/06/09 19:42:22 leo Exp $ */
 
 /*
  * Copyright (c) 1995 Leo Weppelman
@@ -72,17 +72,6 @@ u_long boot_ttphystart, boot_ttphysize, boot_stphysize;
 
 extern char *esym;
 
-/*
- * The following vars become valid just before pmap_bootstrap is called.
- * They give info about free memory with the kernel and the tables allocated
- * in 'start_c' excluded.
- */
-extern vm_offset_t st_ramstart;   /* First available ST RAM address     */
-extern vm_offset_t st_ramend;     /* First ST RAM address not available */
-extern vm_offset_t tt_ramstart;   /* First available TT RAM address     */
-extern vm_offset_t tt_ramend;     /* First TT RAM address not available */
-
-
 /*
  * This is the virtual address of physical page 0. Used by 'do_boot()'.
  */
@@ -346,18 +335,22 @@ char *esym_addr;   /* Address of kernel '_esym' symbol */
     lowram  = 0 >> PGSHIFT;    /* XXX */
 
     /*
-     * Initialize memory sizes
+     * Fill in segments. The page indexes will be initialized
+     * later when all reservations are made.
      */
-    st_ramstart = 0;
-    st_ramend   = stphysize;
-    tt_ramstart = ttphystart;
-    tt_ramend   = ttphystart + ttphysize;
+    phys_segs[0].start = 0;
+    phys_segs[0].end   = stphysize;
+    phys_segs[1].start = ttphystart;
+    phys_segs[1].end   = ttphystart + ttphysize;
+    phys_segs[2].start = 0;    /* End of segments! */
+
     if(kbase) {
         /*
-         * First page of ST-ram is unusable.
+         * First page of ST-ram is unusable, reserve the space
+         * for the kernel in the TT-ram segment.
          */
-        st_ramstart  = NBPG;
-        tt_ramstart += pstart;
+        phys_segs[0].start  = NBPG;
+        phys_segs[1].start += pstart;
     }
     else {
         /*
@@ -387,14 +380,26 @@ char *esym_addr;   /* Address of kernel '_esym' symbol */
 
 
         /*
-         * Reserve space for page 0
+         * Reserve space for page 0, and allocate the kernel
+         * space from the ST-ram segment.
          */
         pstart += NBPG;
 
-        st_ramstart += pstart;
+        phys_segs[0].start += pstart;
     }
-
-    physmem = ((st_ramend-st_ramstart)+(tt_ramend-tt_ramstart)) >> PGSHIFT;
+    /*
+     * As all segment sizes are now valid, calculate page indexes and
+     * available physical memory.
+     */
+    phys_segs[0].first_page = 0;
+    for (i = 1; phys_segs[i].start; i++) {
+        phys_segs[i].first_page = phys_segs[i-1].first_page;
+        phys_segs[i].first_page +=
+            (phys_segs[i-1].end - phys_segs[i-1].start) / NBPG;
+    }
+    for (i = 0, physmem = 0; phys_segs[i].start; i++)
+        physmem += phys_segs[i].end - phys_segs[i].start;
+    physmem >>= PGSHIFT;
 
     /*
      * get the pmap module in sync with reality.
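
Editor's note: the two loops added above derive each segment's starting page
index and the machine's total page count from the phys_segs table. The
standalone sketch below mirrors those loops so they can be tried in isolation;
the segment addresses and the 8 KB page size (NBPG) are assumptions made for
illustration, not values taken from this commit.

/*
 * Standalone sketch of the page-index bookkeeping that start_c() now does.
 * The segment layout and the 8 KB page size are illustrative assumptions.
 */
#include <stdio.h>

#define PGSHIFT 13
#define NBPG    (1 << PGSHIFT)

struct physeg {
    unsigned long start;      /* PA of first usable byte     */
    unsigned long end;        /* PA just past the segment    */
    int           first_page; /* relative page# of 'start'   */
};

int
main(void)
{
    /*
     * Example: 4 MB of ST-RAM (first page reserved, as in the kernel)
     * and 8 MB of TT-RAM at 0x01000000.  A zero 'start' ends the list.
     */
    struct physeg phys_segs[] = {
        { NBPG,       0x00400000, 0 },
        { 0x01000000, 0x01800000, 0 },
        { 0, 0, 0 }
    };
    unsigned long physmem;
    int i;

    /* Same two loops the commit adds to start_c(). */
    phys_segs[0].first_page = 0;
    for (i = 1; phys_segs[i].start; i++)
        phys_segs[i].first_page = phys_segs[i-1].first_page +
            (phys_segs[i-1].end - phys_segs[i-1].start) / NBPG;

    for (i = 0, physmem = 0; phys_segs[i].start; i++)
        physmem += phys_segs[i].end - phys_segs[i].start;
    physmem >>= PGSHIFT;

    for (i = 0; phys_segs[i].start; i++)
        printf("segment %d: first_page %d\n", i, phys_segs[i].first_page);
    printf("physmem = %lu pages\n", physmem);
    return 0;
}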

Index: pmap.c
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.c,v 1.3 1995/05/14 15:20:27 leo Exp $ */
+/* $NetBSD: pmap.c,v 1.4 1995/06/09 19:42:36 leo Exp $ */
 
 /*
  * Copyright (c) 1991 Regents of the University of California.
@@ -100,8 +100,8 @@ struct kpt_stats {
     int kptmaxuse;
 };
 struct enter_stats {
-    int kernel;     /* entering kernel mapping */
-    int user;       /* entering user mapping */
+    int kernel;     /* entering kernel mapping */
+    int user;       /* entering user mapping */
     int ptpneeded;  /* needed to allocate a PT page */
     int pwchange;   /* no mapping change, just wiring or protection */
     int wchange;    /* no mapping change, just wiring */
@@ -109,7 +109,7 @@ struct enter_stats {
     int managed;    /* a managed page */
     int firstpv;    /* first mapping for this PA */
     int secondpv;   /* second mapping for this PA */
-    int ci;         /* cache inhibited */
+    int ci;         /* cache inhibited */
     int unmanaged;  /* not a managed page */
     int flushes;    /* cache flushes */
 };
@@ -127,20 +127,20 @@ struct remove_stats remove_stats;
 struct enter_stats enter_stats;
 struct kpt_stats kpt_stats;
 
-#define PDB_FOLLOW   0x0001
-#define PDB_INIT     0x0002
-#define PDB_ENTER    0x0004
-#define PDB_REMOVE   0x0008
-#define PDB_CREATE   0x0010
-#define PDB_PTPAGE   0x0020
-#define PDB_CACHE    0x0040
-#define PDB_BITS     0x0080
-#define PDB_COLLECT  0x0100
-#define PDB_PROTECT  0x0200
-#define PDB_SEGTAB   0x0400
+#define PDB_FOLLOW   0x0001
+#define PDB_INIT     0x0002
+#define PDB_ENTER    0x0004
+#define PDB_REMOVE   0x0008
+#define PDB_CREATE   0x0010
+#define PDB_PTPAGE   0x0020
+#define PDB_CACHE    0x0040
+#define PDB_BITS     0x0080
+#define PDB_COLLECT  0x0100
+#define PDB_PROTECT  0x0200
+#define PDB_SEGTAB   0x0400
 #define PDB_PARANOIA 0x2000
-#define PDB_WIRING   0x4000
-#define PDB_PVDUMP   0x8000
+#define PDB_WIRING   0x4000
+#define PDB_PVDUMP   0x8000
 int debugmap = 0;
 int pmapdebug = PDB_PARANOIA;
 #endif
@@ -202,7 +202,7 @@ struct kpt_page *kpt_pages;
  */
 u_int *Sysseg1;    /* root segment for 68040 */
 u_int *Sysseg;
-u_int *Sysptmap;
+u_int *Sysmap, *Sysptmap;
 u_int *Segtabzero;
 vm_size_t Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
 
@@ -211,6 +211,7 @@ vm_map_t pt_map;
 
 vm_offset_t avail_start;     /* PA of first available physical page */
 vm_offset_t avail_end;       /* PA of last available physical page */
+vm_size_t   mem_size;        /* memory size in bytes */
 vm_offset_t virtual_avail;   /* VA of first avail page (after kernel bss)*/
 vm_offset_t virtual_end;     /* VA of last avail page (end of kernel AS) */
 vm_offset_t vm_first_phys;   /* PA of first managed page */
@@ -219,19 +220,20 @@ boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
 char *pmap_attributes;       /* reference and modify bits */
 static int pmap_ishift;      /* segment table index shift */
 
+#ifdef MACHINE_NONCONTIG
+static vm_offset_t avail_next;
+static vm_size_t   avail_remaining;
+#endif
+
 boolean_t pmap_testbit();
 void      pmap_enter_ptpage();
 
-/*
- * The following vars become valid just before pmap_bootstrap is called.
- * They give info about free memory with the kernel and the tables allocated
- * in 'start_c' excluded.
- */
-vm_offset_t st_ramstart;     /* First available ST RAM address     */
-vm_offset_t st_ramend;       /* First ST RAM address not available */
-vm_offset_t tt_ramstart;     /* First available TT RAM address     */
-vm_offset_t tt_ramend;       /* First TT RAM address not available */
-
+#ifdef MACHINE_NONCONTIG
+#define pmap_valid_page(pa)  (pmap_initialized && pmap_isvalidphys(pa))
+#else
+#define pmap_valid_page(pa)  (pmap_initialized && pa >= vm_first_phys && \
+                              pa < vm_last_phys)
+#endif
 
 /*
  * All those kernel PT submaps that BSD is so fond of
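
Editor's note: the pmap_valid_page() macro added here is why most of the later
`pa < vm_first_phys || pa >= vm_last_phys` tests in this file change. Once
ST-RAM and TT-RAM are described as separate segments, a single
[vm_first_phys, vm_last_phys) range would also accept addresses that fall in
the hole between them. A toy comparison of the old-style range check against
a segment walk like pmap_isvalidphys(); all addresses are made up:

/*
 * Toy illustration only: a physical address in the hole between two
 * segments passes the contiguous check but fails the segment walk.
 */
#include <stdio.h>

struct seg { unsigned long start, end; };

static struct seg segs[] = {
    { 0x00002000, 0x00400000 },    /* ST-RAM (example) */
    { 0x01000000, 0x01800000 },    /* TT-RAM (example) */
    { 0, 0 }
};

static int
valid_contig(unsigned long pa)     /* old style: one [first, last) range */
{
    return pa >= segs[0].start && pa < segs[1].end;
}

static int
valid_segs(unsigned long pa)       /* new style: walk the segment list */
{
    struct seg *sep;

    for (sep = segs; sep->start; sep++)
        if (pa >= sep->start && pa < sep->end)
            return 1;
    return 0;
}

int
main(void)
{
    unsigned long hole = 0x00800000;    /* between the two segments */

    printf("contiguous check says %d, segment walk says %d\n",
        valid_contig(hole), valid_segs(hole));
    return 0;
}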
@@ -250,24 +252,36 @@ u_int *CMAP1, *CMAP2, *vmpte, *msgbufmap;
  * from the linked base (virtual) address 0 to the actual (physical)
  * address of 0xFFxxxxxx.]
  */
-void pmap_bootstrap(kernel_size)
+void
+pmap_bootstrap(kernel_size)
 vm_offset_t kernel_size;
 {
     vm_offset_t va;
     u_int       *pte;
     int         i;
 
     /*
      * Setup physical address ranges
      */
-    avail_start = st_ramstart;
-    avail_end   = tt_ramend ? tt_ramend : st_ramend;
+    for (i = 0; phys_segs[i+1].start; i++)
+        ;
     /* XXX: allow for msgbuf */
-    avail_end  -= atari_round_page(sizeof(struct msgbuf));
+    phys_segs[i].end -= atari_round_page(sizeof(struct msgbuf));
+
+    avail_start = phys_segs[0].start;
+    avail_end   = phys_segs[i].end;
 
+#ifdef MACHINE_NONCONTIG
     /*
-     * Setup virtual address ranges
+     * Setup var's for pmap_next_page()
      */
+    for (i = 0, avail_remaining = 0; phys_segs[i].start; i++)
+        avail_remaining += phys_segs[i].end - phys_segs[i].start;
+
+    avail_remaining >>= PGSHIFT;
+    avail_next = phys_segs[0].start;
+#endif
 
     virtual_avail = VM_MIN_KERNEL_ADDRESS + kernel_size;
     virtual_end   = VM_MAX_KERNEL_ADDRESS;
 
@@ -285,7 +299,7 @@ vm_offset_t kernel_size;
 #ifdef M68040
     if (cpu040) {
         pmap_kernel()->pm_rtab = Sysseg1;
-        pmap_ishift = SG4_ISHIFT;
+        pmap_ishift = SG4_SHIFT2;
     } else
 #endif
         pmap_ishift = SG_ISHIFT;
@@ -299,7 +313,7 @@ vm_offset_t kernel_size;
 #define SYSMAP(c, p, v, n) \
     v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);
 
-    va = virtual_avail;
+    va = virtual_avail;
     pte = pmap_pte(pmap_kernel(), va);
 
     SYSMAP(caddr_t ,CMAP1 ,CADDR1 ,1 )
@@ -331,7 +345,7 @@ pmap_bootstrap_alloc(size)
     if (vm_page_startup_initialized)
         panic("pmap_bootstrap_alloc: called after startup initialized");
     size = round_page(size);
-    val = virtual_avail;
+    val = virtual_avail;
 
     virtual_avail = pmap_map(virtual_avail, avail_start,
         avail_start + size, VM_PROT_READ|VM_PROT_WRITE);
@@ -348,8 +362,12 @@ pmap_bootstrap_alloc(size)
  * system needs to map virtual memory.
  */
 void
+#ifdef MACHINE_NONCONTIG
+pmap_init()
+#else
 pmap_init(phys_start, phys_end)
 vm_offset_t phys_start, phys_end;
+#endif
 {
     vm_offset_t addr, addr2;
     vm_size_t   npg, s;
@@ -357,7 +375,11 @@ pmap_init(phys_start, phys_end)
 
 #ifdef DEBUG
     if (pmapdebug & PDB_FOLLOW)
+#ifdef MACHINE_NONCONTIG
+        printf("pmap_init(%x, %x)\n", avail_start, avail_end);
+#else
         printf("pmap_init(%x, %x)\n", phys_start, phys_end);
+#endif
 #endif
     addr = (vm_offset_t) Sysmap;
     vm_object_reference(kernel_object);
@@ -383,7 +405,19 @@ pmap_init(phys_start, phys_end)
      * Allocate memory for random pmap data structures. Includes the
      * initial segment table, pv_head_table and pmap_attributes.
      */
+#ifdef MACHINE_NONCONTIG
+    {
+        int i;
+        for (npg = 0, i = 0; phys_segs[i].start; ++i)
+            npg += atop(phys_segs[i].end - phys_segs[i].start);
+    }
+#ifdef DEBUG
+    printf ("pmap_init: avail_start %08x phys_segs[0].start %08x npg %d\n",
+        avail_start, phys_segs[0].start, npg);
+#endif
+#else
     npg = atop(phys_end - phys_start);
+#endif
 #ifdef M68040
     if (cpu040)
         s = (vm_size_t)ATARI_040STSIZE * 128 +
@@ -478,11 +512,80 @@ pmap_init(phys_start, phys_end)
     /*
      * Now it is safe to enable pv_table recording.
      */
+#ifdef MACHINE_NONCONTIG
+    vm_first_phys = avail_start;
+    vm_last_phys  = avail_end;
+#else
     vm_first_phys = phys_start;
     vm_last_phys  = phys_end;
+#endif
     pmap_initialized = TRUE;
 }
 
+#ifdef MACHINE_NONCONTIG
+unsigned int
+pmap_free_pages()
+{
+
+    return avail_remaining;
+}
+
+int
+pmap_next_page(addrp)
+vm_offset_t *addrp;
+{
+    static int cur_seg = 0;
+
+#ifdef DEBUG
+    if(!cur_seg && (avail_next == phys_segs[cur_seg].start))
+        printf ("pmap_next_page: next %08x remain %d\n",
+            avail_next, avail_remaining);
+#endif
+    if (phys_segs[cur_seg].start == 0)
+        return FALSE;
+    if (avail_next == phys_segs[cur_seg].end) {
+        avail_next = phys_segs[++cur_seg].start;
+#ifdef DEBUG
+        printf ("pmap_next_page: next %08x remain %d\n",
+            avail_next, avail_remaining);
+#endif
+    }
+
+    if (avail_next == 0)
+        return FALSE;
+    *addrp = avail_next;
+    avail_next += NBPG;
+    avail_remaining--;
+    return TRUE;
+}
+
+int
+pmap_page_index(pa)
+vm_offset_t pa;
+{
+
+    struct physeg *sep = &phys_segs[0];
+
+    while (sep->start) {
+        if (pa >= sep->start && pa < sep->end)
+            return (atari_btop(pa - sep->start) + sep->first_page);
+        ++sep;
+    }
+    return -1;
+}
+
+void
+pmap_virtual_space(startp, endp)
+vm_offset_t *startp;
+vm_offset_t *endp;
+{
+    *startp = virtual_avail;
+    *endp   = virtual_end;
+}
+#else
+#define pmap_page_index(pa)  (pa_index(pa))
+#endif /* MACHINE_NONCONTIG */
+
 /*
  * Used to map a range of physical addresses into kernel
  * virtual address space.
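
Editor's note: the hooks added above (pmap_free_pages, pmap_next_page,
pmap_page_index, pmap_virtual_space) are what a MACHINE_NONCONTIG VM startup
uses instead of a single phys_start/phys_end pair. The sketch below is a rough
user-space model of that interaction, not the actual vm_page_startup() code;
the segment layout and the 8 KB page size are invented for the example.

/*
 * Simplified model: hand out free pages one at a time across two segments
 * and compute a dense page index for each, as the MI startup code would.
 */
#include <stdio.h>

#define NBPG 8192                  /* assumed page size */

struct physeg { unsigned long start, end; int first_page; };

static struct physeg phys_segs[] = {
    { 0x2000,     0x6000,     0 },     /* 2 pages (example) */
    { 0x01000000, 0x01004000, 2 },     /* 2 more  (example) */
    { 0, 0, 0 }
};
static unsigned long avail_next = 0x2000;
static long avail_remaining = 4;

static int
pmap_next_page(unsigned long *addrp)
{
    static int cur_seg = 0;

    if (phys_segs[cur_seg].start == 0)
        return 0;
    if (avail_next == phys_segs[cur_seg].end)
        avail_next = phys_segs[++cur_seg].start;
    if (avail_next == 0)
        return 0;
    *addrp = avail_next;
    avail_next += NBPG;
    avail_remaining--;
    return 1;
}

static int
pmap_page_index(unsigned long pa)
{
    struct physeg *sep;

    for (sep = phys_segs; sep->start; sep++)
        if (pa >= sep->start && pa < sep->end)
            return (int)((pa - sep->start) / NBPG) + sep->first_page;
    return -1;
}

int
main(void)
{
    unsigned long pa;

    while (pmap_next_page(&pa))
        printf("page index %d -> PA %#lx\n", pmap_page_index(pa), pa);
    printf("%ld pages remaining\n", avail_remaining);
    return 0;
}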
@@ -749,7 +852,7 @@ pmap_remove(pmap, sva, eva)
          * Remove from the PV table (raise IPL since we
          * may be called at interrupt time).
          */
-        if (pa < vm_first_phys || pa >= vm_last_phys)
+        if (!pmap_valid_page(pa))
             continue;
         pv = pa_to_pvh(pa);
         ste = (int *)0;
@@ -782,8 +885,10 @@ pmap_remove(pmap, sva, eva)
                 pv = npv;
             }
 #ifdef DEBUG
-            if (npv == NULL)
+            if (npv == NULL) {
+                printf ("pmap_remove: PA %08x index %d\n", pa, pa_index(pa));
                 panic("pmap_remove: PA not in pv_tab");
+            }
 #endif
             ste = (int *)npv->pv_ptste;
             ptpmap = npv->pv_ptpmap;
@@ -920,7 +1025,7 @@ pmap_page_protect(pa, prot)
         prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
         printf("pmap_page_protect(%x, %x)\n", pa, prot);
 #endif
-    if (pa < vm_first_phys || pa >= vm_last_phys)
+    if (!pmap_valid_page(pa))
         return;
 
     switch (prot) {
@@ -939,7 +1044,13 @@ pmap_page_protect(pa, prot)
 #ifdef DEBUG
             if (!pmap_ste_v(pmap_ste(pv->pv_pmap,pv->pv_va)) ||
                 pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
+            {
+                printf ("pmap_page_protect: va %08x, pmap_ste_v %d pmap_pte_pa %08x/%08x\n",
+                    pv->pv_va, pmap_ste_v(pmap_ste(pv->pv_pmap,pv->pv_va)),
+                    pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)),pa);
+                printf (" pvh %08x pv %08x pv_next %08x\n", pa_to_pvh(pa), pv, pv->pv_next);
                 panic("pmap_page_protect: bad mapping");
+            }
 #endif
             pmap_remove(pv->pv_pmap, pv->pv_va,
                 pv->pv_va + PAGE_SIZE);
@@ -1079,7 +1190,7 @@ pmap_enter(pmap, va, pa, prot, wired)
         opa = pmap_pte_pa(pte);
 #ifdef DEBUG
         if (pmapdebug & PDB_ENTER)
-            printf("enter: pte %x, *pte %x, opa: %x\n", pte, *(int *)pte, opa);
+            printf("enter: pte %x, *pte %x\n", pte, *(int *)pte);
 #endif
 
         /*
@@ -1146,7 +1257,7 @@ pmap_enter(pmap, va, pa, prot, wired)
      * Note that we raise IPL while manipulating pv_table
      * since pmap_enter can be called at interrupt time.
      */
-    if (pa >= vm_first_phys && pa < vm_last_phys) {
+    if (pmap_valid_page(pa)) {
         register pv_entry_t pv, npv;
         int s;
 
@@ -1604,7 +1715,7 @@ pmap_pageable(pmap, sva, eva, pageable)
         if (!pmap_ste_v(pmap_ste(pmap, sva)))
             return;
         pa = pmap_pte_pa(pmap_pte(pmap, sva));
-        if (pa < vm_first_phys || pa >= vm_last_phys)
+        if (!pmap_valid_page(pa))
             return;
         pv = pa_to_pvh(pa);
         if (pv->pv_ptste == NULL)
@@ -1750,7 +1861,7 @@ pmap_testbit(pa, bit)
     register int *pte;
     int s;
 
-    if (pa < vm_first_phys || pa >= vm_last_phys)
+    if (!pmap_valid_page(pa))
         return(FALSE);
 
     pv = pa_to_pvh(pa);
@@ -1798,7 +1909,7 @@ pmap_changebit(pa, bit, setem)
         printf("pmap_changebit(%x, %x, %s)\n",
             pa, bit, setem ? "set" : "clear");
 #endif
-    if (pa < vm_first_phys || pa >= vm_last_phys)
+    if (!pmap_valid_page(pa))
         return;
 
     pv = pa_to_pvh(pa);
@@ -1926,8 +2037,9 @@ pmap_enter_ptpage(pmap, va)
      */
 #ifdef M68040
     if (cpu040) {
-        ste = pmap_ste(pmap, va & (SG4_IMASK << 6));
-        va = trunc_page((vm_offset_t)pmap_pte(pmap, va & (SG4_IMASK << 6)));
+        ste = pmap_ste(pmap, va & ((SG4_MASK1 | SG4_MASK2) << 6));
+        va = trunc_page((vm_offset_t)pmap_pte(pmap,
+            va & ((SG4_MASK1|SG4_MASK2) << 6)));
     } else
 #endif
     {
@@ -2059,14 +2171,18 @@ pmap_enter_ptpage(pmap, va)
     splx(s);
 }
 
-int pmap_isvalidphys(pa)
-vm_offset_t pa;
+int
+pmap_isvalidphys(pa)
+vm_offset_t pa;
 {
-    if((pa >= st_ramstart) && (pa < st_ramend))
+    struct physeg *sep = &phys_segs[0];
+
+    while (sep->start) {
+        if (pa >= sep->start && pa < sep->end)
             return(TRUE);
-    if((pa >= tt_ramstart) && (pa < tt_ramend))
-        return(TRUE);
-    return(FALSE);
+        ++sep;
+    }
+    return(FALSE);
 }
 
 #ifdef DEBUG

Index: std.atari
@@ -1,8 +1,11 @@
-#	$NetBSD: std.atari,v 1.2 1995/04/28 11:36:33 leo Exp $
+#	$NetBSD: std.atari,v 1.3 1995/06/09 19:43:23 leo Exp $
 #
 # standard atari information
 #
-machine		atari m68k
+machine		atari m68k
 
+options		MACHINE_NONCONTIG	# Non-contiguous memory support
+					# (required)
+
 mainbus0	at root
 

Index: pmap.h
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.5 1995/04/16 14:55:09 leo Exp $ */
+/* $NetBSD: pmap.h,v 1.6 1995/06/09 19:43:41 leo Exp $ */
 
 /*
  * Copyright (c) 1987 Carnegie-Mellon University
@@ -75,6 +75,18 @@ typedef struct pmap *pmap_t;
 }
 #define PMAP_DEACTIVATE(pmapp, pcbp)
 
+/*
+ * Description of the memory segments. Build in atari_init/start_c().
+ * This gives a better separation between machine dependent stuff and
+ * the pmap-module.
+ */
+#define NPHYS_SEGS	8
+struct physeg {
+    vm_offset_t start;        /* PA of first page in segment */
+    vm_offset_t end;          /* PA of last page in segment  */
+    int         first_page;   /* relative page# of 'start'   */
+};
+
 /*
  * For each vm_page_t, there is a list of all currently valid virtual
  * mappings of that page. An entry is a pv_entry_t, the list is pv_table.
@@ -92,14 +104,19 @@ typedef struct pv_entry {
 #define PV_PTPAGE	0x02	/* entry maps a page table page */
 
 #ifdef _KERNEL
+struct physeg	phys_segs[NPHYS_SEGS];
 pv_entry_t	pv_table;	/* array of entries, one per page */
 u_int		*Sysmap;
 char		*vmmap;		/* map for mem, dumps, etc. */
 struct pmap	kernel_pmap_store;
 
-#define pa_index(pa)		atop(pa - vm_first_phys)
-#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
-#define pmap_kernel()		(&kernel_pmap_store)
+#ifdef MACHINE_NONCONTIG
+#define pa_index(pa)		pmap_page_index(pa)
+#else
+#define pa_index(pa)		atop(pa - vm_first_phys)
+#endif /* MACHINE_NONCONTIG */
+#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
+#define pmap_kernel()		(&kernel_pmap_store)
 #define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
 #endif /* _KERNEL */
 
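
Editor's note: the pa_index() redirection above is what keeps pv_table dense.
With a hole between ST-RAM and TT-RAM, the contiguous formula
atop(pa - vm_first_phys) would produce indexes far beyond the number of
managed pages, while pmap_page_index() folds each segment back onto
consecutive indexes. A small worked comparison; the addresses are invented
and an 8 KB page size is assumed:

/*
 * Compare the contiguous index formula with the segment-relative one for
 * the first TT-RAM page.  Example values only.
 */
#include <stdio.h>

#define NBPG    8192
#define atop(x) ((unsigned long)(x) / NBPG)

struct physeg { unsigned long start, end; int first_page; };

static struct physeg phys_segs[] = {
    { 0x2000,     0x00400000, 0 },      /* ST-RAM: 511 pages */
    { 0x01000000, 0x01800000, 511 },    /* TT-RAM            */
    { 0, 0, 0 }
};

static int
pmap_page_index(unsigned long pa)
{
    struct physeg *sep;

    for (sep = phys_segs; sep->start; sep++)
        if (pa >= sep->start && pa < sep->end)
            return (int)atop(pa - sep->start) + sep->first_page;
    return -1;
}

int
main(void)
{
    unsigned long pa = 0x01000000;      /* first TT-RAM page */
    unsigned long vm_first_phys = 0x2000;

    printf("contiguous pa_index: %lu\n", atop(pa - vm_first_phys));
    printf("segment-based index: %d\n", pmap_page_index(pa));
    return 0;
}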