Updated many of the pmap files to use current versions from the
mvme68k port.  The next68k port now uses MACHINE_NEW_NONCONTIG.
This commit is contained in:
dbj 1998-08-28 23:05:53 +00:00
parent abbb6f13e3
commit 12107a5187
11 changed files with 894 additions and 566 deletions

View File

@ -1,4 +1,4 @@
# $NetBSD: files.next68k,v 1.2 1998/07/05 07:53:44 dbj Exp $
# $NetBSD: files.next68k,v 1.3 1998/08/28 23:05:53 dbj Exp $
# next68k-specific configuration info
@ -29,6 +29,8 @@ file dev/ic/z8530tty.c zstty needs-flag
#file arch/m68k/m68k/db_memrw.c ddb
file arch/next68k/next68k/db_memrw.c ddb
file arch/m68k/m68k/cacheops.c
# include "arch/m68k/fpe/files.fpe"
file arch/next68k/next68k/trap.c
@ -46,6 +48,7 @@ file arch/next68k/next68k/nextrom.c
file arch/next68k/next68k/rtc.c
file arch/next68k/next68k/disksubr.c
#file arch/next68k/dev/dbj_debug.c
file arch/next68k/dev/bus_dma.c
file arch/next68k/dev/nextdma.c

View File

@ -1,4 +1,11 @@
/* $NetBSD: pmap.h,v 1.1.1.1 1998/06/09 07:53:05 dbj Exp $ */
/* $ NetBSD: pmap.h,v 1.12 1998/08/22 10:55:34 scw Exp $ */
/*
* This file was taken from mvme68k/include/pmap.h and
* should probably be re-synced when needed.
* Darrin B Jewell <jewell@mit.edu> Fri Aug 28 03:22:07 1998
* original cvs id: NetBSD: pmap.h,v 1.12 1998/08/22 10:55:34 scw Exp
*/
/*
* Copyright (c) 1987 Carnegie-Mellon University
@ -43,11 +50,9 @@
#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_
#include <machine/cpu.h>
#include <machine/pte.h>
#if defined(M68040) && 0
@@@ Why is this not always NBSEG ? -- jewell@mit.edu
#define HP_SEG_SIZE (mmutype == MMU_68040 ? 0x40000 : NBSEG)
#else
#define HP_SEG_SIZE NBSEG
@ -92,7 +97,7 @@ typedef struct pmap *pmap_t;
#define PMAP_ACTIVATE(pmap, loadhw) \
{ \
if ((loadhw)) \
loadustp(m68k_btop((vm_offset_t)(pmap)->pm_stpa)); \
loadustp(m68k_btop((paddr_t)(pmap)->pm_stpa)); \
}
/*
@ -102,7 +107,7 @@ typedef struct pmap *pmap_t;
struct pv_entry {
struct pv_entry *pv_next; /* next pv_entry */
struct pmap *pv_pmap; /* pmap where mapping lies */
vm_offset_t pv_va; /* virtual address for mapping */
vaddr_t pv_va; /* virtual address for mapping */
st_entry_t *pv_ptste; /* non-zero if VA maps a PT page */
struct pmap *pv_ptpmap; /* if pv_ptste, pmap for PT page */
int pv_flags; /* flags */
@ -133,7 +138,6 @@ struct pv_page {
#ifdef _KERNEL
extern struct pmap kernel_pmap_store;
extern vm_offset_t vm_first_phys, vm_num_phys;
#define pmap_kernel() (&kernel_pmap_store)
#define active_pmap(pm) \
@ -144,16 +148,18 @@ extern vm_offset_t vm_first_phys, vm_num_phys;
extern struct pv_entry *pv_table; /* array of entries, one per page */
#ifndef MACHINE_NONCONTIG
#ifndef MACHINE_NEW_NONCONTIG
#define pmap_page_index(pa) atop(pa - vm_first_phys)
#endif
#define pa_to_pvh(pa) (&pv_table[pmap_page_index(pa)])
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
extern pt_entry_t *Sysmap;
extern char *vmmap; /* map for mem, dumps, etc. */
vaddr_t pmap_map __P((vaddr_t, paddr_t, paddr_t, int));
#endif /* _KERNEL */
#endif /* !_MACHINE_PMAP_H_ */

View File

@ -1,8 +1,16 @@
/* $NetBSD: proc.h,v 1.1.1.1 1998/06/09 07:53:05 dbj Exp $ */
/* $NetBSD: proc.h,v 1.2 1998/08/28 23:05:53 dbj Exp $ */
/*
* Copyright (c) 1991 Regents of the University of California.
* All rights reserved.
* This file was taken from mvme68k/include/proc.h and
* should probably be re-synced when needed.
* Darrin B Jewell <jewell@mit.edu> Fri Aug 28 03:22:07 1998
* original cvs id: NetBSD: proc.h,v 1.1.1.1 1995/07/25 23:12:16 chuck Exp
*/
/*
* Copyright (c) 1991, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -32,19 +40,22 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)proc.h 7.1 (Berkeley) 5/15/91
* @(#)proc.h 8.1 (Berkeley) 6/10/93
*/
/*
* Machine-dependent part of the proc structure for next68k.
* @@@ not yet updated.. jewell
* Machine-dependent part of the proc structure for next68k.
*/
struct mdproc {
int md_flags; /* machine-dependent flags */
int *md_regs; /* registers on current frame */
int md_flags; /* machine-dependent flags */
};
/* md_flags */
#define MDP_STACKADJ 0x0001 /* Frame SP adjusted, might have to
undo when system call returns
ERESTART. */
#define MDP_STACKADJ 0x0002 /* frame SP adjusted, might have to
undo when system call returns
ERESTART. */
#define MDP_HPUXTRACE 0x0004 /* being traced by HP-UX process */
#define MDP_HPUXMMAP 0x0008 /* VA space is multiply mapped */
#define MDP_CCBDATA 0x0010 /* copyback caching of data (68040) */
#define MDP_CCBSTACK 0x0020 /* copyback caching of stack (68040) */

View File

@ -1,4 +1,12 @@
/* $NetBSD: pte.h,v 1.1.1.1 1998/06/09 07:53:05 dbj Exp $ */
/* $NetBSD: pte.h,v 1.2 1998/08/28 23:05:54 dbj Exp $ */
/*
* This file was taken from mvme68k/include/pte.h and
* should probably be re-synced when needed.
* Darrin B Jewell <jewell@mit.edu> Fri Aug 28 03:22:07 1998
* original cvs id: NetBSD: pte.h,v 1.1.1.1 1995/07/25 23:12:17 chuck Exp
*/
/*
* Copyright (c) 1988 University of Utah.
@ -42,12 +50,11 @@
* @(#)pte.h 8.1 (Berkeley) 6/10/93
*/
#ifndef _NEXT68K_PTE_H_
#define _NEXT68K_PTE_H_
#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
/*
* next68K hardware segment/page table entries
* derived from hp300
* m68k hardware segment/page table entries
*/
#if 0
@ -150,4 +157,4 @@ typedef int pt_entry_t; /* Mach page table entry */
#define kvtophys(va) \
((kvtopte(va)->pg_pfnum << PGSHIFT) | ((int)(va) & PGOFSET))
#endif /* !_NEXT68K_PTE_H_ */
#endif /* !_MACHINE_PTE_H_ */

View File

@ -1,4 +1,11 @@
/* $NetBSD: vmparam.h,v 1.1.1.1 1998/06/09 07:53:05 dbj Exp $ */
/* $NetBSD: vmparam.h,v 1.2 1998/08/28 23:05:54 dbj Exp $ */
/*
* This file was taken from mvme68k/include/vmparam.h and
* should probably be re-synced when needed.
* Darrin B Jewell <jewell@mit.edu> Fri Aug 28 03:22:07 1998
* original cvs id: NetBSD: vmparam.h,v 1.9 1998/08/22 10:55:34 scw Exp
*/
/*
* Copyright (c) 1988 University of Utah.
@ -45,10 +52,10 @@
#ifndef _NEXT68K_VMPARAM_H_
#define _NEXT68K_VMPARAM_H_
/*
* Machine dependent constants for NEXT68K
*/
/*
* USRTEXT is the start of the user text/data space, while USRSTACK
* is the top (end) of the user stack. LOWPAGES and HIGHPAGES are
@ -219,11 +226,11 @@
*/
/* user/kernel map constants */
#define VM_MIN_ADDRESS ((vm_offset_t)0)
#define VM_MAXUSER_ADDRESS ((vm_offset_t)0xFFF00000)
#define VM_MAX_ADDRESS ((vm_offset_t)0xFFF00000)
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNBASE)
#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t)0xFFFFF000)
#define VM_MIN_ADDRESS ((vaddr_t)0)
#define VM_MAXUSER_ADDRESS ((vaddr_t)0xFFF00000)
#define VM_MAX_ADDRESS ((vaddr_t)0xFFF00000)
#define VM_MIN_KERNEL_ADDRESS ((vaddr_t)0)
#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)0xFFFFF000)
/* virtual sizes (bytes) for various kernel submaps */
#define VM_MBUF_SIZE (NMBCLUSTERS*MCLBYTES)
@ -231,11 +238,39 @@
#define VM_PHYS_SIZE (USRIOSIZE*CLBYTES)
/* # of kernel PT pages (initial only, can grow dynamically) */
#define VM_KERNEL_PT_PAGES ((vm_size_t)2) /* XXX: SYSPTSIZE */
#define VM_KERNEL_PT_PAGES ((vsize_t)2) /* XXX: SYSPTSIZE */
/* pcb base */
#define pcbb(p) ((u_int)(p)->p_addr)
#define MACHINE_NONCONTIG /* VM <=> pmap interface modifier */
/* Use new VM page bootstrap interface. */
#define MACHINE_NEW_NONCONTIG
#endif /* _NEXT68K_VMPARAM_H_ */
#if 0
/*
* Constants which control the way the VM system deals with memory segments.
* The mvme68k port has two physical memory segments: 1 for onboard RAM
* and another for contiguous VMEbus RAM.
*/
#define VM_PHYSSEG_MAX 2
#define VM_PHYSSEG_STRAT VM_PSTRAT_RANDOM
#define VM_PHYSSEG_NOADD
#define VM_NFREELIST 2
#define VM_FREELIST_DEFAULT 0
#define VM_FREELIST_VMEMEM 1
#else
/* @@@ check and verify these, also get values from seglist.h */
#define VM_PHYSSEG_MAX 5
#define VM_PHYSSEG_STRAT VM_PSTRAT_RANDOM
#define VM_PHYSSEG_NOADD
#endif
/*
* pmap-specific data stored in the vm_physmem[] array.
*/
struct pmap_physseg {
struct pv_entry *pvent; /* pv table for this seg */
char *attrs; /* page attributes for this seg */
};
#endif /* _NEXT68K_VMPARAM_H_ */

View File

@ -1,4 +1,11 @@
/* $NetBSD: db_memrw.c,v 1.1.1.1 1998/06/09 07:53:05 dbj Exp $ */
/* $NetBSD: db_memrw.c,v 1.2 1998/08/28 23:05:54 dbj Exp $ */
/*
* This file was taken from mvme68k/mvme68k/db_memrw.c and
* should probably be re-synced when needed.
* Darrin B Jewell <jewell@mit.edu> Fri Aug 28 03:22:07 1998
* original cvs id: NetBSD: db_memrw.c,v 1.4 1998/08/22 10:55:34 scw Exp
*/
/*-
* Copyright (c) 1996 The NetBSD Foundation, Inc.
@ -64,13 +71,11 @@
#include <machine/pte.h>
#include <machine/db_machdep.h>
#include <machine/cpu.h>
#if 0
#include <machine/hp300spu.h>
#endif
#include <m68k/cacheops.h>
#include <ddb/db_access.h>
static void db_write_text __P((vm_offset_t, size_t, char *));
static void db_write_text __P((vaddr_t, size_t, char *));
/*
* Read bytes from kernel address space for debugger.
@ -79,7 +84,7 @@ static void db_write_text __P((vm_offset_t, size_t, char *));
*/
void
db_read_bytes(addr, size, data)
vm_offset_t addr;
vaddr_t addr;
size_t size;
char *data;
{
@ -108,13 +113,13 @@ db_read_bytes(addr, size, data)
*/
static void
db_write_text(addr, size, data)
vm_offset_t addr;
size_t size;
char *data;
vaddr_t addr;
size_t size;
char *data;
{
char *dst, *odst;
pt_entry_t *pte, oldpte, tmppte;
vm_offset_t pgva;
vaddr_t pgva;
int limit;
if (size == 0)
@ -169,7 +174,7 @@ db_write_text(addr, size, data)
tmppte = (oldpte & ~PG_RO) | PG_RW | PG_CI;
*pte = tmppte;
TBIS((vm_offset_t)odst);
TBIS((vaddr_t)odst);
/*
* Page is now writable. Do as much access as we
@ -182,7 +187,7 @@ db_write_text(addr, size, data)
* Restore the old PTE.
*/
*pte = oldpte;
TBIS((vm_offset_t)odst);
TBIS((vaddr_t)odst);
} while (size != 0);
/*
@ -198,7 +203,7 @@ db_write_text(addr, size, data)
extern char kernel_text[], etext[];
void
db_write_bytes(addr, size, data)
vm_offset_t addr;
vaddr_t addr;
size_t size;
char *data;
{

View File

@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.6 1998/07/19 21:41:17 dbj Exp $ */
/* $NetBSD: machdep.c,v 1.7 1998/08/28 23:05:54 dbj Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -96,6 +96,8 @@
#include <dev/cons.h>
#include <next68k/next68k/seglist.h>
#define MAXMEM 64*1024*CLSIZE /* XXX - from cmap.h */
#include <vm/vm.h>
#include <vm/vm_kern.h>
@ -123,13 +125,11 @@ int bufpages = BUFPAGES;
#else
int bufpages = 0;
#endif
caddr_t msgbufaddr;
#ifndef MACHINE_NONCONTIG
int maxmem; /* max memory per process */
#endif
caddr_t msgbufaddr; /* KVA of message buffer */
paddr_t msgbufpa; /* PA of message buffer */
int physmem = MAXMEM; /* max supported memory, measured in pages
changes to actual in locore.s */
int maxmem; /* max memory per process */
int physmem;
/*
* safepri is a safe priority for sleep to set for a spin-wait
* during autoconfiguration or after a panic.
@ -163,6 +163,19 @@ void nmihand __P((struct frame));
*/
cpu_kcore_hdr_t cpu_kcore_hdr;
/*
* Memory segments initialized in locore, which are eventually loaded
* as managed VM pages.
*/
phys_seg_list_t phys_seg_list[VM_PHYSSEG_MAX];
/*
* Memory segments to dump. This is initialized from the phys_seg_list
* before pages are stolen from it for VM system overhead. I.e. this
* covers the entire range of physical memory.
*/
phys_ram_seg_t mem_clusters[VM_PHYSSEG_MAX];
int mem_cluster_cnt;
/****************************************************************/
@ -174,7 +187,34 @@ next68k_init()
{
int i;
#if 0
/*
* Tell the VM system about available physical memory.
*/
for (i = 0; i < mem_cluster_cnt; i++) {
if (phys_seg_list[i].ps_start == phys_seg_list[i].ps_end) {
/*
* Segment has been completely gobbled up.
*/
continue;
}
#if defined(UVM)
/*
* Note the index of the mem cluster is the free
* list we want to put the memory on.
*/
uvm_page_physload(atop(phys_seg_list[i].ps_start),
atop(phys_seg_list[i].ps_end),
atop(phys_seg_list[i].ps_start),
atop(phys_seg_list[i].ps_end), i);
#else
vm_page_physload(atop(phys_seg_list[i].ps_start),
atop(phys_seg_list[i].ps_end),
atop(phys_seg_list[i].ps_start),
atop(phys_seg_list[i].ps_end));
#endif
}
#if 1
/* @@@ Since the boot rom doesn't know how to pass in
* these parameters yet, I manually set them here while debugging
* the scsi driver.
@ -189,14 +229,13 @@ next68k_init()
/* Calibrate the delay loop. */
next68k_calibrate_delay();
/*
* Initialize error message buffer (at end of core).
* avail_end was pre-decremented in pmap_bootstrap to compensate.
*/
for (i = 0; i < btoc(MSGBUFSIZE); i++)
pmap_enter(pmap_kernel(), (vm_offset_t)msgbufaddr + i * NBPG,
avail_end + i * NBPG, VM_PROT_ALL, TRUE);
initmsgbuf(msgbufaddr, m68k_round_page(MSGBUFSIZE));
/*
* Initialize error message buffer (at end of core).
*/
for (i = 0; i < btoc(round_page(MSGBUFSIZE)); i++)
pmap_enter(pmap_kernel(), (vaddr_t)msgbufaddr + i * NBPG,
msgbufpa + i * NBPG, VM_PROT_ALL, TRUE);
initmsgbuf(msgbufaddr, round_page(MSGBUFSIZE));
}
/*
@ -699,6 +738,7 @@ cpu_init_kcore_hdr()
{
cpu_kcore_hdr_t *h = &cpu_kcore_hdr;
struct m68k_kcore_hdr *m = &h->un._m68k;
int i;
extern char end[];
bzero(&cpu_kcore_hdr, sizeof(cpu_kcore_hdr));
@ -746,16 +786,12 @@ cpu_init_kcore_hdr()
m->relocend = (u_int32_t)end;
/*
* hp300 has one contiguous memory segment. Note,
* RAM size is physmem + 1 to account for the msgbuf
* page.
*
* XXX There's actually one more page... the last one mapped
* XXX va == pa. Should we dump it? It's not really used
* XXX for anything except to reboot and the MMU trampoline.
* The next68k has multiple memory segments.
*/
m->ram_segs[0].start = lowram;
m->ram_segs[0].size = ctob(physmem + 1);
for (i = 0; i < mem_cluster_cnt; i++) {
m->ram_segs[i].start = mem_clusters[i].start;
m->ram_segs[i].size = mem_clusters[i].size;
}
}
/*

File diff suppressed because it is too large Load Diff

View File

@ -1,4 +1,13 @@
/* $NetBSD: pmap_bootstrap.c,v 1.1.1.1 1998/06/09 07:53:06 dbj Exp $ */
/* $NetBSD: pmap_bootstrap.c,v 1.2 1998/08/28 23:05:55 dbj Exp $ */
/*
* This file was taken from mvme68k/mvme68k/pmap_bootstrap.c and
* should probably be re-synced when needed.
* Darrin B Jewell <jewell@mit.edu> Fri Aug 28 03:22:07 1998
* original cvs id:
* NetBSD: pmap_bootstrap.c,v 1.10 1998/08/22 10:55:35 scw Exp
*/
/*
* Copyright (c) 1991, 1993
@ -40,13 +49,13 @@
*/
#include <sys/param.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#ifdef MACHINE_NONCONTIG
#include <next68k/next68k/seglist.h>
#endif
#include <vm/vm.h>
@ -59,12 +68,12 @@ extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;
extern int maxmem, physmem;
extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end;
extern vm_size_t mem_size;
#ifdef MACHINE_NONCONTIG
extern vm_size_t avail_remaining;
extern vm_offset_t avail_next;
#endif
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
extern phys_ram_seg_t mem_clusters[];
extern int mem_cluster_cnt;
extern paddr_t msgbufpa;
extern int protection_codes[];
#ifdef HAVEVAC
extern int pmap_aliasmask;
@ -76,12 +85,10 @@ extern int pmap_aliasmask;
*
* CADDR1, CADDR2: pmap zero/copy operations
* vmmap: /dev/mem, crash dumps, parity error checking
* ledbase: SPU LEDs
* msgbufaddr: kernel message buffer
*/
caddr_t CADDR1, CADDR2, vmmap, ledbase;
caddr_t CADDR1, CADDR2, vmmap;
extern caddr_t msgbufaddr;
#ifdef MAP_LEDATABUF
extern void *ledatabuf; /* XXXCDC */
#endif
@ -99,16 +106,15 @@ extern void *ledatabuf; /* XXXCDC */
*/
void
pmap_bootstrap(nextpa, firstpa)
vm_offset_t nextpa;
register vm_offset_t firstpa;
paddr_t nextpa;
paddr_t firstpa;
{
vm_offset_t kstpa, kptpa, eiiopa, iiopa, kptmpa, lkptpa, p0upa;
paddr_t kstpa, kptpa, eiiopa, iiopa, kptmpa, lkptpa, p0upa;
u_int nptpages, kstsize;
register st_entry_t protoste, *ste;
register pt_entry_t protopte, *pte, *epte;
#ifdef MACHINE_NONCONTIG
st_entry_t protoste, *ste;
pt_entry_t protopte, *pte, *epte;
psize_t size;
int i;
#endif
/*
* Calculate important physical addresses:
@ -192,7 +198,7 @@ pmap_bootstrap(nextpa, firstpa)
* likely be insufficient in the future (at least for the kernel).
*/
if (RELOC(mmutype, int) == MMU_68040) {
register int num;
int num;
/*
* First invalidate the entire "segment table" pages
@ -411,85 +417,80 @@ pmap_bootstrap(nextpa, firstpa)
RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);
/*
* VM data structures are now initialized, set up data for
* the pmap module.
* Initialize the mem_clusters[] array for the crash dump
* code. While we're at it, compute the total amount of
* physical memory in the system.
*/
RELOC(avail_start, vm_offset_t) = nextpa;
#ifndef MACHINE_NONCONTIG
RELOC(avail_end, vm_offset_t) =
m68k_ptob(RELOC(maxmem, int))
/* XXX allow for msgbuf */
- m68k_round_page(MSGBUFSIZE);
#else
RELOC(avail_next, vm_offset_t) = RELOC(avail_start, vm_offset_t);
/* leave space at end of onboard RAM for message buffer */
RELOC(phys_seg_list[0].ps_end, vm_offset_t) -=
m68k_round_page(MSGBUFSIZE);
/* initial avail_end is end of onboard RAM */
RELOC(avail_end, vm_offset_t) =
m68k_round_page(RELOC(phys_seg_list[0].ps_end, vm_offset_t));
RELOC(avail_remaining, vm_size_t) =
(RELOC(phys_seg_list[0].ps_end, vm_offset_t) -
RELOC(avail_start, vm_offset_t)) >> PGSHIFT;
RELOC(phys_seg_list[0].ps_start, vm_offset_t) =
RELOC(avail_start, vm_offset_t);
RELOC(phys_seg_list[0].ps_startpage, vm_offset_t) = 0;
/* initial physmem is size of segment zero (onboard RAM) */
RELOC(physmem, int) =
(RELOC(phys_seg_list[0].ps_end, vm_offset_t) -
RELOC(phys_seg_list[0].ps_start, vm_offset_t)) / NBPG;
/* iterate over any remaining segments */
for (i = 1; i < MAX_PHYS_SEGS; i++) {
vm_offset_t len;
if ( RELOC(phys_seg_list[i].ps_start, vm_offset_t) == 0 )
for (i = 0; i < VM_PHYSSEG_MAX; i++) {
if (RELOC(phys_seg_list[i].ps_start, paddr_t) ==
RELOC(phys_seg_list[i].ps_end, paddr_t)) {
/*
* No more memory.
*/
break;
len = RELOC(phys_seg_list[i].ps_end, vm_offset_t) -
RELOC(phys_seg_list[i].ps_start, vm_offset_t);
/* compute the first page number for this segment */
RELOC(phys_seg_list[i].ps_startpage, int) =
RELOC(phys_seg_list[i - 1].ps_startpage, int) +
(RELOC(phys_seg_list[i - 1].ps_end, vm_offset_t) -
RELOC(phys_seg_list[i - 1].ps_start, vm_offset_t)) / NBPG;
RELOC(avail_remaining, vm_size_t) += (len / NBPG);
RELOC(physmem, int) += (len / NBPG);
if ( m68k_round_page(RELOC(phys_seg_list[i].ps_end,
vm_offset_t)) >
RELOC(avail_end, vm_offset_t) ) {
RELOC(avail_end, vm_offset_t) =
m68k_round_page(RELOC(phys_seg_list[i].ps_end,
vm_offset_t));
}
/*
* Make sure these are properly rounded.
*/
RELOC(phys_seg_list[i].ps_start, paddr_t) =
m68k_round_page(RELOC(phys_seg_list[i].ps_start,
paddr_t));
RELOC(phys_seg_list[i].ps_end, paddr_t) =
m68k_trunc_page(RELOC(phys_seg_list[i].ps_end,
paddr_t));
size = RELOC(phys_seg_list[i].ps_end, paddr_t) -
RELOC(phys_seg_list[i].ps_start, paddr_t);
RELOC(mem_clusters[i].start, u_quad_t) =
RELOC(phys_seg_list[i].ps_start, paddr_t);
RELOC(mem_clusters[i].size, u_quad_t) = size;
RELOC(physmem, int) += size >> PGSHIFT;
RELOC(mem_cluster_cnt, int) += 1;
}
#endif
RELOC(mem_size, vm_size_t) = m68k_ptob(RELOC(physmem, int));
RELOC(virtual_avail, vm_offset_t) =
VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;
#ifdef HAVEVAC
/*
* Determine VA aliasing distance if any
* Scoot the start of available on-board RAM forward to
* account for:
*
* (1) The bootstrap programs in low memory (so
* that we can jump back to them without
* reloading).
*
* (2) The kernel text, data, and bss.
*
* (3) The pages we stole above for pmap data
* structures.
*/
if (RELOC(ectype, int) == EC_VIRT)
if (RELOC(machineid, int) == HP_320)
RELOC(pmap_aliasmask, int) = 0x3fff; /* 16k */
else if (RELOC(machineid, int) == HP_350)
RELOC(pmap_aliasmask, int) = 0x7fff; /* 32k */
#endif
RELOC(phys_seg_list[0].ps_start, paddr_t) = nextpa;
/*
* Reserve space at the end of on-board RAM for the message
* buffer. We force it into on-board RAM because VME RAM
* isn't cached by the hardware (s-l-o-w).
*/
RELOC(phys_seg_list[0].ps_end, paddr_t) -=
m68k_round_page(MSGBUFSIZE);
RELOC(msgbufpa, paddr_t) =
RELOC(phys_seg_list[0].ps_end, paddr_t);
/*
* Initialize avail_start and avail_end.
*/
i = RELOC(mem_cluster_cnt, int) - 1;
RELOC(avail_start, paddr_t) =
RELOC(phys_seg_list[0].ps_start, paddr_t);
RELOC(avail_end, paddr_t) =
RELOC(phys_seg_list[i].ps_end, paddr_t);
RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));
RELOC(virtual_avail, vaddr_t) =
VM_MIN_KERNEL_ADDRESS + (vaddr_t)(nextpa - firstpa);
RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;
/*
* Initialize protection array.
@ -497,7 +498,7 @@ pmap_bootstrap(nextpa, firstpa)
* absolute "jmp" table.
*/
{
register int *kp;
int *kp;
kp = &RELOC(protection_codes, int);
kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
@ -511,7 +512,7 @@ pmap_bootstrap(nextpa, firstpa)
}
/*
* Kernel page/segment table allocated in locore,
* Kernel page/segment table allocated above,
* just initialize pointers.
*/
{
@ -530,7 +531,7 @@ pmap_bootstrap(nextpa, firstpa)
* MAXKL2SIZE-1: maps last-page page table
*/
if (RELOC(mmutype, int) == MMU_68040) {
register int num;
int num;
kpm->pm_stfree = ~l2tobm(0);
num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
@ -549,7 +550,7 @@ pmap_bootstrap(nextpa, firstpa)
* Allocate some fixed, special purpose kernel virtual addresses
*/
{
vm_offset_t va = RELOC(virtual_avail, vm_offset_t);
vaddr_t va = RELOC(virtual_avail, vaddr_t);
RELOC(CADDR1, caddr_t) = (caddr_t)va;
va += NBPG;
@ -557,10 +558,8 @@ pmap_bootstrap(nextpa, firstpa)
va += NBPG;
RELOC(vmmap, caddr_t) = (caddr_t)va;
va += NBPG;
RELOC(ledbase, caddr_t) = (caddr_t)va;
va += NBPG;
RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
va += m68k_round_page(MSGBUFSIZE);
RELOC(virtual_avail, vm_offset_t) = va;
RELOC(virtual_avail, vaddr_t) = va;
}
}

View File

@ -1,4 +1,4 @@
/* $NetBSD: seglist.h,v 1.3 1998/08/04 19:08:23 dbj Exp $ */
/* $NetBSD: seglist.h,v 1.4 1998/08/28 23:05:55 dbj Exp $ */
/*
* Copyright (c) 1997 The Steve Woodford
@ -41,11 +41,15 @@
*
* NOTE: If you change this, you'll need to update locore.s ...
*/
struct phys_seg_list_t {
typedef struct {
vm_offset_t ps_start; /* Start of segment */
vm_offset_t ps_end; /* End of segment */
int ps_startpage; /* Page number of first page */
};
} phys_seg_list_t;
/* @@@ this next specific stuff should be moved elsewhere
* and this file should be re-synced to the mvme version
*/
#define N_SIMM 4 /* number of SIMMs in machine */
@ -72,4 +76,4 @@ struct phys_seg_list_t {
/* Instantiated in pmap.c */
/* size +1 is for list termination */
extern struct phys_seg_list_t phys_seg_list[MAX_PHYS_SEGS];
extern phys_seg_list_t phys_seg_list[];

View File

@ -1,4 +1,13 @@
/* $NetBSD: vm_machdep.c,v 1.2 1998/07/28 18:34:56 thorpej Exp $ */
/* $NetBSD: vm_machdep.c,v 1.3 1998/08/28 23:05:55 dbj Exp $ */
/*
* This file was taken from mvme68k/mvme68k/vm_machdep.c and
* should probably be re-synced when needed.
* Darrin B Jewell <jewell@mit.edu> Fri Aug 28 03:22:07 1998
* original cvs id:
* NetBSD: vm_machdep.c,v 1.15 1998/08/22 10:55:36 scw Exp
*/
/*
* Copyright (c) 1988 University of Utah.
@ -42,6 +51,9 @@
* @(#)vm_machdep.c 8.6 (Berkeley) 1/12/94
*/
#include "opt_uvm.h"
#include "opt_compat_hpux.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
@ -52,13 +64,17 @@
#include <sys/core.h>
#include <sys/exec.h>
#include <machine/frame.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#ifdef UVM
#include <uvm/uvm_extern.h>
#endif
#include <machine/cpu.h>
#include <machine/pte.h>
#include <machine/reg.h>
#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <m68k/cacheops.h>
/*
* Finish a fork operation, with process p2 nearly set up.
@ -73,13 +89,13 @@ void
cpu_fork(p1, p2)
struct proc *p1, *p2;
{
void child_return __P((struct proc *, struct frame));
struct pcb *pcb = &p2->p_addr->u_pcb;
struct trapframe *tf;
struct switchframe *sf;
extern struct pcb *curpcb;
extern void proc_trampoline(), child_return();
p2->p_md.md_flags = p1->p_md.md_flags;
p2->p_md.md_flags = p1->p_md.md_flags & ~MDP_HPUXTRACE;
/* Sync curpcb (which is presumably p1's PCB) and copy it to p2. */
savectx(curpcb);
@ -105,7 +121,7 @@ cpu_set_kpc(p, pc)
void (*pc) __P((struct proc *));
{
p->p_addr->u_pcb.pcb_regs[6] = (int) pc; /* A2 */
p->p_addr->u_pcb.pcb_regs[6] = (u_long) pc; /* A2 */
}
/*
@ -121,10 +137,18 @@ cpu_exit(p)
struct proc *p;
{
#ifdef UVM
uvmspace_free(p->p_vmspace);
#else
vmspace_free(p->p_vmspace);
#endif
(void) splimp();
(void) splhigh();
#ifdef UVM
uvmexp.swtch++;
#else
cnt.v_swtch++;
#endif
switch_exit(p);
/* NOTREACHED */
}
@ -147,6 +171,18 @@ cpu_coredump(p, vp, cred, chdr)
struct coreseg cseg;
int error;
#ifdef COMPAT_HPUX
extern struct emul emul_hpux;
/*
* If we loaded from an HP-UX format binary file we dump enough
* of an HP-UX style user struct so that the HP-UX debuggers can
* grok it.
*/
if (p->p_emul == &emul_hpux)
return (hpux_dumpu(vp, cred));
#endif
CORE_SETMAGIC(*chdr, COREMAGIC, MID_M68K, 0);
chdr->c_hdrsize = ALIGN(sizeof(*chdr));
chdr->c_seghdrsize = ALIGN(sizeof(cseg));
@ -157,15 +193,10 @@ cpu_coredump(p, vp, cred, chdr)
if (error)
return error;
if (fputype) {
/* Save floating point registers. */
error = process_read_fpregs(p, &md_core.freg);
if (error)
return error;
} else {
/* Make sure these are clear. */
bzero((caddr_t)&md_core.freg, sizeof(md_core.freg));
}
/* Save floating point registers. */
error = process_read_fpregs(p, &md_core.freg);
if (error)
return error;
CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_M68K, CORE_CPU);
cseg.c_addr = 0;
@ -197,24 +228,24 @@ pagemove(from, to, size)
caddr_t from, to;
size_t size;
{
vm_offset_t pa;
paddr_t pa;
#ifdef DEBUG
if (size & CLOFSET)
panic("pagemove");
#endif
while (size > 0) {
pa = pmap_extract(pmap_kernel(), (vm_offset_t)from);
pa = pmap_extract(pmap_kernel(), (vaddr_t)from);
#ifdef DEBUG
if (pa == 0)
panic("pagemove 2");
if (pmap_extract(pmap_kernel(), (vm_offset_t)to) != 0)
if (pmap_extract(pmap_kernel(), (vaddr_t)to) != 0)
panic("pagemove 3");
#endif
pmap_remove(pmap_kernel(),
(vm_offset_t)from, (vm_offset_t)from + PAGE_SIZE);
(vaddr_t)from, (vaddr_t)from + PAGE_SIZE);
pmap_enter(pmap_kernel(),
(vm_offset_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1);
(vaddr_t)to, pa, VM_PROT_READ|VM_PROT_WRITE, 1);
from += PAGE_SIZE;
to += PAGE_SIZE;
size -= PAGE_SIZE;
@ -256,16 +287,84 @@ physunaccess(vaddr, size)
TBIAS();
}
/*
* Allocate/deallocate a cache-inhibited range of kernel virtual address
* space mapping the indicated physical range [pa - pa+size].
*/
void *
iomap(paddr, size)
u_long paddr;
size_t size;
{
u_long pa, off;
vaddr_t va, rval;
off = paddr & PGOFSET;
pa = m68k_trunc_page(paddr);
size += off;
size = m68k_round_page(size);
/* Get some kernel virtual space. */
#ifdef UVM
va = uvm_km_alloc(kernel_map, size);
#else
va = kmem_alloc_pageable(kernel_map, size);
#endif
if (va == 0)
return (NULL);
rval = va + off;
/* Map the PA range. */
physaccess((caddr_t)va, (caddr_t)pa, size, PG_RW|PG_CI);
return ((void *)rval);
}
void
iounmap(kva, size)
void *kva;
size_t size;
{
vaddr_t va;
va = m68k_trunc_page((vaddr_t)kva);
size = m68k_round_page(size);
physunaccess((caddr_t)va, size);
#ifdef UVM
uvm_km_free(kernel_map, va, size);
#else
kmem_free(kernel_map, va, size);
#endif
}
/*
* Set a red zone in the kernel stack after the u. area.
* We don't support a redzone right now. It really isn't clear
* that it is a good idea since, if the kernel stack were to roll
* into a write protected page, the processor would lock up (since
* it cannot create an exception frame) and we would get no useful
* post-mortem info. Currently, under the DEBUG option, we just
* check at every clock interrupt to see if the current k-stack has
* gone too far (i.e. into the "redzone" page) and if so, panic.
* Look at _lev6intr in locore.s for more details.
*/
/*ARGSUSED*/
setredzone(pte, vaddr)
pt_entry_t *pte;
caddr_t vaddr;
{
}
/*
* Convert kernel VA to physical address
*/
int
kvtop(addr)
caddr_t addr;
{
vm_offset_t va;
vaddr_t va;
va = pmap_extract(pmap_kernel(), (vm_offset_t)addr);
va = pmap_extract(pmap_kernel(), (vaddr_t)addr);
if (va == 0)
panic("kvtop: zero page frame");
return((int)va);
@ -284,21 +383,25 @@ extern vm_map_t phys_map;
void
vmapbuf(bp, len)
struct buf *bp;
vm_size_t len;
vsize_t len;
{
struct pmap *upmap, *kpmap;
vm_offset_t uva; /* User VA (map from) */
vm_offset_t kva; /* Kernel VA (new to) */
vm_offset_t pa; /* physical address */
vm_size_t off;
vaddr_t uva; /* User VA (map from) */
vaddr_t kva; /* Kernel VA (new to) */
paddr_t pa; /* physical address */
vsize_t off;
if ((bp->b_flags & B_PHYS) == 0)
panic("vmapbuf");
uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data);
off = (vm_offset_t)bp->b_data - uva;
off = (vaddr_t)bp->b_data - uva;
len = m68k_round_page(off + len);
#ifdef UVM
kva = uvm_km_valloc_wait(phys_map, len);
#else
kva = kmem_alloc_wait(phys_map, len);
#endif
bp->b_data = (caddr_t)(kva + off);
upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
@ -320,23 +423,27 @@ vmapbuf(bp, len)
void
vunmapbuf(bp, len)
struct buf *bp;
vm_size_t len;
vsize_t len;
{
vm_offset_t kva;
vm_size_t off;
vaddr_t kva;
vsize_t off;
if ((bp->b_flags & B_PHYS) == 0)
panic("vunmapbuf");
kva = m68k_trunc_page(bp->b_data);
off = (vm_offset_t)bp->b_data - kva;
off = (vaddr_t)bp->b_data - kva;
len = m68k_round_page(off + len);
/*
* pmap_remove() is unnecessary here, as kmem_free_wakeup()
* will do it for us.
*/
#ifdef UVM
uvm_km_free_wakeup(phys_map, kva, len);
#else
kmem_free_wakeup(phys_map, kva, len);
#endif
bp->b_data = bp->b_saveaddr;
bp->b_saveaddr = 0;
}