move 040 to hp300 style, use new MI lance ethernet driver

a few minor fixes WRT prototypes.
if_le from Charles; rest from osymh@gemini.oscs.montana.edu (Michael L. Hitch)
chopps 1995-09-29 13:51:30 +00:00
parent eab73b27b1
commit 319c531354
15 changed files with 447 additions and 1080 deletions


@ -1,4 +1,4 @@
/* $NetBSD: amiga_init.c,v 1.31 1995/09/16 16:11:03 chopps Exp $ */
/* $NetBSD: amiga_init.c,v 1.32 1995/09/29 13:51:30 chopps Exp $ */
/*
* Copyright (c) 1994 Michael L. Hitch
@ -61,7 +61,7 @@
extern int machineid, mmutype;
extern u_int lowram;
extern u_int Sysptmap, Sysptsize, Sysseg, Umap, proc0paddr;
extern u_int Sysseg2; /* 68040 2nd level descriptor table */
extern u_int Sysseg_pa;
extern u_int virtual_avail;
extern char *esym;
@ -166,11 +166,11 @@ start_c(id, fphystart, fphysize, cphysize, esym_addr, flags)
extern u_int protorp[2];
struct cfdev *cd;
u_int pstart, pend, vstart, vend, avail;
u_int pt, ptpa, ptsize, ptextra;
u_int Sysseg_pa, Sysptmap_pa, umap_pa, Sysseg2_pa;
u_int sg_proto, pg_proto;
u_int pt, ptpa, ptsize, ptextra, kstsize;
u_int Sysptmap_pa, umap_pa;
register st_entry_t sg_proto, *sg, *esg;
register pt_entry_t pg_proto, *pg;
u_int tc, end_loaded, ncd, i;
u_int *sg, *pg, *pg2;
boot_fphystart = fphystart;
boot_fphysize = fphysize;
@ -215,7 +215,7 @@ start_c(id, fphystart, fphysize, cphysize, esym_addr, flags)
sp = memlist->m_seg;
esp = sp + memlist->m_nseg;
for (; sp < esp; i++, sp++) {
for (; sp < esp; sp++) {
if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA))
!= (MEMF_FAST|MEMF_24BITDMA))
continue;
@ -277,36 +277,20 @@ start_c(id, fphystart, fphysize, cphysize, esym_addr, flags)
avail -= vstart;
#ifdef M68040
if (mmutype == MMU_68040) {
/*
* allocate the kernel 1st level segment table
*/
Sysseg_pa = pstart;
Sysseg = vstart;
vstart += NBPG;
pstart += NBPG;
avail -= NBPG;
/*
* allocate the kernel segment table
*/
Sysseg2_pa = pstart;
Sysseg2 = vstart;
vstart += AMIGA_040RTSIZE / 4 * AMIGA_040STSIZE;
pstart += AMIGA_040RTSIZE / 4 * AMIGA_040STSIZE;
avail -= AMIGA_040RTSIZE / 4 * AMIGA_040STSIZE;
} else
if (mmutype == MMU_68040)
kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
else
#endif
{
/*
* allocate the kernel segment table
*/
Sysseg = vstart;
Sysseg_pa = pstart;
vstart += NBPG;
pstart += NBPG;
avail -= NBPG;
}
kstsize = 1;
/*
* allocate the kernel segment table
*/
Sysseg_pa = pstart;
Sysseg = vstart;
vstart += NBPG * kstsize;
pstart += NBPG * kstsize;
avail -= NBPG * kstsize;
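A quick sanity check on kstsize (illustrative arithmetic; NBPG = 8192 and NPTEPG = NBPG / sizeof(pt_entry_t) = 2048 are the usual amiga values, assumed here since they aren't shown in this diff):

kstsize = MAXKL2SIZE / (NPTEPG / SG4_LEV2SIZE) = 32 / (2048 / 128) = 2

so the 68040 kernel segment table spans two pages, while every other MMU takes the one-page kstsize = 1 path.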
/*
* allocate initial page table pages
@ -350,75 +334,96 @@ start_c(id, fphystart, fphysize, cphysize, esym_addr, flags)
*/
#ifdef M68040
if (mmutype == MMU_68040) {
sg_proto = Sysseg2_pa | SG_RW | SG_V;
/*
* map all level 1 entries to the segment table
* First invalidate the entire "segment table" pages
* (levels 1 and 2 have the same "invalid" values).
*/
sg = (u_int *)Sysseg_pa;
while (sg_proto < ptpa) {
*sg++ = sg_proto;
sg_proto += AMIGA_040RTSIZE;
}
sg_proto = ptpa | SG_RW | SG_V;
pg_proto = ptpa | PG_RW | PG_CI | PG_V;
esg = &sg[kstsize * NPTEPG];
while (sg < esg)
*sg++ = SG_NV;
/*
* map so many segs
* Initialize level 2 descriptors (which immediately
* follow the level 1 table). We need:
* NPTEPG / SG4_LEV3SIZE
* level 2 descriptors to map each of the nptpages + 1
* pages of PTEs. Note that we set the "used" bit
* now to save the HW the expense of doing it.
*/
sg = (u_int *)Sysseg2_pa;
pg = (u_int *)Sysptmap_pa;
while (sg_proto < pstart) {
i = ((ptsize >> PGSHIFT) + 1) * (NPTEPG / SG4_LEV3SIZE);
sg = &((u_int *)Sysseg_pa)[SG4_LEV1SIZE];
esg = &sg[i];
sg_proto = ptpa | SG_U | SG_RW | SG_V;
while (sg < esg) {
*sg++ = sg_proto;
if (pg_proto < pstart)
*pg++ = pg_proto;
else if (pg < (u_int *)pstart)
*pg++ = PG_NV;
sg_proto += AMIGA_040PTSIZE;
sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
}
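To make the descriptor count concrete (same assumptions as above, NPTEPG = 2048): a PT page holds NPTEPG PTEs and one level 3 table covers SG4_LEV3SIZE = 32 of them, so each PT page consumes

NPTEPG / SG4_LEV3SIZE = 2048 / 32 = 64

level 2 descriptors, giving i = (nptpages + 1) * 64 once the Sysptmap page is counted as the extra PT page.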
/*
* Initialize level 1 descriptors. We need:
* roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
* level 1 descriptors to map the 'num' level 2's.
*/
sg = (u_int *) Sysseg_pa;
esg = &sg[roundup(i, SG4_LEV2SIZE) / SG4_LEV2SIZE];
sg_proto = (u_int)&sg[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
while (sg < esg) {
*sg++ = sg_proto;
sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
}
/*
* Initialize Sysptmap
*/
sg = (u_int *)Sysptmap_pa;
esg = &sg[(ptsize >> PGSHIFT) + 1];
pg_proto = ptpa | PG_RW | PG_CI | PG_V;
while (sg < esg) {
*sg++ = pg_proto;
pg_proto += NBPG;
}
/*
* invalidate the remainder of the table
* Invalidate rest of Sysptmap page
*/
do {
esg = (u_int *)(Sysptmap_pa + NBPG);
while (sg < esg)
*sg++ = SG_NV;
if (pg < (u_int *)pstart)
*pg++ = PG_NV;
} while (sg < (u_int *)(Sysseg2_pa + AMIGA_040RTSIZE / 4 * AMIGA_040STSIZE));
} else
#endif /* M68040 */
{
sg_proto = ptpa | SG_RW | SG_V;
pg_proto = ptpa | PG_RW | PG_CI | PG_V;
/*
* map so many segs
* Map the page table pages in both the HW segment table
* and the software Sysptmap. Note that Sysptmap is also
* considered a PT page, hence the +1.
*/
sg = (u_int *)Sysseg_pa;
pg = (u_int *)Sysptmap_pa;
while (sg_proto < pstart) {
esg = &pg[(ptsize >> PGSHIFT) + 1];
sg_proto = ptpa | SG_RW | SG_V;
pg_proto = ptpa | PG_RW | PG_CI | PG_V;
while (pg < esg) {
*sg++ = sg_proto;
*pg++ = pg_proto;
sg_proto += NBPG;
pg_proto += NBPG;
}
/*
* invalidate the remainder of the tables
* invalidate the remainder of each table
*/
do {
esg = (u_int *)(Sysptmap_pa + NBPG);
while (pg < esg) {
*sg++ = SG_NV;
*pg++ = PG_NV;
} while (sg < (u_int *)(Sysseg_pa + AMIGA_STSIZE));
}
}
/*
* record KVA at which to access current u-area PTE(s)
*/
/* Umap = (u_int)Sysmap + AMIGA_MAX_PTSIZE - UPAGES * 4; */
/*
* initialize kernel page table page(s) (assume load at VA 0)
*/
pg_proto = fphystart | PG_RO | PG_V; /* text pages are RO */
pg = (u_int *) ptpa;
for (i = 0; i < (u_int) etext; i += NBPG, pg_proto += NBPG)
/* XXX make first page PG_NV when vectors get moved */
*pg++ = pg_proto;
pg_proto += NBPG;
for (i = NBPG; i < (u_int) etext; i += NBPG, pg_proto += NBPG)
*pg++ = pg_proto;
/*
@ -721,10 +726,18 @@ kernel_reload_write(uio)
*/
kernel_text_size = (kernel_exec.a_text
+ __LDPGSZ - 1) & (-__LDPGSZ);
/*
* Estimate space needed for symbol names, since we don't
* know how big it really is.
*/
if (esym != NULL) {
kernel_symbol_size = kernel_exec.a_syms;
kernel_symbol_size += 16 * (kernel_symbol_size / 12);
}
/*
* XXX - should check that image will fit in CHIP memory
* XXX return an error if it doesn't
*/
kernel_image = malloc(kernel_text_size + kernel_exec.a_data
+ kernel_exec.a_bss
+ kernel_symbol_size
@ -797,6 +810,12 @@ kernel_reload_write(uio)
boot_cphysize, kernel_symbol_esym, eclockfreq,
boot_flags);
/*NOTREACHED*/
/*
* XXX - kernel_reload() needs to verify that the
* reload code is at the same location in the new
* kernel. If it isn't, it will return and we will
* return an error.
*/
case 3: /* done loading kernel symbol table */
c = *((u_long *)(kernel_image + kernel_load_ofs - 4));
if (c > 16 * (kernel_exec.a_syms / 12))
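The 16 * (a_syms / 12) expression encodes the a.out symbol table layout: each struct nlist entry is 12 bytes, and 16 bytes of string-table space are reserved per symbol because the real name lengths aren't known until the table is loaded. A minimal sketch of the same estimate (the helper name is hypothetical, not part of the commit):

/*
 * Estimate total space for an a.out symbol table: the nlist
 * entries themselves plus ~16 bytes of name per entry.
 * Illustrative only; mirrors kernel_reload_write() above.
 */
u_int
estimate_symbol_space(u_int a_syms)
{
	u_int nsyms = a_syms / 12;	/* 12-byte struct nlist entries */

	return (a_syms + 16 * nsyms);
}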


@ -1,4 +1,4 @@
/* $NetBSD: disksubr.c,v 1.19 1995/03/28 19:59:56 jtc Exp $ */
/* $NetBSD: disksubr.c,v 1.20 1995/09/29 13:51:33 chopps Exp $ */
/*
* Copyright (c) 1994 Christian E. Hopps
@ -36,6 +36,7 @@
* @(#)ufs_disksubr.c 7.16 (Berkeley) 5/4/91
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/disklabel.h>


@ -1,4 +1,4 @@
/* $NetBSD: locore.s,v 1.44 1995/09/23 13:42:03 chopps Exp $ */
/* $NetBSD: locore.s,v 1.45 1995/09/29 13:51:35 chopps Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -47,6 +47,10 @@
*/
#include "assym.s"
.long 0x4ef80400+NBPG /* jmp jmp0.w */
.fill NBPG/4-1,4,0/*xdeadbeef*/
#include <amiga/amiga/vectors.s>
#include <amiga/amiga/custom.h>
@ -762,8 +766,8 @@ Lsetcpu040:
movl #CACHE40_OFF,d0 | 68040 cache disable
Lstartnot040:
movc d0,cacr | clear and disable on-chip cache(s)
moveq #0,d0
movc d0,vbr
movl #Lvectab,a0
movc a0,vbr
/* initialize source/destination control registers for movs */
moveq #FC_USERD,d0 | user space


@ -1,4 +1,4 @@
/* $NetBSD: machdep.c,v 1.56 1995/09/19 23:06:31 thorpej Exp $ */
/* $NetBSD: machdep.c,v 1.57 1995/09/29 13:51:38 chopps Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -913,7 +913,7 @@ dumpconf()
* getting on the dump stack, either when called above, or by
* the auto-restart code.
*/
#define BYTES_PER_DUMP NBPG /* Must be a multiple of pagesize XXX small */
#define BYTES_PER_DUMP MAXPHYS /* Must be a multiple of pagesize XXX small */
static vm_offset_t dumpspace;
vm_offset_t


@ -1,4 +1,4 @@
/* $NetBSD: pmap.c,v 1.25 1995/09/16 16:11:09 chopps Exp $ */
/* $NetBSD: pmap.c,v 1.26 1995/09/29 13:51:41 chopps Exp $ */
/*
* Copyright (c) 1991 Regents of the University of California.
@ -76,6 +76,7 @@
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
@ -149,12 +150,27 @@ int pmapdebug = PDB_PARANOIA;
/*
* Get STEs and PTEs for user/kernel address space
*/
#ifdef M68040
#define pmap_ste(m, v) (&((m)->pm_stab[(vm_offset_t)(v) >> pmap_ishift]))
#define pmap_ste1(m, v) \
(&((m)->pm_stab[(vm_offset_t)(v) >> SG4_SHIFT1]))
/* XXX assumes physically contiguous ST pages (if more than one) */
#define pmap_ste2(m, v) \
(&((m)->pm_stab[(u_int *)(*(u_int *)pmap_ste1(m,v) & SG4_ADDR1) \
- (m)->pm_stpa + (((v) & SG4_MASK2) >> SG4_SHIFT2)]))
#define pmap_ste_v(m, v) \
(mmutype == MMU_68040 \
? ((*pmap_ste1(m, v) & SG_V) && \
(*pmap_ste2(m, v) & SG_V)) \
: (*pmap_ste(m, v) & SG_V))
#else
#define pmap_ste(m, v) (&((m)->pm_stab[(vm_offset_t)(v) >> SG_ISHIFT]))
#define pmap_ste_v(m, v) (*pmap_ste(m, v) & SG_V)
#endif
#define pmap_pte(m, v) (&((m)->pm_ptab[(vm_offset_t)(v) >> PG_SHIFT]))
#define pmap_pte_pa(pte) (*(u_int *)(pte) & PG_FRAME)
#define pmap_ste_v(ste) (*(u_int *)(ste) & SG_V)
#define pmap_pte_w(pte) (*(u_int *)(pte) & PG_W)
#define pmap_pte_ci(pte) (*(u_int *)(pte) & PG_CI)
#define pmap_pte_m(pte) (*(u_int *)(pte) & PG_M)
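For reference, a minimal sketch of the three-level 68040 walk behind pmap_ste1()/pmap_ste2(); the shift values 25 and 18 for SG4_SHIFT1 and SG4_SHIFT2 are the conventional m68k ones for 8K pages and are assumptions here (only SG4_SHIFT3 = 13 appears in this change):

/*
 * Decompose a VA into 68040 table indices: 7-bit level 1 index,
 * 7-bit level 2 index, 5-bit level 3 index (SG4_LEV1SIZE =
 * SG4_LEV2SIZE = 128, SG4_LEV3SIZE = 32).
 */
u_int l1 = va >> 25;		/* SG4_SHIFT1: root table index */
u_int l2 = (va >> 18) & 127;	/* SG4_SHIFT2: pointer table index */
u_int l3 = (va >> 13) & 31;	/* SG4_SHIFT3: page table index */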
@ -201,10 +217,10 @@ struct kpt_page *kpt_pages;
* Segtabzero is an empty segment table which all processes share til they
* reference something.
*/
u_int *Sysseg2; /* 68040 2nd level descriptor table */
u_int *Sysseg;
u_int *Sysseg_pa;
u_int *Sysmap, *Sysptmap;
u_int *Segtabzero;
u_int *Segtabzero, *Segtabzeropa;
vm_size_t Sysptsize = VM_KERNEL_PT_PAGES + 4 / NPTEPG;
struct pmap kernel_pmap_store;
@ -219,7 +235,9 @@ vm_offset_t vm_first_phys; /* PA of first managed page */
vm_offset_t vm_last_phys; /* PA just past last managed page */
boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */
char *pmap_attributes; /* reference and modify bits */
#ifdef M68040
static int pmap_ishift; /* segment table index shift */
#endif
#ifdef MACHINE_NONCONTIG
struct physeg {
@ -268,7 +286,6 @@ pmap_bootstrap(firstaddr, loadaddr)
vm_offset_t firstaddr;
vm_offset_t loadaddr;
{
extern vm_offset_t maxmem, physmem;
vm_offset_t va;
u_int *pte;
#ifdef MACHINE_NONCONTIG
@ -340,16 +357,16 @@ pmap_bootstrap(firstaddr, loadaddr)
* Kernel page/segment table allocated in locore,
* just initialize pointers.
*/
pmap_kernel()->pm_stpa = Sysseg_pa;
pmap_kernel()->pm_stab = Sysseg;
pmap_kernel()->pm_ptab = Sysmap;
#ifdef M68040
if (mmutype == MMU_68040) {
pmap_kernel()->pm_stpa = Sysseg;
pmap_kernel()->pm_stab = Sysseg2;
pmap_ishift = SG4_SHIFT2;
pmap_ishift = SG4_SHIFT1;
pmap_kernel()->pm_stfree = 0xfffffff8; /* XXXX */
} else
#endif
pmap_ishift = SG_ISHIFT;
#endif
simple_lock_init(&pmap_kernel()->pm_lock);
pmap_kernel()->pm_count = 1;
@ -476,24 +493,14 @@ pmap_init(phys_start, phys_end)
#else
npg = atop(phys_end - phys_start);
#endif
#ifdef M68040
if (mmutype == MMU_68040)
s = (vm_size_t)AMIGA_040STSIZE * 128 +
sizeof(struct pv_entry) * npg + npg;
else
#endif
s = (vm_size_t)AMIGA_STSIZE +
sizeof(struct pv_entry) * npg + npg;
s = (vm_size_t)AMIGA_STSIZE +
sizeof(struct pv_entry) * npg + npg;
s = round_page(s);
addr = (vm_offset_t) kmem_alloc(kernel_map, s);
Segtabzero = (u_int *) addr;
#ifdef M68040
if (mmutype == MMU_68040)
addr += AMIGA_040STSIZE * 128;
else
#endif
addr += AMIGA_STSIZE;
Segtabzeropa = (u_int *) pmap_extract(pmap_kernel(), addr);
addr += AMIGA_STSIZE;
pv_table = (pv_entry_t) addr;
addr += sizeof(struct pv_entry) * npg;
pmap_attributes = (char *) addr;
@ -723,11 +730,12 @@ pmap_pinit(pmap)
* "null" segment table. On the first pmap_enter, a real
* segment table will be allocated.
*/
pmap->pm_stab = Segtabzero;
pmap->pm_stpa = Segtabzeropa;
#ifdef M68040
if (mmutype == MMU_68040)
pmap->pm_stpa = Segtabzero;
pmap->pm_stfree = 0x0000fffe; /* XXXX */
#endif
pmap->pm_stab = Segtabzero;
pmap->pm_stchanged = TRUE;
pmap->pm_count = 1;
simple_lock_init(&pmap->pm_lock);
@ -784,16 +792,7 @@ pmap_release(pmap)
kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab,
AMIGA_UPTSIZE);
if (pmap->pm_stab != Segtabzero)
#ifdef M68040
if (mmutype == MMU_68040) {
kmem_free(kernel_map, (vm_offset_t)pmap->pm_stpa,
AMIGA_040RTSIZE);
kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab,
AMIGA_040STSIZE*128);
}
else
#endif
kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, AMIGA_STSIZE);
kmem_free(kernel_map, (vm_offset_t)pmap->pm_stab, AMIGA_STSIZE);
}
/*
@ -849,7 +848,7 @@ pmap_remove(pmap, sva, eva)
* Weed out invalid mappings.
* Note: we assume that the segment table is always allocated.
*/
if (!pmap_ste_v(pmap_ste(pmap, va))) {
if (!pmap_ste_v(pmap, va)) {
/* XXX: avoid address wrap around */
if (va >= amiga_trunc_seg((vm_offset_t)-1))
break;
@ -965,19 +964,19 @@ printf ("pmap_remove: PA %08x index %d\n", pa, pa_index(pa));
#ifdef M68040
if (mmutype == MMU_68040) {
/*
* On the 68040, the PT page contains 64 page tables,
* so we need to remove all the associated segment
* table entries
* On the 68040, the PT page contains NPTEPG/SG4_LEV3SIZE
* page tables, so we need to remove all the associated
* segment table entries
* (This may be incorrect: if a single page table is
* being removed, the whole page should not be
* removed.)
*/
for (i = 0; i < 64; ++i)
for (i = 0; i < NPTEPG / SG4_LEV3SIZE; ++i)
*ste++ = SG_NV;
ste -= 64;
ste -= NPTEPG / SG4_LEV3SIZE;
#ifdef DEBUG
if (pmapdebug &(PDB_REMOVE|PDB_SEGTAB|0x10000))
printf("pmap_remove:PT at %x remved\n",
printf("pmap_remove:PT at %x removed\n",
va);
#endif
}
@ -1005,29 +1004,23 @@ printf ("pmap_remove: PA %08x index %d\n", pa, pa_index(pa));
printf("remove: free stab %x\n",
ptpmap->pm_stab);
#endif
#ifdef M68040
if (mmutype == MMU_68040) {
kmem_free(kernel_map,
(vm_offset_t)ptpmap->pm_stpa,
AMIGA_040RTSIZE);
kmem_free(kernel_map,
(vm_offset_t)ptpmap->pm_stab,
AMIGA_040STSIZE*128);
ptpmap->pm_stpa = Segtabzero;
}
else
#endif
kmem_free(kernel_map,
(vm_offset_t)ptpmap->pm_stab,
AMIGA_STSIZE);
kmem_free(kernel_map,
(vm_offset_t)ptpmap->pm_stab,
AMIGA_STSIZE);
ptpmap->pm_stab = Segtabzero;
ptpmap->pm_stpa = Segtabzeropa;
#ifdef M68040
if (mmutype == MMU_68040)
ptpmap->pm_stfree = 0x0000fffe; /* XXXX */
#endif
ptpmap->pm_stchanged = TRUE;
/*
* XXX may have changed segment table
* pointer for current process so
* update now to reload hardware.
*/
if (ptpmap == curproc->p_vmspace->vm_map.pmap)
if (curproc &&
ptpmap == curproc->p_vmspace->vm_map.pmap)
PMAP_ACTIVATE(ptpmap,
(struct pcb *)curproc->p_addr, 1);
}
@ -1095,11 +1088,11 @@ pmap_page_protect(pa, prot)
s = splimp();
while (pv->pv_pmap != NULL) {
#ifdef DEBUG
if (!pmap_ste_v(pmap_ste(pv->pv_pmap,pv->pv_va)) ||
if (!pmap_ste_v(pv->pv_pmap,pv->pv_va) ||
pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)) != pa)
{
printf ("pmap_page_protect: va %08x, pmap_ste_v %d pmap_pte_pa %08x/%08x\n",
pv->pv_va, pmap_ste_v(pmap_ste(pv->pv_pmap,pv->pv_va)),
pv->pv_va, pmap_ste_v(pv->pv_pmap,pv->pv_va),
pmap_pte_pa(pmap_pte(pv->pv_pmap,pv->pv_va)),pa);
printf (" pvh %08x pv %08x pv_next %08x\n", pa_to_pvh(pa), pv, pv->pv_next);
panic("pmap_page_protect: bad mapping");
@ -1151,7 +1144,7 @@ pmap_protect(pmap, sva, eva, prot)
* Skip it, we don't want to force allocation
* of unnecessary PTE pages just to set the protection.
*/
if (!pmap_ste_v(pmap_ste(pmap, va))) {
if (!pmap_ste_v(pmap, va)) {
/* XXX: avoid address wrap around */
if (va >= amiga_trunc_seg((vm_offset_t)-1))
break;
@ -1236,7 +1229,7 @@ pmap_enter(pmap, va, pa, prot, wired)
/*
* Segment table entry not valid, we need a new PT page
*/
if (!pmap_ste_v(pmap_ste(pmap, va)))
if (!pmap_ste_v(pmap, va))
pmap_enter_ptpage(pmap, va);
pte = pmap_pte(pmap, va);
@ -1459,7 +1452,7 @@ pmap_change_wiring(pmap, va, wired)
* Should this ever happen? Ignore it for now,
* we don't want to force allocation of unnecessary PTE pages.
*/
if (!pmap_ste_v(pmap_ste(pmap, va))) {
if (!pmap_ste_v(pmap, va)) {
if (pmapdebug & PDB_PARANOIA)
printf("pmap_change_wiring: invalid STE for %x\n", va);
return;
@ -1505,7 +1498,7 @@ pmap_extract(pmap, va)
printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif
pa = 0;
if (pmap && pmap_ste_v(pmap_ste(pmap, va)))
if (pmap && pmap_ste_v(pmap, va))
pa = *(int *)pmap_pte(pmap, va);
if (pa)
pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
@ -1767,7 +1760,7 @@ pmap_pageable(pmap, sva, eva, pageable)
printf("pmap_pageable(%x, %x, %x, %x)\n",
pmap, sva, eva, pageable);
#endif
if (!pmap_ste_v(pmap_ste(pmap, sva)))
if (!pmap_ste_v(pmap, sva))
return;
pa = pmap_pte_pa(pmap_pte(pmap, sva));
if (!pmap_valid_page(pa))
@ -2047,30 +2040,15 @@ pmap_enter_ptpage(pmap, va)
* reference count drops to zero.
*/
if (pmap->pm_stab == Segtabzero) {
pmap->pm_stab = (u_int *)
kmem_alloc(kernel_map, AMIGA_STSIZE);
pmap->pm_stpa = (u_int *) pmap_extract(
pmap_kernel(), (vm_offset_t)pmap->pm_stab);
#ifdef M68040
if (mmutype == MMU_68040) {
pmap->pm_stpa = (u_int *)
kmem_alloc(kernel_map, AMIGA_040RTSIZE);
pmap->pm_stab = (u_int *)
kmem_alloc(kernel_map, AMIGA_040STSIZE*128);
/* intialize root table entries */
sg = (u_int *) pmap->pm_stpa;
sg_proto = pmap_extract(pmap_kernel(),
(vm_offset_t) pmap->pm_stab) | SG_RW | SG_V;
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf ("pmap_enter_ptpage: ROOT TABLE SETUP %x %x\n",
pmap->pm_stpa, sg_proto);
#endif
while (sg < (u_int *) ((u_int) pmap->pm_stpa + AMIGA_040RTSIZE)) {
*sg++ = sg_proto;
sg_proto += AMIGA_040STSIZE;
}
pmap->pm_stfree = 0x0000fffe; /* XXXX */
}
else
#endif
pmap->pm_stab = (u_int *)
kmem_alloc(kernel_map, AMIGA_STSIZE);
pmap->pm_stchanged = TRUE;
/*
* XXX may have changed segment table pointer for current
@ -2080,27 +2058,53 @@ pmap_enter_ptpage(pmap, va)
PMAP_ACTIVATE(pmap, (struct pcb *)curproc->p_addr, 1);
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
printf("enter: pmap %x stab %x\n",
pmap, pmap->pm_stab);
printf("enter_pt: pmap %x stab %x(%x)\n",
pmap, pmap->pm_stab, pmap->pm_stpa);
#endif
}
/*
* On the 68040, a page will hold 64 page tables, so the segment
* table will have to have 64 entries set up. First get the ste
* for the page mapped by the first PT entry.
*/
ste = pmap_ste(pmap, va);
#ifdef M68040
/*
* Allocate level 2 descriptor block if necessary
*/
if (mmutype == MMU_68040) {
ste = pmap_ste(pmap, va & ((SG4_MASK1 | SG4_MASK2) << 6));
va = trunc_page((vm_offset_t)pmap_pte(pmap,
va & ((SG4_MASK1|SG4_MASK2) << 6)));
} else
if (*ste == SG_NV) {
int ix;
caddr_t addr;
ix = bmtol2(pmap->pm_stfree);
if (ix == -1)
panic("enter_pt: out of address space");
pmap->pm_stfree &= ~l2tobm(ix);
addr = (caddr_t)&pmap->pm_stab[ix * SG4_LEV2SIZE];
bzero(addr, SG4_LEV2SIZE * sizeof(st_entry_t));
addr = (caddr_t)&pmap->pm_stpa[ix * SG4_LEV2SIZE];
*ste = (u_int) addr | SG_RW | SG_U | SG_V;
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
printf("enter_pt: alloc ste2 %d(%x)\n", ix, addr);
#endif
}
ste = pmap_ste2(pmap, va);
/*
* Since a level 2 descriptor maps a block of SG4_LEV3SIZE
* level 3 descriptors, we need a chunk of NPTEPG/SG4_LEV3SIZE
* (64) such descriptors (NBPG/SG4_LEV3SIZE bytes) to map a
* PT page -- the unit of allocation. We set 'ste' to point
* to the first entry of that chunk which is validated in its
* entirety below.
*/
ste = (u_int *)((int)ste & ~(NBPG / SG4_LEV3SIZE - 1));
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
printf("enter_pt: ste2 %x (%x)\n",
pmap_ste2(pmap, va), ste);
#endif
{
ste = pmap_ste(pmap, va);
va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
}
#endif
va = trunc_page((vm_offset_t)pmap_pte(pmap, va));
/*
* In the kernel we allocate a page from the kernel PT page
@ -2118,7 +2122,7 @@ pmap_enter_ptpage(pmap, va)
*/
#ifdef DEBUG
if (pmapdebug & PDB_COLLECT)
printf("enter: no KPT pages, collecting...\n");
printf("enter_pt: no KPT pages, collecting...\n");
#endif
pmap_collect(pmap_kernel());
if ((kpt = kpt_free_list) == (struct kpt_page *)0)
@ -2132,11 +2136,11 @@ pmap_enter_ptpage(pmap, va)
kpt->kpt_next = kpt_used_list;
kpt_used_list = kpt;
ptpa = kpt->kpt_pa;
bzero(kpt->kpt_va, NBPG);
bzero((char *)kpt->kpt_va, NBPG);
pmap_enter(pmap, va, ptpa, VM_PROT_DEFAULT, TRUE);
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf("enter: add &Sysptmap[%d]: %x (KPT page %x)\n",
printf("enter_pt: add &Sysptmap[%d]: %x (KPT page %x)\n",
ste - pmap_ste(pmap, 0),
*(int *)&Sysptmap[ste - pmap_ste(pmap, 0)],
kpt->kpt_va);
@ -2155,7 +2159,7 @@ pmap_enter_ptpage(pmap, va)
pmap->pm_sref++;
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf("enter: about to fault UPT pg at %x\n", va);
printf("enter_pt: about to fault UPT pg at %x\n", va);
#endif
if (vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE)
!= KERN_SUCCESS)
@ -2181,14 +2185,16 @@ pmap_enter_ptpage(pmap, va)
} while (pv = pv->pv_next);
}
#ifdef DEBUG
if (pv == NULL)
if (pv == NULL) {
printf("enter_pt: PV entry for PT page %x not found\n", ptpa);
panic("pmap_enter_ptpage: PT page not entered");
}
#endif
pv->pv_ptste = ste;
pv->pv_ptpmap = pmap;
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE))
printf("enter: new PT page at PA %x, ste at %x\n", ptpa, ste);
printf("enter_pt: new PT page at PA %x, ste at %x\n", ptpa, ste);
#endif
/*
@ -2201,12 +2207,11 @@ pmap_enter_ptpage(pmap, va)
*/
#ifdef M68040
if (mmutype == MMU_68040) {
/* 68040 has 64 page tables, so we have to map all 64 */
sg = (u_int *) ste;
sg_proto = (ptpa & SG_FRAME) | SG_RW | SG_V;
while (sg < (u_int *) (ste + 64)) {
*sg++ = sg_proto;
sg_proto += AMIGA_040PTSIZE;
u_int *este;
for (este = &ste[NPTEPG / SG4_LEV3SIZE]; ste < este; ++ste) {
*ste = ptpa | SG_U | SG_RW | SG_V;
ptpa += SG4_LEV3SIZE * sizeof(st_entry_t);
}
}
else
@ -2215,7 +2220,7 @@ pmap_enter_ptpage(pmap, va)
if (pmap != pmap_kernel()) {
#ifdef DEBUG
if (pmapdebug & (PDB_ENTER|PDB_PTPAGE|PDB_SEGTAB))
printf("enter: stab %x refcnt %d\n",
printf("enter_pt: stab %x refcnt %d\n",
pmap->pm_stab, pmap->pm_sref);
#endif
}
@ -2252,7 +2257,7 @@ pmap_check_wiring(str, va)
register int count, *pte;
va = trunc_page(va);
if (!pmap_ste_v(pmap_ste(pmap_kernel(), va)) ||
if (!pmap_ste_v(pmap_kernel(), va) ||
!pmap_pte_v(pmap_pte(pmap_kernel(), va)))
return;


@ -1,4 +1,4 @@
/* $NetBSD: trap.c,v 1.36 1995/08/18 15:27:39 chopps Exp $ */
/* $NetBSD: trap.c,v 1.37 1995/09/29 13:51:43 chopps Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -224,7 +224,9 @@ panictrap(type, code, v, fp)
regdump(fp, 128);
}
type &= ~T_USER;
DCIS(); /* XXX? push cache */
#ifdef DEBUG
DCIS(); /* XXX? push cache */
#endif
if ((u_int)type < trap_types)
panic(trap_type[type]);
panic("trap");
@ -265,8 +267,6 @@ trapmmufault(type, code, v, fp, p, sticks)
vm_map_t map;
u_int nss;
int rv;
vm = p->p_vmspace;
/*
* It is only a kernel address space fault iff:
@ -280,6 +280,7 @@ trapmmufault(type, code, v, fp, p, sticks)
/*
* Print out some data about the fault
*/
/*page0*/if (v < NBPG) mmudebug |= 0x100;
if (mmudebug && mmutype == MMU_68040) {
printf ("68040 access error: pc %x, code %x,"
" ea %x, fa %x\n", fp->f_pc, code, fp->f_fmt7.f_ea, v);
@ -287,10 +288,16 @@ trapmmufault(type, code, v, fp, p, sticks)
printf (" curpcb %x ->pcb_ustp %x / %x\n",
curpcb, curpcb->pcb_ustp,
curpcb->pcb_ustp << PG_SHIFT);
/*page0*/if (v < NBPG) Debugger();
/*page0*/mmudebug &= ~0x100;
}
#endif
if (p)
vm = p->p_vmspace;
if (type == T_MMUFLT &&
(p->p_addr->u_pcb.pcb_onfault == 0 ||
(!p || !p->p_addr || p->p_addr->u_pcb.pcb_onfault == 0 ||
(mmutype == MMU_68040 && (code & SSW_TMMASK) == FC_SUPERD) ||
(mmutype != MMU_68040 && (code & (SSW_DF|FC_SUPERD)) == (SSW_DF|FC_SUPERD))))
map = kernel_map;
@ -315,7 +322,7 @@ trapmmufault(type, code, v, fp, p, sticks)
* XXX: rude hack to make stack limits "work"
*/
nss = 0;
if ((caddr_t)va >= vm->vm_maxsaddr && map != kernel_map) {
if (map != kernel_map && (caddr_t)va >= vm->vm_maxsaddr) {
nss = clrnd(btoc(USRSTACK - (unsigned)va));
if (nss > btoc(p->p_rlimit[RLIMIT_STACK].rlim_cur)) {
rv = KERN_FAILURE;
@ -400,7 +407,7 @@ trapmmufault(type, code, v, fp, p, sticks)
* the current limit and we need to reflect that as an access
* error.
*/
if ((caddr_t)va >= vm->vm_maxsaddr && map != kernel_map) {
if (map != kernel_map && (caddr_t)va >= vm->vm_maxsaddr) {
if (rv == KERN_SUCCESS) {
nss = clrnd(btoc(USRSTACK-(unsigned)va));
if (nss > vm->vm_ssize)
@ -493,7 +500,7 @@ trap(type, code, v, frame)
* Kernel Bus error
*/
case T_BUSERR:
if (!p->p_addr->u_pcb.pcb_onfault)
if (!p || !p->p_addr || !p->p_addr->u_pcb.pcb_onfault)
panictrap(type, code, v, &frame);
trapcpfault(p, &frame);
return;
@ -627,8 +634,14 @@ trap(type, code, v, frame)
* Kernel/User page fault
*/
case T_MMUFLT:
if (p->p_addr->u_pcb.pcb_onfault == (caddr_t)fubail ||
p->p_addr->u_pcb.pcb_onfault == (caddr_t)subail) {
/*page0*/if (v < NBPG) {
/*page0*/ printf("page 0 access pc %x fa %x fp %x\n", frame.f_pc, v, &frame);
/*page0*/ mmudebug |= 0x100;
/*page0*/ Debugger();
/*page0*/}
if (p && p->p_addr &&
(p->p_addr->u_pcb.pcb_onfault == (caddr_t)fubail ||
p->p_addr->u_pcb.pcb_onfault == (caddr_t)subail)) {
trapcpfault(p, &frame);
return;
}
@ -641,7 +654,7 @@ trap(type, code, v, frame)
#ifdef DEBUG
if (i != SIGTRAP)
printf("trapsignal(%d, %d, %d, %x, %x)\n", p->p_pid, i,
ucode, v, frame.f_regs[PC]);
ucode, v, frame.f_pc);
#endif
trapsignal(p, i, ucode);
if ((type & T_USER) == 0)


@ -59,6 +59,7 @@
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/device.h>


@ -1,4 +1,4 @@
/* $NetBSD: grfabs_cc.c,v 1.8 1995/06/26 01:39:55 chopps Exp $ */
/* $NetBSD: grfabs_cc.c,v 1.9 1995/09/29 13:51:53 chopps Exp $ */
/*
* Copyright (c) 1994 Christian E. Hopps
@ -36,6 +36,7 @@
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/cdefs.h>
#include <sys/queue.h>

(file diff suppressed because it is too large)


@ -1,4 +1,4 @@
/* $NetBSD: mfc.c,v 1.6 1995/07/04 18:06:40 chopps Exp $ */
/* $NetBSD: mfc.c,v 1.7 1995/09/29 13:51:57 chopps Exp $ */
/*
* Copyright (c) 1994 Michael L. Hitch
@ -59,8 +59,12 @@
#include "mfcs.h"
#ifndef SEROBUF_SIZE
#define SEROBUF_SIZE 128
#endif
#ifndef SERIBUF_SIZE
#define SERIBUF_SIZE 1024
#endif
#define splser() spl6()


@ -1,4 +1,4 @@
/* $NetBSD: sbic.c,v 1.17 1995/09/16 16:11:26 chopps Exp $ */
/* $NetBSD: sbic.c,v 1.18 1995/09/29 13:51:59 chopps Exp $ */
/*
* Copyright (c) 1994 Christian E. Hopps
@ -147,6 +147,7 @@ int report_sense = 0;
int data_pointer_debug = 0;
u_char debug_asr, debug_csr, routine;
void sbictimeout __P((struct sbic_softc *dev));
void sbic_dump __P((struct sbic_softc *dev));
#define CSR_TRACE_SIZE 32
#if CSR_TRACE_SIZE
@ -561,10 +562,9 @@ sbic_scsidone(acb, stat)
return;
}
#endif
#if 1
if (((struct device *)(slp->device_softc))->dv_unit < dk_ndrive)
if (slp->device_softc &&
((struct device *)(slp->device_softc))->dv_unit < dk_ndrive)
++dk_xfer[((struct device *)(slp->device_softc))->dv_unit];
#endif
/*
* is this right?
*/


@ -1,4 +1,4 @@
/* $NetBSD: sci.c,v 1.13 1995/08/12 20:30:50 mycroft Exp $ */
/* $NetBSD: sci.c,v 1.14 1995/09/29 13:52:02 chopps Exp $ */
/*
* Copyright (c) 1994 Michael L. Hitch
@ -215,10 +215,9 @@ sci_scsidone(dev, stat)
if (xs == NULL)
panic("sci_scsidone");
#endif
#if 1
if (((struct device *)(xs->sc_link->device_softc))->dv_unit < dk_ndrive)
if (xs->sc_link->device_softc &&
((struct device *)(xs->sc_link->device_softc))->dv_unit < dk_ndrive)
++dk_xfer[((struct device *)(xs->sc_link->device_softc))->dv_unit];
#endif
/*
* is this right?
*/


@ -1,4 +1,4 @@
/* $NetBSD: siop.c,v 1.24 1995/09/16 16:11:29 chopps Exp $ */
/* $NetBSD: siop.c,v 1.25 1995/09/29 13:52:04 chopps Exp $ */
/*
* Copyright (c) 1994 Michael L. Hitch
@ -365,7 +365,8 @@ siop_scsidone(acb, stat)
if (acb == NULL || xs == NULL)
panic("siop_scsidone");
#endif
if (((struct device *)(slp->device_softc))->dv_unit < dk_ndrive)
if (slp->device_softc &&
((struct device *)(slp->device_softc))->dv_unit < dk_ndrive)
++dk_xfer[((struct device *)(slp->device_softc))->dv_unit];
/*
* is this right?


@ -1,4 +1,4 @@
/* $NetBSD: pmap.h,v 1.15 1995/09/16 16:11:37 chopps Exp $ */
/* $NetBSD: pmap.h,v 1.16 1995/09/29 13:52:08 chopps Exp $ */
/*
* Copyright (c) 1987 Carnegie-Mellon University
@ -46,19 +46,35 @@
* Pmap stuff
*/
struct pmap {
u_int *pm_ptab; /* KVA of page table */
u_int *pm_stab; /* KVA of segment table */
int pm_stfree; /* 040: free lev2 blocks */
u_int *pm_stpa; /* 040: ST phys addr */
int pm_stchanged; /* ST changed */
short pm_sref; /* segment table ref count */
short pm_count; /* pmap reference count */
long pm_ptpages; /* more stats: PT pages */
pt_entry_t *pm_ptab; /* KVA of page table */
st_entry_t *pm_stab; /* KVA of segment table */
int pm_stchanged; /* ST changed */
int pm_stfree; /* 040: free lev2 blocks */
u_int *pm_stpa; /* 040: ST phys addr */
short pm_sref; /* segment table ref count */
short pm_count; /* pmap reference count */
long pm_ptpages; /* more stats: PT pages */
simple_lock_data_t pm_lock; /* lock on pmap */
struct pmap_statistics pm_stats; /* pmap statistics */
};
typedef struct pmap *pmap_t;
typedef struct pmap *pmap_t;
/*
* On the 040 we keep track of which level 2 blocks are already in use
* with the pm_stfree mask. Bits are arranged from LSB (block 0) to MSB
* (block 31). For convenience, the level 1 table is considered to be
* block 0.
*
* MAX[KU]L2SIZE control how many pages of level 2 descriptors are allowed
* for the kernel and users. 16 implies only the initial "segment table"
* page is used. WARNING: don't change MAXUL2SIZE unless you can allocate
* physically contiguous pages for the ST in pmap.c!
*/
#define MAXKL2SIZE 32
#define MAXUL2SIZE 16
#define l2tobm(n) (1 << (n))
#define bmtol2(n) (ffs(n) - 1)
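A minimal sketch of how these macros carve a level 2 block out of the segment table, mirroring the new pmap_enter_ptpage() code in pmap.c above (a set bit in pm_stfree means the block is free):

int ix = bmtol2(pmap->pm_stfree);	/* ffs() - 1: lowest free block */

if (ix == -1)
	panic("out of segment table blocks");
pmap->pm_stfree &= ~l2tobm(ix);		/* mark block ix in use */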
/*
* Macros for speed
@ -66,9 +82,7 @@ typedef struct pmap *pmap_t;
#define PMAP_ACTIVATE(pmapp, pcbp, iscurproc) \
if ((pmapp) != NULL && (pmapp)->pm_stchanged) { \
(pcbp)->pcb_ustp = \
amiga_btop(pmap_extract(pmap_kernel(), \
(mmutype == MMU_68040) ? (vm_offset_t)(pmapp)->pm_stpa : \
(vm_offset_t)(pmapp)->pm_stab)); \
amiga_btop((vm_offset_t)(pmapp)->pm_stpa); \
if (iscurproc) \
loadustp((pcbp)->pcb_ustp); \
(pmapp)->pm_stchanged = FALSE; \
@ -91,6 +105,25 @@ typedef struct pv_entry {
#define PV_CI 0x01 /* all entries must be cache inhibited */
#define PV_PTPAGE 0x02 /* entry maps a page table page */
struct pv_page;
struct pv_page_info {
TAILQ_ENTRY(pv_page) pgi_list;
struct pv_entry *pgi_freelist;
int pgi_nfree;
};
/*
* This is basically:
* ((NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
*/
#define NPVPPG 340
struct pv_page {
struct pv_page_info pvp_pgi;
struct pv_entry pvp_pv[NPVPPG];
};
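Checking the arithmetic behind NPVPPG (illustrative; NBPG = 8192 plus the usual 16-byte struct pv_page_info and 24-byte struct pv_entry are assumptions, since the sizes aren't spelled out here): (8192 - 16) / 24 = 340. A compile-time guard in the same spirit might look like:

/*
 * Hypothetical check, not in the commit: fails to compile if
 * NPVPPG ever disagrees with the page arithmetic.
 */
typedef char npvppg_check[
    (NBPG - sizeof(struct pv_page_info)) / sizeof(struct pv_entry)
    == NPVPPG ? 1 : -1];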
#ifdef _KERNEL
pv_entry_t pv_table; /* array of entries, one per page */
u_int *Sysmap;
@ -104,7 +137,10 @@ struct pmap kernel_pmap_store;
#endif
#define pa_to_pvh(pa) (&pv_table[pa_index(pa)])
#define pmap_kernel() (&kernel_pmap_store)
#define active_pmap(pm) \
((pm) == pmap_kernel() || (pm) == curproc->p_vmspace->vm_map.pmap)
#define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count)
#define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count)
#endif /* _KERNEL */
#endif /* !_MACHINE_PMAP_H_ */


@ -1,4 +1,4 @@
/* $NetBSD: pte.h,v 1.13 1995/09/16 16:11:38 chopps Exp $ */
/* $NetBSD: pte.h,v 1.14 1995/09/29 13:52:09 chopps Exp $ */
/*
* Copyright (c) 1988 University of Utah.
@ -51,15 +51,15 @@
struct pte {
u_int pte;
};
typedef u_int pt_entry_t;
typedef u_int pt_entry_t; /* Mach page table entry */
struct ste {
u_int ste;
};
typedef u_int st_entry_t;
typedef u_int st_entry_t; /* segment table entry */
#define PT_ENTRY_NULL ((u_int *) 0)
#define ST_ENTRY_NULL ((u_int *) 0)
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#define ST_ENTRY_NULL ((st_entry_t *) 0)
#define SG_V 0x00000002 /* segment is valid */
#define SG_NV 0x00000000
@ -82,6 +82,9 @@ typedef u_int st_entry_t;
#define SG4_SHIFT3 13
#define SG4_ADDR1 0xfffffe00 /* pointer table address mask */
#define SG4_ADDR2 0xffffff80 /* page table address mask */
#define SG4_LEV1SIZE 128 /* entries in pointer table 1 */
#define SG4_LEV2SIZE 128 /* entries in pointer table 2 */
#define SG4_LEV3SIZE 32 /* entries in page table */
#define PG_V 0x00000001
#define PG_NV 0x00000000
@ -107,7 +110,11 @@ typedef u_int st_entry_t;
#define AMIGA_040RTSIZE 512 /* root (level 1) table size */
#define AMIGA_040STSIZE 512 /* segment (level 2) table size */
#define AMIGA_040PTSIZE 128 /* page (level 3) table size */
#if 0
#define AMIGA_STSIZE 1024 /* segment table size */
#else
#define AMIGA_STSIZE (MAXUL2SIZE*SG4_LEV2SIZE*sizeof(st_entry_t))
#endif
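With the values above, the new definition works out to MAXUL2SIZE * SG4_LEV2SIZE * sizeof(st_entry_t) = 16 * 128 * 4 = 8192 bytes, i.e. exactly one page given the usual amiga NBPG of 8192, which matches the MAXUL2SIZE comment in pmap.h about only the initial segment table page being used.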
/*
* AMIGA_MAX_COREUPT maximum number of incore user page tables
* AMIGA_USER_PTSIZE the number of bytes for user pagetables