From c0eed6da3d05ca7032b34094d0e7aa7aac316113 Mon Sep 17 00:00:00 2001 From: msaitoh Date: Tue, 30 Jun 1998 11:59:09 +0000 Subject: [PATCH] sync with -current after a long silence --- sys/arch/x68k/conf/INSTALL | 4 +- sys/arch/x68k/dev/com.c | 47 ++- sys/arch/x68k/dev/fd.c | 4 +- sys/arch/x68k/dev/grf.c | 82 +++-- sys/arch/x68k/dev/ite.c | 7 +- sys/arch/x68k/dev/mha.c | 156 +++++---- sys/arch/x68k/dev/spc.c | 32 +- sys/arch/x68k/dev/zs.c | 146 +++++---- sys/arch/x68k/include/disklabel.h | 4 +- sys/arch/x68k/include/param.h | 10 +- sys/arch/x68k/include/pmap.h | 3 +- sys/arch/x68k/include/proc.h | 3 +- sys/arch/x68k/include/vmparam.h | 26 +- sys/arch/x68k/stand/Makefile | 4 +- sys/arch/x68k/stand/bootufs.c | 4 +- sys/arch/x68k/stand/writefdboot | 6 +- sys/arch/x68k/x68k/autoconf.c | 6 +- sys/arch/x68k/x68k/genassym.cf | 12 +- sys/arch/x68k/x68k/locore.s | 216 +++++++++---- sys/arch/x68k/x68k/machdep.c | 119 ++++++- sys/arch/x68k/x68k/mem.c | 15 +- sys/arch/x68k/x68k/pmap.c | 472 +++++++++++++++++++++------- sys/arch/x68k/x68k/pmap_bootstrap.c | 254 +++++++++++---- sys/arch/x68k/x68k/trap.c | 68 +++- sys/arch/x68k/x68k/vectors.s | 6 +- sys/arch/x68k/x68k/vm_machdep.c | 60 ++-- 26 files changed, 1285 insertions(+), 481 deletions(-) diff --git a/sys/arch/x68k/conf/INSTALL b/sys/arch/x68k/conf/INSTALL index 445fcc5c6e97..a495a5cf9c49 100644 --- a/sys/arch/x68k/conf/INSTALL +++ b/sys/arch/x68k/conf/INSTALL @@ -1,4 +1,4 @@ -# $NetBSD: INSTALL,v 1.3 1998/06/26 01:54:14 lukem Exp $ +# $NetBSD: INSTALL,v 1.4 1998/06/30 11:59:09 msaitoh Exp $ # # ALL -- everything that's currently supported @@ -52,7 +52,7 @@ file-system FFS # Berkeley fast file system file-system MFS # memory file system; uses RAM and swap file-system NFS # Network File System client file-system CD9660 # ISO 9660 CD-ROM file system, with RR -#file-system MSDOSFS # MS-DOS FAT file system +file-system MSDOSFS # MS-DOS FAT file system file-system KERNFS # kernel file system; recommended options FASTLINKS # fast symbolic links in FFS diff --git a/sys/arch/x68k/dev/com.c b/sys/arch/x68k/dev/com.c index 3e8e627e45a1..1a246e0f2c93 100644 --- a/sys/arch/x68k/dev/com.c +++ b/sys/arch/x68k/dev/com.c @@ -1,4 +1,4 @@ -/* $NetBSD: com.c,v 1.10 1998/01/12 21:13:42 thorpej Exp $ */ +/* $NetBSD: com.c,v 1.11 1998/06/30 11:59:09 msaitoh Exp $ */ /*- * Copyright (c) 1993, 1994, 1995, 1996 @@ -157,7 +157,8 @@ extern int kgdb_rate; extern int kgdb_debug_init; #endif -#define COMUNIT(x) (minor(x)) +#define COMUNIT(x) (minor(x) & 0x7F) +#define COMDIALOUT(x) (minor(x) & 0x80) /* Macros to clear/set/test flags. 
*/ #define SET(t, f) (t) |= (f) @@ -420,8 +421,15 @@ comopen(dev, flag, mode, p) tp->t_oproc = comstart; tp->t_param = comparam; tp->t_dev = dev; - if (!ISSET(tp->t_state, TS_ISOPEN)) { - SET(tp->t_state, TS_WOPEN); + + if ((tp->t_state & TS_ISOPEN) && + (tp->t_state & TS_XCLUDE) && + p->p_ucred->cr_uid != 0) + return (EBUSY); + + s = spltty(); + + if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) { ttychars(tp); tp->t_iflag = TTYDEF_IFLAG; tp->t_oflag = TTYDEF_OFLAG; @@ -435,8 +443,6 @@ comopen(dev, flag, mode, p) tp->t_lflag = TTYDEF_LFLAG; tp->t_ispeed = tp->t_ospeed = comdefaultrate; - s = spltty(); - comparam(tp, &tp->t_termios); ttsetwater(tp); @@ -502,28 +508,17 @@ comopen(dev, flag, mode, p) SET(tp->t_state, TS_CARR_ON); else CLR(tp->t_state, TS_CARR_ON); - } else if (ISSET(tp->t_state, TS_XCLUDE) && p->p_ucred->cr_uid != 0) - return EBUSY; - else - s = spltty(); - - /* wait for carrier if necessary */ - if (!ISSET(flag, O_NONBLOCK)) - while (!ISSET(tp->t_cflag, CLOCAL) && - !ISSET(tp->t_state, TS_CARR_ON)) { - SET(tp->t_state, TS_WOPEN); - error = ttysleep(tp, &tp->t_rawq, TTIPRI | PCATCH, - ttopen, 0); - if (error) { - /* XXX should turn off chip if we're the - only waiter */ - splx(s); - return error; - } - } + } splx(s); - return (*linesw[tp->t_line].l_open)(dev, tp); + error = ttyopen(tp, COMDIALOUT(dev), ISSET(flag, O_NONBLOCK)); + + if (!error) + error = (*linesw[tp->t_line].l_open)(dev, tp); + + /* XXX cleanup on error */ + + return error; } int diff --git a/sys/arch/x68k/dev/fd.c b/sys/arch/x68k/dev/fd.c index 91426d36dfcc..ceb26916ba0b 100644 --- a/sys/arch/x68k/dev/fd.c +++ b/sys/arch/x68k/dev/fd.c @@ -1,4 +1,4 @@ -/* $NetBSD: fd.c,v 1.17 1998/05/24 19:32:49 is Exp $ */ +/* $NetBSD: fd.c,v 1.18 1998/06/30 11:59:10 msaitoh Exp $ */ /*- * Copyright (c) 1993, 1994, 1995 Charles Hannum. @@ -128,6 +128,8 @@ struct cfattach fdc_ca = { sizeof(struct fdc_softc), fdcprobe, fdcattach }; +extern struct cfdriver fdc_cd; + /* * Floppies come in various flavors, e.g., 1.2MB vs 1.44MB; here is how * we tell them apart. diff --git a/sys/arch/x68k/dev/grf.c b/sys/arch/x68k/dev/grf.c index 9e007708c85d..165bce34fef3 100644 --- a/sys/arch/x68k/dev/grf.c +++ b/sys/arch/x68k/dev/grf.c @@ -1,4 +1,4 @@ -/* $NetBSD: grf.c,v 1.8 1998/06/25 23:59:15 thorpej Exp $ */ +/* $NetBSD: grf.c,v 1.9 1998/06/30 11:59:10 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -48,6 +48,7 @@ * Hardware access is through the machine dependent grf switch routines. 
*/ +#include "opt_uvm.h" #include "opt_compat_hpux.h" #include @@ -78,12 +79,17 @@ extern struct emul emul_hpux; #include #include +#if defined(UVM) +#include +#endif + #include #include "ite.h" #if NITE == 0 #define iteon(u,f) #define iteoff(u,f) +#define ite_reinit(u) #endif #ifdef DEBUG @@ -111,18 +117,21 @@ grfopen(dev, flags, mode, p) struct proc *p; { int unit = GRFUNIT(dev); - register struct grf_softc *gp = grf_cd.cd_devs[unit]; + register struct grf_softc *gp; int error = 0; - if (unit >= grf_cd.cd_ndevs || (gp->g_flags & GF_ALIVE) == 0) - return(ENXIO); + if (unit >= grf_cd.cd_ndevs || + (gp = grf_cd.cd_devs[unit]) == NULL || + (gp->g_flags & GF_ALIVE) == 0) + return (ENXIO); + if ((gp->g_flags & (GF_OPEN|GF_EXCLUDE)) == (GF_OPEN|GF_EXCLUDE)) return(EBUSY); #ifdef COMPAT_HPUX /* * XXX: cannot handle both HPUX and BSD processes at the same time */ - if (curproc->p_emul == &emul_hpux) + if (p->p_emul == &emul_hpux) if (gp->g_flags & GF_BSDOPEN) return(EBUSY); else @@ -154,6 +163,9 @@ grfclose(dev, flags, mode, p) { register struct grf_softc *gp = grf_cd.cd_devs[GRFUNIT(dev)]; + if ((gp->g_flags & GF_ALIVE) == 0) + return (ENXIO); + (void) grfoff(dev); #ifdef COMPAT_HPUX (void) grfunlock(gp); @@ -175,6 +187,9 @@ grfioctl(dev, cmd, data, flag, p) register struct grf_softc *gp = grf_cd.cd_devs[unit]; int error; + if ((gp->g_flags & GF_ALIVE) == 0) + return (ENXIO); + #ifdef COMPAT_HPUX if (p->p_emul == &emul_hpux) return(hpuxgrfioctl(dev, cmd, data, flag, p)); @@ -223,11 +238,8 @@ grfpoll(dev, events, p) int events; struct proc *p; { - int revents = 0; - if (events & (POLLOUT | POLLWRNORM)) - revents |= events & (POLLOUT | POLLWRNORM); - return (revents); + return (events & (POLLOUT | POLLWRNORM)); } /*ARGSUSED*/ @@ -236,6 +248,7 @@ grfmmap(dev, off, prot) dev_t dev; int off, prot; { + return (grfaddr(grf_cd.cd_devs[GRFUNIT(dev)], off)); } @@ -300,9 +313,10 @@ grfaddr(gp, off) #ifdef COMPAT_HPUX /*ARGSUSED*/ +int hpuxgrfioctl(dev, cmd, data, flag, p) dev_t dev; - u_long cmd; + int cmd; caddr_t data; int flag; struct proc *p; @@ -404,6 +418,7 @@ hpuxgrfioctl(dev, cmd, data, flag, p) return(error); } +int grflock(gp, block) register struct grf_softc *gp; int block; @@ -414,8 +429,8 @@ grflock(gp, block) #ifdef DEBUG if (grfdebug & GDB_LOCK) - printf("grflock(%d): dev %x flags %x lockpid %x\n", - p->p_pid, gp-grf_softc, gp->g_flags, + printf("grflock(%d): flags %x lockpid %x\n", + p->p_pid, gp->g_flags, gp->g_lockp ? gp->g_lockp->p_pid : -1); #endif if (gp->g_pid) { @@ -438,8 +453,8 @@ grflock(gp, block) return(OEAGAIN); do { gp->g_flags |= GF_WANTED; - if (error = tsleep((caddr_t)&gp->g_flags, - (PZERO+1) | PCATCH, devioc, 0)) + if ((error = tsleep((caddr_t)&gp->g_flags, + (PZERO+1) | PCATCH, devioc, 0))) return (error); } while (gp->g_lockp); } @@ -457,13 +472,14 @@ grflock(gp, block) return(0); } +int grfunlock(gp) register struct grf_softc *gp; { #ifdef DEBUG if (grfdebug & GDB_LOCK) - printf("grfunlock(%d): dev %x flags %x lockpid %d\n", - curproc->p_pid, gp-grf_softc, gp->g_flags, + printf("grfunlock(%d): flags %x lockpid %d\n", + curproc->p_pid, gp->g_flags, gp->g_lockp ? gp->g_lockp->p_pid : -1); #endif if (gp->g_lockp != curproc) @@ -493,14 +509,17 @@ grfunlock(gp) * XXX: This may give the wrong result for remote stats of other * machines where device 10 exists. 
*/ +int grfdevno(dev) dev_t dev; { int unit = GRFUNIT(dev); - struct grf_softc *gp = grf_cd.cd_devs[unit]; + struct grf_softc *gp; int newdev; - if (unit >= grf_cd.cd_ndevs || (gp->g_flags&GF_ALIVE) == 0) + if (unit >= grf_cd.cd_ndevs || + (gp = grf_cd.cd_devs[unit]) == NULL || + (gp->g_flags&GF_ALIVE) == 0) return(bsdtohpuxdev(dev)); /* magic major number */ newdev = 12 << 24; @@ -547,9 +566,15 @@ grfmap(dev, addrp, p) vn.v_type = VCHR; /* XXX */ vn.v_specinfo = &si; /* XXX */ vn.v_rdev = dev; /* XXX */ +#if defined(UVM) + error = uvm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)addrp, + (vm_size_t)len, VM_PROT_ALL, VM_PROT_ALL, + flags, (caddr_t)&vn, 0); +#else error = vm_mmap(&p->p_vmspace->vm_map, (vm_offset_t *)addrp, (vm_size_t)len, VM_PROT_ALL, VM_PROT_ALL, flags, (caddr_t)&vn, 0); +#endif if (error == 0) (void) (*gp->g_sw->gd_mode)(gp, GM_MAP, *addrp); return(error); @@ -573,11 +598,17 @@ grfunmap(dev, addr, p) return(EINVAL); /* XXX: how do we deal with this? */ (void) (*gp->g_sw->gd_mode)(gp, GM_UNMAP, 0); size = round_page(gp->g_display.gd_regsize + gp->g_display.gd_fbsize); +#if defined(UVM) + rv = uvm_unmap(&p->p_vmspace->vm_map, (vm_offset_t)addr, + (vm_offset_t)addr + size, FALSE); +#else rv = vm_deallocate(&p->p_vmspace->vm_map, (vm_offset_t)addr, size); +#endif return(rv == KERN_SUCCESS ? 0 : EINVAL); } #ifdef COMPAT_HPUX +int iommap(dev, addrp) dev_t dev; caddr_t *addrp; @@ -585,20 +616,21 @@ iommap(dev, addrp) #ifdef DEBUG if (grfdebug & (GDB_MMAP|GDB_IOMAP)) - printf("iommap(%d): addr %x\n", curproc->p_pid, *addrp); + printf("iommap(%d): addr %p\n", curproc->p_pid, *addrp); #endif return(EINVAL); } +int iounmmap(dev, addr) dev_t dev; caddr_t addr; { +#ifdef DEBUG int unit = minor(dev); -#ifdef DEBUG if (grfdebug & (GDB_MMAP|GDB_IOMAP)) - printf("iounmmap(%d): id %d addr %x\n", + printf("iounmmap(%d): id %d addr %p\n", curproc->p_pid, unit, addr); #endif return(0); @@ -611,6 +643,7 @@ iounmmap(dev, addr) * process ids. Returns a slot number between 1 and GRFMAXLCK or 0 if no * slot is available. */ +int grffindpid(gp) struct grf_softc *gp; { @@ -649,6 +682,7 @@ done: return(i); } +void grfrmpid(gp) struct grf_softc *gp; { @@ -677,6 +711,7 @@ grfrmpid(gp) #endif } +int grflckmmap(dev, addrp) dev_t dev; caddr_t *addrp; @@ -685,12 +720,13 @@ grflckmmap(dev, addrp) struct proc *p = curproc; /* XXX */ if (grfdebug & (GDB_MMAP|GDB_LOCK)) - printf("grflckmmap(%d): addr %x\n", + printf("grflckmmap(%d): addr %p\n", p->p_pid, *addrp); #endif return(EINVAL); } +int grflckunmmap(dev, addr) dev_t dev; caddr_t addr; @@ -699,7 +735,7 @@ grflckunmmap(dev, addr) int unit = minor(dev); if (grfdebug & (GDB_MMAP|GDB_LOCK)) - printf("grflckunmmap(%d): id %d addr %x\n", + printf("grflckunmmap(%d): id %d addr %p\n", curproc->p_pid, unit, addr); #endif return(EINVAL); diff --git a/sys/arch/x68k/dev/ite.c b/sys/arch/x68k/dev/ite.c index f24495630d07..e90362e3b0f0 100644 --- a/sys/arch/x68k/dev/ite.c +++ b/sys/arch/x68k/dev/ite.c @@ -1,4 +1,4 @@ -/* $NetBSD: ite.c,v 1.9 1998/01/12 21:13:45 thorpej Exp $ */ +/* $NetBSD: ite.c,v 1.10 1998/06/30 11:59:10 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. 
@@ -1319,12 +1319,9 @@ iteputchar(c, ip) register int c; struct ite_softc *ip; { - struct tty *kbd_tty; int n, x, y; char *cp; - kbd_tty = ite_tty[kbd_ite->device.dv_unit]; - if (c >= 0x20 && ip->escape) { switch (ip->escape) { @@ -2225,7 +2222,7 @@ iteputchar(c, ip) case BEL: #if NBELL > 0 - if (kbd_tty && ite_tty[kbd_ite->device.dv_unit] == kbd_tty) + if (kbd_ite && ite_tty[kbd_ite->device.dv_unit]) opm_bell(); #endif break; diff --git a/sys/arch/x68k/dev/mha.c b/sys/arch/x68k/dev/mha.c index 672cc11886a3..29a1cd1a1c48 100644 --- a/sys/arch/x68k/dev/mha.c +++ b/sys/arch/x68k/dev/mha.c @@ -1,4 +1,4 @@ -/* $NetBSD: mha.c,v 1.3 1998/01/12 21:13:46 thorpej Exp $ */ +/* $NetBSD: mha.c,v 1.4 1998/06/30 11:59:10 msaitoh Exp $ */ /* * Copyright (c) 1996 Masaru Oki, Takumi Nakamura and Masanobu Saitoh. All rights reserved. @@ -49,6 +49,18 @@ #define SPC_USE_SYNCHRONOUS 0 #define SPC_SYNC_REQ_ACK_OFS 8 +/* Default DMA mode? */ +#define MHA_DMA_LIMIT_XFER 1 +#define MHA_DMA_BURST_XFER 1 +#define MHA_DMA_SHORT_BUS_CYCLE 1 + +#define MHA_DMA_DATAIN (0 | (MHA_DMA_LIMIT_XFER << 1) \ + | (MHA_DMA_BURST_XFER << 2) \ + | (MHA_DMA_SHORT_BUS_CYCLE << 3)) +#define MHA_DMA_DATAOUT (1 | (MHA_DMA_LIMIT_XFER << 1) \ + | (MHA_DMA_BURST_XFER << 2) \ + | (MHA_DMA_SHORT_BUS_CYCLE << 3)) + /* Include debug functions? At the end of this file there are a bunch of * functions that will print out various information regarding queued SCSI * commands, driver state and chip contents. You can call them from the @@ -224,7 +236,7 @@ void mha_timeout __P((void *)); void mha_minphys __P((struct buf *)); void mha_dequeue __P((struct mha_softc *, struct acb *)); inline void mha_setsync __P((struct mha_softc *, struct spc_tinfo *)); -#ifdef SPC_DEBUG +#if SPC_DEBUG void mha_print_acb __P((struct acb *)); void mha_show_scsi_cmd __P((struct acb *)); void mha_print_active_acb __P((void)); @@ -890,42 +902,58 @@ mha_done(sc, acb) xs->error = XS_TIMEOUT; } else if (acb->flags & ACB_CHKSENSE) { xs->error = XS_SENSE; - } else if ((acb->stat & ST_MASK) == SCSI_CHECK) { - struct scsipi_sense *ss = (void *)&acb->cmd; - SPC_MISC(("requesting sense ")); - /* First, save the return values */ - xs->resid = acb->dleft; - xs->status = acb->stat; - /* Next, setup a request sense command block */ - bzero(ss, sizeof(*ss)); - ss->opcode = REQUEST_SENSE; - /*ss->byte2 = sc_link->lun << 5;*/ - ss->length = sizeof(struct scsipi_sense_data); - acb->clen = sizeof(*ss); - acb->daddr = (char *)&xs->sense; - acb->dleft = sizeof(struct scsipi_sense_data); - acb->flags |= ACB_CHKSENSE; -/*XXX - must take off queue here */ - if (acb != sc->sc_nexus) { - panic("%s: mha_sched: floating acb %p", - sc->sc_dev.dv_xname, acb); - } - TAILQ_INSERT_HEAD(&sc->ready_list, acb, chain); - ACB_SETQ(acb, ACB_QREADY); - ti->lubusy &= ~(1<scsipi_scsi.lun); - ti->senses++; - timeout(mha_timeout, acb, (xs->timeout*hz)/1000); - if (sc->sc_nexus == acb) { - sc->sc_nexus = NULL; - sc->sc_state = SPC_IDLE; - mha_sched(sc); - } -#if 0 - mha_sense(sc, acb); -#endif - return; } else { - xs->resid = acb->dleft; + switch (acb->stat & ST_MASK) { + case SCSI_CHECK: + { + struct scsipi_sense *ss = (void *)&acb->cmd; + SPC_MISC(("requesting sense ")); + /* First, save the return values */ + xs->resid = acb->dleft; + xs->status = acb->stat; + /* Next, setup a request sense command block */ + bzero(ss, sizeof(*ss)); + ss->opcode = REQUEST_SENSE; + /*ss->byte2 = sc_link->lun << 5;*/ + ss->length = sizeof(struct scsipi_sense_data); + acb->clen = sizeof(*ss); + acb->daddr = (char *)&xs->sense; + 
acb->dleft = sizeof(struct scsipi_sense_data); + acb->flags |= ACB_CHKSENSE; +/*XXX - must take off queue here */ + if (acb != sc->sc_nexus) { + panic("%s: mha_sched: floating acb %p", + sc->sc_dev.dv_xname, acb); + } + TAILQ_INSERT_HEAD(&sc->ready_list, acb, chain); + ACB_SETQ(acb, ACB_QREADY); + ti->lubusy &= ~(1<scsipi_scsi.lun); + ti->senses++; + timeout(mha_timeout, acb, (xs->timeout*hz)/1000); + if (sc->sc_nexus == acb) { + sc->sc_nexus = NULL; + sc->sc_state = SPC_IDLE; + mha_sched(sc); + } +#if 0 + mha_sense(sc, acb); +#endif + return; + } + case SCSI_BUSY: + xs->error = XS_BUSY; + break; + case SCSI_OK: + xs->resid = acb->dleft; + break; + default: + xs->error = XS_DRIVER_STUFFUP; +#if SPC_DEBUG + printf("%s: mha_done: bad stat 0x%x\n", + sc->sc_dev.dv_xname, acb->stat); +#endif + break; + } } } @@ -936,7 +964,7 @@ mha_done(sc, acb) if (xs->resid != 0) printf("resid=%d ", xs->resid); if (xs->error == XS_SENSE) - printf("sense=0x%02x\n", xs->sense.error_code); + printf("sense=0x%02x\n", xs->sense.scsi_sense.error_code); else printf("error=%d\n", xs->error); } @@ -1195,14 +1223,14 @@ scsi_print_addr(acb->xs->sc_link); printf("MSG_MESSAGE_REJECT>>"); ti->period = mha_cpb2stp(sc, p); #endif -#ifdef SPC_DEBUG +#if SPC_DEBUG scsi_print_addr(acb->xs->sc_link); #endif if ((sc->sc_flags&SPC_SYNCHNEGO) == 0) { /* Target initiated negotiation */ if (ti->flags & T_SYNCMODE) { ti->flags &= ~T_SYNCMODE; -#ifdef SPC_DEBUG +#if SPC_DEBUG printf("renegotiated "); #endif } @@ -1219,7 +1247,7 @@ scsi_print_addr(acb->xs->sc_link); printf("MSG_MESSAGE_REJECT>>"); TMR = TM_SYNC; ti->flags |= T_SYNCMODE; } -#ifdef SPC_DEBUG +#if SPC_DEBUG printf("max sync rate %d.%02dMb/s\n", r, s); #endif @@ -1605,11 +1633,27 @@ mha_dataio_dma(dw, cw, sc, p, n) vaddr = p; paddr = (char *)kvtop(vaddr); - DCFP((vm_offset_t)paddr); /* XXX */ +#if MHA_DMA_SHORT_BUS_CYCLE == 1 + if ((*(int *)&IODEVbase->io_sram[0xac]) & (1 << ((vm_offset_t)paddr >> 19))) + dw &= ~(1 << 3); +#endif +#if defined(M68040) || defined(M68060) +#if defined(M68020) || defined(M68030) + if (mmutype == MMU_68040) +#endif + DCFP((vm_offset_t)paddr); /* XXX */ +#endif for (ts = (NBPG - ((long)vaddr & PGOFSET)); ts < n && (char *)kvtop(vaddr + ts + 4) == paddr + ts + 4; ts += NBPG) - DCFP((vm_offset_t)paddr + ts); +#if defined(M68040) || defined(M68060) +#if defined(M68020) || defined(M68030) + if (mmutype == MMU_68040) +#endif + DCFP((vm_offset_t)paddr + ts); +#else + ; +#endif if (ts > n) ts = n; #if 0 @@ -1646,7 +1690,7 @@ mha_dataout(sc, p, n) if (((long)p & 1) || (n & 1)) return mha_dataout_pio(sc, p, n); - return mha_dataio_dma(0x000F, CMD_SEND_FROM_DMA, sc, p, n); + return mha_dataio_dma(MHA_DMA_DATAOUT, CMD_SEND_FROM_DMA, sc, p, n); } int @@ -1663,7 +1707,7 @@ mha_datain(sc, p, n) return n; if (acb->cmd.opcode == 0x03 || ((long)p & 1) || (n & 1)) return mha_datain_pio(sc, p, n); - return mha_dataio_dma(0x000E, CMD_RECEIVE_TO_DMA, sc, p, n); + return mha_dataio_dma(MHA_DMA_DATAIN, CMD_RECEIVE_TO_DMA, sc, p, n); } @@ -1679,7 +1723,7 @@ int mhaintr(unit) int unit; { - struct mha_softc *sc = mha_cd.cd_devs[unit]; /* XXX */ + struct mha_softc *sc; u_char ints; struct acb *acb; struct scsipi_link *sc_link; @@ -1688,15 +1732,21 @@ mhaintr(unit) u_short r; int n; +#if 1 /* XXX called during attach? */ + if (tmpsc != NULL) { + SPC_MISC(("[%x %x]\n", mha_cd.cd_devs, sc)); + sc = tmpsc; + } else { +#endif + /* return if not configured */ - if (sc == NULL) - return; + if (!mha_cd.cd_devs) /* Check if at least one unit is attached. 
*/ + return; /* XXX should check if THE unit exists. */ + + sc = mha_cd.cd_devs[unit]; #if 1 /* XXX */ - if (tmpsc != NULL && tmpsc != sc) { - SPC_MISC(("[%x %x]\n", mha_cd.cd_devs, sc)); - sc = tmpsc; - } + } #endif /* @@ -2017,7 +2067,7 @@ again: splx(s); } -#ifdef SPC_DEBUG +#if SPC_DEBUG /* * The following functions are mostly used for debugging purposes, either * directly called from the driver or from the kernel debugger. diff --git a/sys/arch/x68k/dev/spc.c b/sys/arch/x68k/dev/spc.c index 9886fd88f923..d2111042e4a6 100644 --- a/sys/arch/x68k/dev/spc.c +++ b/sys/arch/x68k/dev/spc.c @@ -1,4 +1,4 @@ -/* $NetBSD: spc.c,v 1.13 1998/01/12 21:13:48 thorpej Exp $ */ +/* $NetBSD: spc.c,v 1.14 1998/06/30 11:59:10 msaitoh Exp $ */ #define integrate __inline static @@ -1006,14 +1006,28 @@ spc_done(sc, acb) xs->error = XS_DRIVER_STUFFUP; } else if (acb->flags & ACB_SENSE) { xs->error = XS_SENSE; - } else if (acb->target_stat == SCSI_CHECK) { - /* First, save the return values */ - xs->resid = acb->data_length; - xs->status = acb->target_stat; - spc_sense(sc, acb); - return; } else { - xs->resid = acb->data_length; + switch (acb->target_stat) { + case SCSI_CHECK: + /* First, save the return values */ + xs->resid = acb->data_length; + xs->status = acb->target_stat; + spc_sense(sc, acb); + return; + case SCSI_BUSY: + xs->error = XS_BUSY; + break; + case SCSI_OK: + xs->resid = acb->data_length; + break; + default: + xs->error = XS_DRIVER_STUFFUP; +#if SPC_DEBUG + printf("%s: spc_done: bad stat 0x%x\n", + sc->sc_dev.dv_xname, acb->target_stat); +#endif + break; + } } } @@ -2174,7 +2188,7 @@ spc_timeout(arg) splx(s); } -#ifdef SPC_DEBUG +#if SPC_DEBUG /* * The following functions are mostly used for debugging purposes, either * directly called from the driver or from the kernel debugger. diff --git a/sys/arch/x68k/dev/zs.c b/sys/arch/x68k/dev/zs.c index 8f20156d69e8..727fa99231e7 100644 --- a/sys/arch/x68k/dev/zs.c +++ b/sys/arch/x68k/dev/zs.c @@ -1,4 +1,4 @@ -/* $NetBSD: zs.c,v 1.8 1998/01/12 21:13:49 thorpej Exp $ */ +/* $NetBSD: zs.c,v 1.9 1998/06/30 11:59:11 msaitoh Exp $ */ /* * Copyright (c) 1992, 1993 @@ -79,6 +79,8 @@ #define ZSMAJOR 12 /* XXX */ +#define ZSUNIT(x) (minor(x) & 0x7f) +#define ZSDIALOUT(x) (minor(x) & 0x80) #define ZS_MOUSE 1 /* XXX */ #define PCLK (5*1000*1000) /* PCLK pin input clock rate */ @@ -136,6 +138,7 @@ cdev_decl(zs); static void zsiopen __P((struct tty *)); static void zsiclose __P((struct tty *)); +static void zs_shutdown __P((struct zs_chanstate *cs)); static void zsstart __P((struct tty *)); void zsstop __P((struct tty *, int)); static int zsparam __P((struct tty *, struct termios *)); @@ -510,6 +513,34 @@ zsiclose(tp) } +static void +zs_shutdown(cs) + struct zs_chanstate *cs; +{ + struct tty *tp = cs->cs_ttyp; + int s; + + s = splzs(); + + /* XXX not yet */ + + /* Clear any break condition set with TIOCSBRK. */ + cs->cs_preg[5] &= ~ZSWR5_BREAK; + cs->cs_creg[5] &= ~ZSWR5_BREAK; + ZS_WRITE(cs->cs_zc, 5, cs->cs_creg[5]); + + /* + * Hang up if necessary. Wait a bit, so the other side has time to + * notice even if we immediately open the port again. + */ + if (tp->t_cflag & HUPCL) { + zs_modem(cs, 0); + (void) tsleep(cs, TTIPRI, ttclos, hz); + } + + splx(s); +} + /* * Open a zs serial port. This interface may not be used to open * the keyboard and mouse ports. 
(XXX) @@ -524,7 +555,7 @@ zsopen(dev, flags, mode, p) register struct tty *tp; register struct zs_chanstate *cs; struct zs_softc *zi; - int unit = minor(dev), zs = unit >> 1, error, s; + int unit = ZSUNIT(dev), zs = unit >> 1, error, s; if (zs >= zs_cd.cd_ndevs || (zi = zs_cd.cd_devs[zs]) == NULL || unit == ZS_MOUSE) @@ -535,54 +566,45 @@ zsopen(dev, flags, mode, p) if (cs->cs_consio) return (ENXIO); /* ??? */ tp = cs->cs_ttyp; + if ((tp->t_state & TS_ISOPEN) && + (tp->t_state & TS_XCLUDE) && + p->p_ucred->cr_uid != 0) + return (EBUSY); + s = spltty(); - if ((tp->t_state & TS_ISOPEN) == 0) { + + if ((tp->t_state & TS_ISOPEN) == 0 && tp->t_wopen == 0) { ttychars(tp); - if (tp->t_ispeed == 0) { - tp->t_iflag = TTYDEF_IFLAG; - tp->t_oflag = TTYDEF_OFLAG; - tp->t_cflag = TTYDEF_CFLAG; - tp->t_lflag = TTYDEF_LFLAG; - tp->t_ispeed = tp->t_ospeed = cs->cs_speed; - } + tp->t_iflag = TTYDEF_IFLAG; + tp->t_oflag = TTYDEF_OFLAG; + tp->t_cflag = TTYDEF_CFLAG; + tp->t_lflag = TTYDEF_LFLAG; + tp->t_ispeed = tp->t_ospeed = cs->cs_speed; (void) zsparam(tp, &tp->t_termios); ttsetwater(tp); - } else if (tp->t_state & TS_XCLUDE && p->p_ucred->cr_uid != 0) { - splx(s); - return (EBUSY); } - error = 0; - for (;;) { - register int rr0; - /* loop, turning on the device, until carrier present */ - zs_modem(cs, 1); - /* May never get status intr if carrier already on. -gwr */ - rr0 = cs->cs_zc->zc_csr; - ZS_DELAY(); - if ((rr0 & ZSRR0_DCD) || cs->cs_softcar) - tp->t_state |= TS_CARR_ON; - if (flags & O_NONBLOCK || tp->t_cflag & CLOCAL || - tp->t_state & TS_CARR_ON) - break; - tp->t_state |= TS_WOPEN; - error = ttysleep(tp, (caddr_t)&tp->t_rawq, TTIPRI | PCATCH, - ttopen, 0); - if (error) { - if (!(tp->t_state & TS_ISOPEN)) { - zs_modem(cs, 0); - tp->t_state &= ~TS_WOPEN; - ttwakeup(tp); - } - splx(s); - return error; - } - } splx(s); - if (error == 0) - error = linesw[tp->t_line].l_open(dev, tp); + + error = ttyopen(tp, ZSDIALOUT(dev), flags & O_NONBLOCK); if (error) - zs_modem(cs, 0); + goto bad; + + error = (*linesw[tp->t_line].l_open)(dev, tp); + if (error) + goto bad; + + return (0); + +bad: + if ((tp->t_state & TS_ISOPEN) == 0 && tp->t_wopen == 0) { + /* + * We failed to open the device, and nobody else had it opened. + * Clean up the state as appropriate. + */ + zs_shutdown(cs); + } + return (error); } @@ -599,25 +621,19 @@ zsclose(dev, flags, mode, p) register struct zs_chanstate *cs; register struct tty *tp; struct zs_softc *zi; - int unit = minor(dev), s; + int unit = ZSUNIT(dev), s; zi = zs_cd.cd_devs[unit >> 1]; cs = &zi->zi_cs[unit & 1]; tp = cs->cs_ttyp; linesw[tp->t_line].l_close(tp, flags); - if (tp->t_cflag & HUPCL || tp->t_state & TS_WOPEN || - (tp->t_state & TS_ISOPEN) == 0) { - zs_modem(cs, 0); - /* hold low for 1 second */ - (void) tsleep((caddr_t)cs, TTIPRI, ttclos, hz); - } - if (cs->cs_creg[5] & ZSWR5_BREAK) - { - s = splzs(); - cs->cs_preg[5] &= ~ZSWR5_BREAK; - cs->cs_creg[5] &= ~ZSWR5_BREAK; - ZS_WRITE(cs->cs_zc, 5, cs->cs_creg[5]); - splx(s); + if ((tp->t_state & TS_ISOPEN) == 0 && tp->t_wopen == 0) { + /* + * Although we got a last close, the device may still be in + * use; e.g. if this was the dialout node, and there are still + * processes waiting for carrier on the non-dialout node. 
+ */ + zs_shutdown(cs); } ttyclose(tp); #ifdef KGDB @@ -642,7 +658,7 @@ zsread(dev, uio, flags) register struct zs_chanstate *cs; register struct zs_softc *zi; register struct tty *tp; - int unit = minor(dev); + int unit = ZSUNIT(dev); zi = zs_cd.cd_devs[unit >> 1]; cs = &zi->zi_cs[unit & 1]; @@ -661,7 +677,7 @@ zswrite(dev, uio, flags) register struct zs_chanstate *cs; register struct zs_softc *zi; register struct tty *tp; - int unit = minor(dev); + int unit = ZSUNIT(dev); zi = zs_cd.cd_devs[unit >> 1]; cs = &zi->zi_cs[unit & 1]; @@ -676,7 +692,7 @@ zstty(dev) { register struct zs_chanstate *cs; register struct zs_softc *zi; - int unit = minor(dev); + int unit = ZSUNIT(dev); zi = zs_cd.cd_devs[unit >> 1]; cs = &zi->zi_cs[unit & 1]; @@ -1088,7 +1104,7 @@ zsioctl(dev, cmd, data, flag, p) int flag; struct proc *p; { - int unit = minor(dev); + int unit = ZSUNIT(dev); struct zs_softc *zi = zs_cd.cd_devs[unit >> 1]; register struct zs_chanstate *cs = &zi->zi_cs[unit & 1]; register struct tty *tp = cs->cs_ttyp; @@ -1205,7 +1221,7 @@ zsstart(tp) { register struct zs_chanstate *cs; register int s, nch; - int unit = minor(tp->t_dev); + int unit = ZSUNIT(tp->t_dev); struct zs_softc *zi = zs_cd.cd_devs[unit >> 1]; cs = &zi->zi_cs[unit & 1]; @@ -1266,7 +1282,7 @@ zsstop(tp, flag) int flag; { register struct zs_chanstate *cs; - register int s, unit = minor(tp->t_dev); + register int s, unit = ZSUNIT(tp->t_dev); struct zs_softc *zi = zs_cd.cd_devs[unit >> 1]; cs = &zi->zi_cs[unit & 1]; @@ -1293,7 +1309,7 @@ zsparam(tp, t) register struct tty *tp; register struct termios *t; { - int unit = minor(tp->t_dev); + int unit = ZSUNIT(tp->t_dev); struct zs_softc *zi = zs_cd.cd_devs[unit >> 1]; register struct zs_chanstate *cs = &zi->zi_cs[unit & 1]; register int tmp, tmp5, cflag, s; @@ -1433,7 +1449,7 @@ zshwiflow(tp, flag) struct tty *tp; int flag; { - int unit = minor(tp->t_dev); + int unit = ZSUNIT(tp->t_dev); struct zs_softc *zi = zs_cd.cd_devs[unit >> 1]; register struct zs_chanstate *cs = &zi->zi_cs[unit & 1]; int s; @@ -1548,7 +1564,7 @@ zs_kgdb_init() if (major(kgdb_dev) != ZSMAJOR) return; - unit = minor(kgdb_dev); + unit = ZSUNIT(kgdb_dev); zs = unit >> 1; if ((addr = zsaddr[zs]) == NULL) addr = zsaddr[zs] = findzs(zs); diff --git a/sys/arch/x68k/include/disklabel.h b/sys/arch/x68k/include/disklabel.h index e559a99dbb37..a043252c58f9 100644 --- a/sys/arch/x68k/include/disklabel.h +++ b/sys/arch/x68k/include/disklabel.h @@ -1,4 +1,4 @@ -/* $NetBSD: disklabel.h,v 1.1.1.1 1996/05/05 12:17:03 oki Exp $ */ +/* $NetBSD: disklabel.h,v 1.2 1998/06/30 11:59:11 msaitoh Exp $ */ /* * Copyright (c) 1994 Masaru Oki @@ -49,7 +49,7 @@ struct dos_partition { unsigned int dp_flag:8; /* state of partition */ unsigned int dp_start:24; /* start position (1024bytes/block) */ unsigned long dp_size; /* partition size (1024bytes/block) */ -} dos_partitions[NDOSPART]; +}; #include struct cpu_disklabel { diff --git a/sys/arch/x68k/include/param.h b/sys/arch/x68k/include/param.h index 796182a33df1..fd9904ccabbf 100644 --- a/sys/arch/x68k/include/param.h +++ b/sys/arch/x68k/include/param.h @@ -1,4 +1,4 @@ -/* $NetBSD: param.h,v 1.9 1998/06/25 23:59:16 thorpej Exp $ */ +/* $NetBSD: param.h,v 1.10 1998/06/30 11:59:11 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -116,13 +116,11 @@ void spl0 __P((void)); /* watch out for side effects */ #define splx(s) ((s) & PSL_IPL ? 
_spl(s) : spl0()) -#ifdef _KERNEL -#ifndef _LOCORE -int cpuspeed; +#if defined(_KERNEL) && !defined(_LOCORE) +extern int cpuspeed; #define delay(n) do { register int N = cpuspeed * (n); while (--N > 0); } while(0) #define DELAY(n) delay(n) -#endif -#endif +#endif /* _KERNEL && !_LOCORE */ #if defined(_KERNEL) && !defined(_LKM) #include "opt_compat_hpux.h" diff --git a/sys/arch/x68k/include/pmap.h b/sys/arch/x68k/include/pmap.h index 9e48dd29718a..4070afecea7d 100644 --- a/sys/arch/x68k/include/pmap.h +++ b/sys/arch/x68k/include/pmap.h @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.h,v 1.8 1998/02/18 02:05:36 cgd Exp $ */ +/* $NetBSD: pmap.h,v 1.9 1998/06/30 11:59:11 msaitoh Exp $ */ /* * Copyright (c) 1987 Carnegie-Mellon University @@ -146,7 +146,6 @@ extern struct pv_entry *pv_table; /* array of entries, one per page */ #ifndef MACHINE_NONCONTIG #define pmap_page_index(pa) atop(pa - vm_first_phys) #endif -#define pa_to_pvh(pa) (&pv_table[pmap_page_index(pa)]) #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) #define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) diff --git a/sys/arch/x68k/include/proc.h b/sys/arch/x68k/include/proc.h index c6135f390564..d6434be49633 100644 --- a/sys/arch/x68k/include/proc.h +++ b/sys/arch/x68k/include/proc.h @@ -1,4 +1,4 @@ -/* $NetBSD: proc.h,v 1.2 1997/12/24 17:46:08 oki Exp $ */ +/* $NetBSD: proc.h,v 1.3 1998/06/30 11:59:11 msaitoh Exp $ */ /* * Copyright (c) 1991, 1993 @@ -47,7 +47,6 @@ struct mdproc { #define MDP_STACKADJ 0x0002 /* frame SP adjusted, might have to undo when system call returns ERESTART. */ -#define MDP_HPUXTRACE 0x0004 /* being traced by HP-UX process */ #define MDP_HPUXMMAP 0x0008 /* VA space is multiply mapped */ #define MDP_CCBDATA 0x0010 /* copyback caching of data (68040) */ #define MDP_CCBSTACK 0x0020 /* copyback caching of stack (68040) */ diff --git a/sys/arch/x68k/include/vmparam.h b/sys/arch/x68k/include/vmparam.h index b88e0502dc58..c32609331b42 100644 --- a/sys/arch/x68k/include/vmparam.h +++ b/sys/arch/x68k/include/vmparam.h @@ -1,4 +1,4 @@ -/* $NetBSD: vmparam.h,v 1.3 1997/07/12 16:20:50 perry Exp $ */ +/* $NetBSD: vmparam.h,v 1.4 1998/06/30 11:59:11 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,6 +42,9 @@ * @(#)vmparam.h 8.2 (Berkeley) 4/19/94 */ +#ifndef _X68K_VMPARAM_H_ +#define _X68K_VMPARAM_H_ + /* * Machine dependent constants for X68K */ @@ -233,3 +236,24 @@ /* pcb base */ #define pcbb(p) ((u_int)(p)->p_addr) + +/* Use new VM page bootstrap interface. */ +#ifdef MACHINE_NEW_NONCONTIG + +/* + * Constants which control the way the VM system deals with memory segments. + */ +#define VM_PHYSSEG_MAX 3 +#define VM_PHYSSEG_STRAT VM_PSTRAT_RANDOM /* ??? */ +#define VM_PHYSSEG_NOADD + +/* + * pmap-specific data stored in the vm_physmem[] array. + */ +struct pmap_physseg { + struct pv_entry *pvent; /* pv table for this seg */ + char *attrs; /* page attributes for this seg */ +}; +#endif + +#endif /* _X68K_VMPARAM_H_ */ diff --git a/sys/arch/x68k/stand/Makefile b/sys/arch/x68k/stand/Makefile index c80cc1c8fb1e..f5a5ad777b06 100644 --- a/sys/arch/x68k/stand/Makefile +++ b/sys/arch/x68k/stand/Makefile @@ -1,4 +1,4 @@ -# $NetBSD: Makefile,v 1.1 1997/10/19 11:00:51 oki Exp $ +# $NetBSD: Makefile,v 1.2 1998/06/30 11:59:12 msaitoh Exp $ # Define all target-dependent utilities as macros in case of cross compilation. 
# These definitions can be overridden by @@ -70,7 +70,7 @@ ${DESTDIR}/usr/mdec/fdboot: ${DESTDIR}/usr/mdec/sdboot ln ${DESTDIR}/usr/mdec/sdboot ${DESTDIR}/usr/mdec/fdboot install: ${DESTDIR}/usr/mdec/sdboot ${DESTDIR}/usr/mdec/fdboot - install -c -o bin -g bin -m 755 writefdboot ${DESTDIR}/usr/mdec + install -c -o bin -g bin -m 755 ${.CURDIR}/writefdboot ${DESTDIR}/usr/mdec # make package dist: ${.CURDIR}/$(BOOT) diff --git a/sys/arch/x68k/stand/bootufs.c b/sys/arch/x68k/stand/bootufs.c index a535c335b7ee..610a5bcbb0d6 100644 --- a/sys/arch/x68k/stand/bootufs.c +++ b/sys/arch/x68k/stand/bootufs.c @@ -1,4 +1,4 @@ -/* $NetBSD: bootufs.c,v 1.1 1997/10/19 11:01:04 oki Exp $ */ +/* $NetBSD: bootufs.c,v 1.2 1998/06/30 11:59:12 msaitoh Exp $ */ /*- * Copyright (c) 1993, 1994 Takumi Nakamura. @@ -54,8 +54,8 @@ #include #include #include +#include #include -#include #include #include diff --git a/sys/arch/x68k/stand/writefdboot b/sys/arch/x68k/stand/writefdboot index 0c41c59f1e09..3f63eb768692 100644 --- a/sys/arch/x68k/stand/writefdboot +++ b/sys/arch/x68k/stand/writefdboot @@ -5,7 +5,7 @@ # # usage: writeboot # -# $NetBSD: writefdboot,v 1.2 1998/01/05 20:52:24 perry Exp $ +# $NetBSD: writefdboot,v 1.3 1998/06/30 11:59:12 msaitoh Exp $ case "$#" in 2) ;; @@ -17,7 +17,7 @@ boot="$1" rootdev="$2" temp=/tmp/writeboot$$ -set - `ls -lLd "$boot"` +set - `ls -lLgd "$boot"` case "$5" in [1-9]*) :;; *) exit 1;; esac nblock=`expr \( $5 + 1023 \) / 1024 ` @@ -27,5 +27,5 @@ rm -f $temp dd if="$rootdev" bs=1024 count=1 | dd bs=4 skip=16 count=69 dd if="$boot" bs=340 skip=1 ) > $temp -cat $temp /dev/zero | dd bs=1024 count=$nblock of="$rootdev" +cat $temp /dev/zero | dd conv=notrunc bs=1024 count=$nblock of="$rootdev" rm -f $temp diff --git a/sys/arch/x68k/x68k/autoconf.c b/sys/arch/x68k/x68k/autoconf.c index 6daf502525dd..db3ff6fe8133 100644 --- a/sys/arch/x68k/x68k/autoconf.c +++ b/sys/arch/x68k/x68k/autoconf.c @@ -1,4 +1,4 @@ -/* $NetBSD: autoconf.c,v 1.12 1997/10/19 09:30:06 oki Exp $ */ +/* $NetBSD: autoconf.c,v 1.13 1998/06/30 11:59:12 msaitoh Exp $ */ /* * Copyright (c) 1995 Leo Weppelman @@ -304,10 +304,6 @@ struct cfattach mainbus_ca = { sizeof(struct device), mbmatch, mbattach }; -struct cfdriver mainbus_cd = { - NULL, "mainbus", DV_DULL, NULL, 0 -}; - int mbmatch(pdp, match, auxp) struct device *pdp; diff --git a/sys/arch/x68k/x68k/genassym.cf b/sys/arch/x68k/x68k/genassym.cf index 7575318a9c07..802bbe0fc983 100644 --- a/sys/arch/x68k/x68k/genassym.cf +++ b/sys/arch/x68k/x68k/genassym.cf @@ -1,4 +1,4 @@ -# $NetBSD: genassym.cf,v 1.4 1998/01/06 08:46:18 thorpej Exp $ +# $NetBSD: genassym.cf,v 1.5 1998/06/30 11:59:12 msaitoh Exp $ # # Copyright (c) 1982, 1990, 1993 @@ -46,6 +46,10 @@ include include +ifdef UVM +include +endif + include include include @@ -84,10 +88,12 @@ define FPU_68882 FPU_68882 define FPU_68040 FPU_68040 define FPU_68060 FPU_68060 +ifdef M68K_MMU_HP # values for ectype define EC_PHYS EC_PHYS define EC_NONE EC_NONE define EC_VIRT EC_VIRT +endif # general constants define UPAGES UPAGES @@ -110,7 +116,11 @@ define SSLEEP SSLEEP define SRUN SRUN # interrupt/fault metering +ifdef UVM +define UVMEXP_INTRS offsetof(struct uvmexp, intrs) +else define V_INTR offsetof(struct vmmeter, v_intr) +endif define T_BUSERR T_BUSERR define T_ADDRERR T_ADDRERR diff --git a/sys/arch/x68k/x68k/locore.s b/sys/arch/x68k/x68k/locore.s index 990580dd292a..92a21b33521a 100644 --- a/sys/arch/x68k/x68k/locore.s +++ b/sys/arch/x68k/x68k/locore.s @@ -1,4 +1,4 @@ -/* $NetBSD: locore.s,v 1.28 1998/05/24 19:32:49 is 
Exp $ */ +/* $NetBSD: locore.s,v 1.29 1998/06/30 11:59:12 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,6 +42,8 @@ * @(#)locore.s 8.6 (Berkeley) 5/27/94 */ +#include "opt_uvm.h" + #include "ite.h" #include "spc.h" #include "mha.h" @@ -57,8 +59,6 @@ .text GLOBAL(kernel_text) -#include - /* * Temporary stack for a variety of purposes. * Try and make this the first thing is the data segment so it @@ -69,6 +69,8 @@ GLOBAL(kernel_text) .space NBPG ASLOCAL(tmpstk) +#include + .text /* * This is where we wind up if the kernel jumps to location 0. @@ -533,16 +535,24 @@ _zstrap: addql #4,sp INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+48 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+48 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif rte _kbdtrap: INTERRUPT_SAVEREG jbsr _kbdintr INTERRUPT_RESTOREREG - addql #1,_intrcnt+40 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+40 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif /* jra rei*/ rte @@ -555,8 +565,12 @@ _fdctrap: jbsr _C_LABEL(fdcintr) INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+20 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+20 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _fdcdmatrap: @@ -565,8 +579,12 @@ _fdcdmatrap: jbsr _C_LABEL(fdcdmaintr) INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+20 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+20 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei @@ -576,8 +594,12 @@ _fdcdmaerrtrap: jbsr _C_LABEL(fdcdmaerrintr) INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+20 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+20 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei #ifdef SCSIDMA @@ -587,8 +609,12 @@ _spcdmatrap: jbsr _C_LABEL(spcdmaintr) INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+20 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+20 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei @@ -598,8 +624,12 @@ _spcdmaerrtrap: jbsr _C_LABEL(spcdmaerrintr) INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+20 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+20 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei #endif @@ -609,8 +639,12 @@ _audiotrap: jbsr _audiointr INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+52 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+52 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _partrap: @@ -621,8 +655,12 @@ _partrap: addql #4,sp INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+56 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+56 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _audioerrtrap: @@ -631,8 +669,12 @@ _audioerrtrap: jbsr _audioerrintr INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+20 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+20 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _spctrap: @@ -643,8 +685,12 @@ _spctrap: addql #4,sp INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+44 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+44 +#if defined(UVM) + addql 
#1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _exspctrap: @@ -660,8 +706,12 @@ _exspctrap: addql #4,sp #endif INTERRUPT_RESTOREREG - addql #1,_intrcnt+44 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+44 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _powtrap: @@ -671,8 +721,12 @@ _powtrap: jbsr _C_LABEL(powintr) INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+60 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+60 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _com0trap: @@ -684,8 +738,12 @@ _com0trap: addql #4,sp INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+68 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+68 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _com1trap: @@ -696,8 +754,12 @@ _com1trap: addql #4,sp INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+68 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+68 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _edtrap: @@ -709,8 +771,12 @@ _edtrap: addql #4,sp INTERRUPT_RESTOREREG #endif - addql #1,_intrcnt+64 - addql #1,_cnt+V_INTR + addql #1,_C_LABEL(intrcnt)+64 +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _lev1intr: @@ -721,7 +787,7 @@ _lev5intr: _lev6intr: INTERRUPT_SAVEREG Lnotdma: - lea _intrcnt,a0 + lea _C_LABEL(intrcnt),a0 movw sp@(22),d0 | use vector offset andw #0xfff,d0 | sans frame type addql #1,a0@(-0x60,d0:w) | to increment apropos counter @@ -730,25 +796,33 @@ Lnotdma: jbsr _intrhand | handle interrupt addql #4,sp | pop SR INTERRUPT_RESTOREREG - addql #1,_cnt+V_INTR +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS +#else + addql #1,_C_LABEL(cnt)+V_INTR +#endif jra rei _timertrap: movw #SPL4,sr | XXX? 
moveml #0xC0C0,sp@- | save scratch registers - addql #1,_intrcnt+28 | count hardclock interrupts + addql #1,_C_LABEL(intrcnt)+28 | count hardclock interrupts lea sp@(16),a1 | a1 = &clockframe movl a1,sp@- jbsr _hardclock | hardclock(&frame) movl #1,sp@ jbsr _ms_modem | ms_modem(1) addql #4,sp - addql #1,_cnt+V_INTR | chalk up another interrupt +#if defined(UVM) + addql #1,_C_LABEL(uvmexp)+UVMEXP_INTRS | chalk up another interrupt +#else + addql #1,_C_LABEL(cnt)+V_INTR | chalk up another interrupt +#endif moveml sp@+,#0x0303 | restore scratch registers jra rei | all done _lev7intr: - addql #1,_intrcnt+36 + addql #1,_C_LABEL(intrcnt)+36 clrl sp@- moveml #0xFFFF,sp@- | save registers movl usp,a0 | and save @@ -881,12 +955,17 @@ ASENTRY_NOPROFILE(start) movel sp@+,d5 | fphysize -- last page movel sp@,a4 | esym - lea _edata,a0 | clear out BSS + RELOC(_vectab, a0) | set Vector Base Register temporaly + movc a0,vbr + +#if 0 /* XXX this should be done by the boot loader */ + RELOC(_edata, a0) | clear out BSS movl #_end-4,d0 | (must be <= 256 kB) subl #_edata,d0 lsrl #2,d0 1: clrl a0@+ dbra d0,1b +#endif RELOC(tmpstk, a0) movl a0,sp | give ourselves a temporary stack @@ -899,7 +978,8 @@ ASENTRY_NOPROFILE(start) RELOC(_lowram, a0) movl a5,a0@ | store start of physical memory - jbsr _intr_reset | XXX + RELOC(_intr_reset, a0) + jbsr a0@ | XXX movl #CACHE_OFF,d0 movc d0,cacr | clear and disable on-chip cache(s) @@ -1019,18 +1099,16 @@ Lstploaddone: lea 0x02c00000,a1 | a1: graphic VRAM ( not JUPITER-X ) | DRAM ( JUPITER-X ) movw a0@,d0 - movw #0x000f,a0@ - cmpw #0x000f,a1@ | JUPITER-X? - jne Ljupiter | yes, set SUPER bit - clrw a0@ - tstw a1@ | be sure JUPITER-X? - jeq Ljupiterdone | no, skip -Ljupiter: + movw d0,d1 + notw d1 + movw d1,a1@ + movw d0,a0@ + cmpw a1@,d1 | JUPITER-X? + jne Ljupiterdone | no, skip movl #0x0100a240,d0 | to access system register .long 0x4e7b0006 | movc d0,dtt0 movb #0x01,0x01800003@ | set "SUPER" bit Ljupiterdone: - movw d0,a0@ #endif /* JUPITER */ moveq #0,d0 | ensure TT regs are disabled .long 0x4e7b0004 | movc d0,itt0 @@ -1063,9 +1141,16 @@ Lmotommu2: * Should be running mapped from this point on */ Lenab1: +/* set vector base in virtual address */ + movl #_C_LABEL(vectab),d0 | set Vector Base Register + movc d0,vbr /* select the software page size now */ lea _ASM_LABEL(tmpstk),sp | temporary stack +#if defined(UVM) + jbsr _C_LABEL(uvm_setpagesize) | select software page size +#else jbsr _C_LABEL(vm_set_page_size) | select software page size +#endif /* set kernel stack, user SP, and initial pcb */ movl _C_LABEL(proc0paddr),a1 | get proc0 pcb addr lea a1@(USPACE-4),sp | set kernel stack to end of area @@ -1142,8 +1227,13 @@ _proc_trampoline: */ #include - .globl _whichqs,_qs,_cnt,_panic + .globl _whichqs,_qs,_panic .globl _curproc,_want_resched +#if defined(UVM) + .globl _uvmexp +#else + .globl _cnt +#endif /* * Use common m68k process manipulation routines. @@ -1178,11 +1268,15 @@ ENTRY(switch_exit) /* Free old process's resources. 
*/ movl #USPACE,sp@- | size of u-area movl a0@(P_ADDR),sp@- | address of process's u-area - movl _kernel_map,sp@- | map it was allocated in - jbsr _kmem_free | deallocate it + movl _C_LABEL(kernel_map),sp@- | map it was allocated in +#if defined(UVM) + jbsr _C_LABEL(uvm_km_free) | deallocate it +#else + jbsr _C_LABEL(kmem_free) | deallocate it +#endif lea sp@(12),sp | pop args - jra _cpu_switch + jra _C_LABEL(cpu_switch) /* * When no processes are on the runq, Swtch branches to Idle @@ -1835,9 +1929,12 @@ LmotommuF: #endif clrl sp@ | value for pmove to TC (turn off MMU) pmove sp@,tc | disable MMU - movl 0x00ff0000:l,_vectab - movl 0x00ff0004:l,_vectab+4 - moval 0x00ff0004:l,a0 + + subal a1,a1 + moveml 0x00ff0000,#0x0101 | get RESET vectors in ROM + | (d0: ssp, a0: pc) + moveml #0x0101,a1@ | put them at 0x0000 (for Xellent30) + movc a1,vbr | reset Vector Base Register jmp a0@ | reboot X680x0 Lebootcode: @@ -1845,15 +1942,18 @@ Lebootcode: .globl _machineid _machineid: .long 0 | default to X68030 - .globl _mmutype,_cputype,_fputype,_ectype,_protorp + .globl _mmutype,_cputype,_fputype,_protorp _mmutype: .long MMU_68030 | default to 030 internal MMU _cputype: .long CPU_68030 | default to 68030 CPU _fputype: .long FPU_NONE +#ifdef M68K_MMU_HP + .globl _ectype _ectype: .long EC_NONE | external cache type, default to none +#endif _protorp: .long 0,0 | prototype root pointer .globl _cold diff --git a/sys/arch/x68k/x68k/machdep.c b/sys/arch/x68k/x68k/machdep.c index 71485ea722c9..09dbaef8fbea 100644 --- a/sys/arch/x68k/x68k/machdep.c +++ b/sys/arch/x68k/x68k/machdep.c @@ -1,4 +1,4 @@ -/* $NetBSD: machdep.c,v 1.30 1998/06/25 23:59:17 thorpej Exp $ */ +/* $NetBSD: machdep.c,v 1.31 1998/06/30 11:59:12 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,6 +42,7 @@ * @(#)machdep.c 8.10 (Berkeley) 4/20/94 */ +#include "opt_uvm.h" #include "opt_compat_hpux.h" #include @@ -91,6 +92,11 @@ #define MAXMEM 64*1024*CLSIZE /* XXX - from cmap.h */ #include #include + +#if defined(UVM) +#include +#endif + #include #include @@ -106,7 +112,20 @@ int badbaddr __P((caddr_t)); /* the following is used externally (sysctl_hw) */ char machine[] = MACHINE; /* from */ +int cpuspeed; /* XXX */ + +#if defined(UVM) +vm_map_t exec_map = NULL; +vm_map_t mb_map = NULL; +vm_map_t phys_map = NULL; +#else vm_map_t buffer_map; +#endif + +#if defined(MACHINE_NEW_NONCONTIG) +extern vm_offset_t avail_start; +#endif + #ifdef MACHINE_NONCONTIG extern int numranges; extern u_long low[8]; @@ -163,6 +182,9 @@ void intrhand __P((int)); void consinit() { +#if defined(MACHINE_NEW_NONCONTIG) + int i; +#endif /* * Set cpuspeed immediately since cninit() called routines @@ -193,6 +215,37 @@ consinit() if (boothowto & RB_KDB) Debugger(); #endif + +#if defined(MACHINE_NEW_NONCONTIG) + /* + * Tell the VM system about available physical memory. + */ +#if defined(UVM) +#ifdef MACHINE_NONCONTIG + for (i = 0; i < numranges; i++) { + vm_offset_t startmem = i == 0 ? avail_start : low[i]; + + uvm_page_physload(atop(startmem), atop(high[i]), + atop(startmem), atop(high[i])); + } +#else + uvm_page_physload(atop(avail_start), atop(avail_end), + atop(avail_start), atop(avail_end)); +#endif +#else /* not UVM */ +#ifdef MACHINE_NONCONTIG + for (i = 0; i < numranges; i++) { + vm_offset_t startmem = i == 0 ? 
avail_start : low[i]; + + vm_page_physload(atop(startmem), atop(high[i]), + atop(startmem), atop(high[i])); + } +#else + vm_page_physload(atop(avail_start), atop(avail_end), + atop(avail_start), atop(avail_end)); +#endif +#endif +#endif /* MACHINE_NEW_NONCONTIG */ } /* @@ -306,14 +359,20 @@ again: if (nswbuf > 256) nswbuf = 256; /* sanity */ } +#if !defined(UVM) valloc(swbuf, struct buf, nswbuf); +#endif valloc(buf, struct buf, nbuf); /* * End of first pass, size has been calculated so allocate memory */ if (firstaddr == 0) { size = (vm_size_t)(v - firstaddr); +#if defined(UVM) + firstaddr = (caddr_t) uvm_km_zalloc(kernel_map, round_page(size)); +#else firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size)); +#endif if (firstaddr == 0) panic("startup: no room for tables"); goto again; @@ -328,12 +387,21 @@ again: * in that they usually occupy more virtual memory than physical. */ size = MAXBSIZE * nbuf; +#if defined(UVM) + if (uvm_map(kernel_map, (vm_offset_t *) &buffers, round_page(size), + NULL, UVM_UNKNOWN_OFFSET, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, + UVM_ADV_NORMAL, 0)) != KERN_SUCCESS) + panic("startup: cannot allocate VM for buffers"); + minaddr = (vm_offset_t)buffers; +#else buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers, &maxaddr, size, TRUE); minaddr = (vm_offset_t)buffers; if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0, &minaddr, size, FALSE) != KERN_SUCCESS) panic("startup: cannot allocate buffers"); +#endif /* UVM */ #if 0 if ((bufpages / nbuf) >= btoc(MAXBSIZE)) { /* don't want to alloc more physical mem than needed */ @@ -343,6 +411,35 @@ again: base = bufpages / nbuf; residual = bufpages % nbuf; for (i = 0; i < nbuf; i++) { +#if defined(UVM) + vm_size_t curbufsize; + vm_offset_t curbuf; + struct vm_page *pg; + + /* + * Each buffer has MAXBSIZE bytes of VM space allocated. Of + * that MAXBSIZE space, we allocate and map (base+1) pages + * for the first "residual" buffers, and then we allocate + * "base" pages for the rest. + */ + curbuf = (vm_offset_t) buffers + (i * MAXBSIZE); + curbufsize = CLBYTES * ((i < residual) ? (base+1) : base); + + while (curbufsize) { + pg = uvm_pagealloc(NULL, 0, NULL); + if (pg == NULL) + panic("cpu_startup: not enough memory for " + "buffer cache"); +#if defined(PMAP_NEW) + pmap_kenter_pgs(curbuf, &pg, 1); +#else + pmap_enter(kernel_map->pmap, curbuf, + VM_PAGE_TO_PHYS(pg), VM_PROT_ALL, TRUE); +#endif + curbuf += PAGE_SIZE; + curbufsize -= PAGE_SIZE; + } +#else /* ! UVM */ vm_size_t curbufsize; vm_offset_t curbuf; @@ -357,24 +454,40 @@ again: curbufsize = CLBYTES * (i < residual ? base+1 : base); vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE); vm_map_simplify(buffer_map, curbuf); +#endif /* UVM */ } /* * Allocate a submap for exec arguments. This map effectively * limits the number of processes exec'ing at any time. */ +#if defined(UVM) + exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, + 16*NCARGS, TRUE, FALSE, NULL); +#else exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 16*NCARGS, TRUE); +#endif /* * Allocate a submap for physio */ +#if defined(UVM) + phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, + VM_PHYS_SIZE, TRUE, FALSE, NULL); +#else phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE, TRUE); +#endif /* * Finally, allocate mbuf cluster submap. 
*/ +#if defined(UVM) + mb_map = uvm_km_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr, + VM_MBUF_SIZE, FALSE, FALSE, NULL); +#else mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr, VM_MBUF_SIZE, FALSE); +#endif /* * Initialize callouts */ @@ -386,7 +499,11 @@ again: #ifdef DEBUG pmapdebug = opmapdebug; #endif +#if defined(UVM) + printf("avail mem = %ld\n", ptoa(uvmexp.free)); +#else printf("avail mem = %ld\n", ptoa(cnt.v_free_count)); +#endif printf("using %d buffers containing %d bytes of memory\n", nbuf, bufpages * CLBYTES); /* diff --git a/sys/arch/x68k/x68k/mem.c b/sys/arch/x68k/x68k/mem.c index 3ca140ada1be..e5a8a40340b7 100644 --- a/sys/arch/x68k/x68k/mem.c +++ b/sys/arch/x68k/x68k/mem.c @@ -1,4 +1,4 @@ -/* $NetBSD: mem.c,v 1.9 1998/05/07 21:01:43 kleink Exp $ */ +/* $NetBSD: mem.c,v 1.10 1998/06/30 11:59:12 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -44,6 +44,8 @@ * Memory special file */ +#include "opt_uvm.h" + #include #include #include @@ -55,6 +57,9 @@ #include #include +#if defined(UVM) +#include +#endif extern u_int lowram; static caddr_t devzeropage; @@ -143,9 +148,15 @@ mmrw(dev, uio, flags) case 1: v = uio->uio_offset; c = min(iov->iov_len, MAXPHYS); +#if defined(UVM) + if (!uvm_kernacc((caddr_t)v, c, + uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) + return (EFAULT); +#else if (!kernacc((caddr_t)v, c, uio->uio_rw == UIO_READ ? B_READ : B_WRITE)) return (EFAULT); +#endif error = uiomove((caddr_t)v, c, uio); continue; @@ -187,7 +198,7 @@ mmrw(dev, uio, flags) } if (error) break; - (caddr_t)iov->iov_base += c; + iov->iov_base = (caddr_t)iov->iov_base + c; iov->iov_len -= c; uio->uio_offset += c; uio->uio_resid -= c; diff --git a/sys/arch/x68k/x68k/pmap.c b/sys/arch/x68k/x68k/pmap.c index 5d33aec894cc..153668dda9a5 100644 --- a/sys/arch/x68k/x68k/pmap.c +++ b/sys/arch/x68k/x68k/pmap.c @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.c,v 1.19 1998/06/25 23:59:17 thorpej Exp $ */ +/* $NetBSD: pmap.c,v 1.20 1998/06/30 11:59:12 msaitoh Exp $ */ /* * Copyright (c) 1991, 1993 @@ -93,6 +93,7 @@ * and to when physical maps must be made correct. */ +#include "opt_uvm.h" #include "opt_compat_hpux.h" #include @@ -107,6 +108,10 @@ #include #include +#if defined(UVM) +#include +#endif + #include #include @@ -178,7 +183,7 @@ int pmapdebug = 0x2000; #define PDB_WIRING 0x4000 #define PDB_PVDUMP 0x8000 -#ifdef HAVEVAC +#ifdef M68K_MMU_HP int pmapvacflush = 0; #define PVF_ENTER 0x01 #define PVF_REMOVE 0x02 @@ -233,15 +238,9 @@ extern vm_offset_t pager_sva, pager_eva; #define pmap_pte_w_chg(pte, nw) ((nw) ^ pmap_pte_w(pte)) #define pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte)) -#ifdef MACHINE_NONCONTIG -#define pmap_valid_page(pa) (pmap_initialized && pmap_page_index(pa) >= 0) - -int pmap_page_index __P((vm_offset_t pa)); -#endif - /* * Given a map and a machine independent protection code, - * convert to a x68k protection code. + * convert to an x68k protection code. 
*/ #define pte_prot(m, p) (protection_codes[p]) int protection_codes[8]; @@ -273,20 +272,25 @@ vm_size_t Sysptsize = VM_KERNEL_PT_PAGES; struct pmap kernel_pmap_store; vm_map_t st_map, pt_map; +#if defined(UVM) +struct vm_map st_map_store, pt_map_store; +#endif vm_offset_t avail_start; /* PA of first available physical page */ +#if !defined(MACHINE_NEW_NONCONTIG) vm_offset_t avail_next; /* Next available physical page */ int avail_remaining;/* Number of physical free pages left */ int avail_range; /* Range avail_next is in */ -vm_offset_t avail_end; /* Set for ps and friends as avail_start + avail_remaining */ +#endif +vm_offset_t avail_end; /* PA of last available physical page */ vm_size_t mem_size; /* memory size in bytes */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss)*/ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ -#ifndef MACHINE_NONCONTIG +#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG) vm_offset_t vm_first_phys; /* PA of first managed page */ vm_offset_t vm_last_phys; /* PA just past last managed page */ #endif -int npages; +int page_cnt; /* number of pages managed by VM system */ boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */ struct pv_entry *pv_table; @@ -299,7 +303,7 @@ extern int numranges; extern unsigned long low[8]; extern unsigned long high[8]; -#ifdef HAVEVAC +#ifdef M68K_MMU_HP int pmap_aliasmask; /* seperation at which VA aliasing ok */ #endif #if defined(M68040) || defined(M68060) @@ -313,24 +317,63 @@ void pmap_collect_pv __P((void)); int pmap_mapmulti __P((pmap_t, vm_offset_t)); #endif /* COMPAT_HPUX */ +#if defined(MACHINE_NEW_NONCONTIG) +#define PAGE_IS_MANAGED(pa) (pmap_initialized && \ + vm_physseg_find(atop((pa)), NULL) != -1) + +#else /* not MACNINE_NEW_NONCONTIG */ + +#ifdef MACHINE_NONCONTIG +#define pmap_valid_page(pa) (pmap_initialized && pmap_page_index(pa) >= 0) +int pmap_page_index __P((vm_offset_t pa)); +#define PAGE_IS_MANAGED(pa) (pmap_valid_page(pa)) +#else +#define PAGE_IS_MANAGED(pa) ((pa) >= vm_first_phys && (pa) < vm_last_phys) +#endif +#endif + +#if defined(MACHINE_NEW_NONCONTIG) +#define pa_to_pvh(pa) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + &vm_physmem[bank_].pmseg.pvent[pg_]; \ +}) + +#define pa_to_attribute(pa) \ +({ \ + int bank_, pg_; \ + \ + bank_ = vm_physseg_find(atop((pa)), &pg_); \ + &vm_physmem[bank_].pmseg.attrs[pg_]; \ +}) +#else +#define pa_to_pvh(pa) (&pv_table[pmap_page_index(pa)]) +#define pa_to_attribute(pa) (&pmap_attributes[pmap_page_index(pa)]) +#endif + /* * Internal routines */ -void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int)); +void pmap_remove_mapping __P((pmap_t, vm_offset_t, pt_entry_t *, int)); boolean_t pmap_testbit __P((vm_offset_t, int)); -void pmap_changebit __P((vm_offset_t, int, boolean_t)); -void pmap_enter_ptpage __P((pmap_t, vm_offset_t)); +void pmap_changebit __P((vm_offset_t, int, boolean_t)); +void pmap_enter_ptpage __P((pmap_t, vm_offset_t)); +void pmap_collect1 __P((pmap_t, vm_offset_t, vm_offset_t)); +void pmap_pinit __P((pmap_t)); +void pmap_release __P((pmap_t)); + #ifdef DEBUG void pmap_pvdump __P((vm_offset_t)); void pmap_check_wiring __P((char *, vm_offset_t)); #endif -void pmap_pinit __P((pmap_t)); -void pmap_release __P((pmap_t)); /* pmap_remove_mapping flags */ #define PRM_TFLUSH 1 #define PRM_CFLUSH 2 +#if !defined(MACHINE_NEW_NONCONTIG) /* * Bootstrap memory allocator. 
This function allows for early dynamic * memory allocation until the virtual memory system has been bootstrapped. @@ -365,14 +408,38 @@ pmap_bootstrap_alloc(size) bzero ((caddr_t) val, size); return ((void *) val); } +#endif + +#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG) /* - * Initialize the pmap module. - * Called by vm_init, to initialize any structures that the pmap - * system needs to map virtual memory. + * Routine: pmap_virtual_space + * + * Function: + * Report the range of available kernel virtual address + * space to the VM system during bootstrap. Called by + * vm_bootstrap_steal_memory(). */ void -#ifdef MACHINE_NONCONTIG +pmap_virtual_space(vstartp, vendp) + vm_offset_t *vstartp, *vendp; +{ + + *vstartp = virtual_avail; + *vendp = virtual_end; +} +#endif + +/* + * Routine: pmap_init + * + * Function: + * Initialize the pmap module. + * Called by vm_init, to initialize any structures that the pmap + * system needs to map virtual memory. + */ +void +#if defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG) pmap_init() #else pmap_init(phys_start, phys_end) @@ -382,17 +449,49 @@ pmap_init(phys_start, phys_end) vm_offset_t addr, addr2; vm_size_t s; int rv; + int npages; +#if defined(MACHINE_NEW_NONCONTIG) + struct pv_entry *pv; + char *attr; + int bank; +#endif -#ifndef MACHINE_NONCONTIG #ifdef DEBUG if (pmapdebug & PDB_FOLLOW) +#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG) printf("pmap_init(%lx, %lx)\n", phys_start, phys_end); +#else + printf("pmap_init()\n"); #endif #endif /* * Now that kernel map has been allocated, we can mark as - * unavailable regions which we have mapped in locore. + * unavailable regions which we have mapped in pmap_bootstrap(). */ +#if defined(UVM) + addr = (vm_offset_t) IODEVbase; + if (uvm_map(kernel_map, &addr, + m68k_ptob(IIOMAPSIZE+EIOMAPSIZE), + NULL, UVM_UNKNOWN_OFFSET, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, + UVM_INH_NONE, UVM_ADV_RANDOM, + UVM_FLAG_FIXED)) != KERN_SUCCESS) + goto bogons; + addr = (vm_offset_t) Sysmap; + if (uvm_map(kernel_map, &addr, X68K_MAX_PTSIZE, + NULL, UVM_UNKNOWN_OFFSET, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, + UVM_INH_NONE, UVM_ADV_RANDOM, + UVM_FLAG_FIXED)) != KERN_SUCCESS) { + /* + * If this fails, it is probably because the static + * portion of the kernel page table isn't big enough + * and we overran the page table map. + */ + bogons: + panic("pmap_init: bogons in the VM system!\n"); + } +#else addr = (vm_offset_t) IODEVbase; (void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0, &addr, m68k_ptob(IIOMAPSIZE+EIOMAPSIZE), FALSE); @@ -405,11 +504,12 @@ pmap_init(phys_start, phys_end) /* * If this fails it is probably because the static portion of * the kernel page table isn't big enough and we overran the - * page table map. Need to adjust pmap_size() in x68k_init.c. + * page table map. */ if (addr != (vm_offset_t)Sysmap) bogons: panic("pmap_init: bogons in the VM system!\n"); +#endif /* UVM */ #ifdef DEBUG if (pmapdebug & PDB_INIT) { @@ -424,27 +524,60 @@ bogons: * Allocate memory for random pmap data structures. Includes the * initial segment table, pv_head_table and pmap_attributes. 
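/*
 * Aside (sketch only): pmap_virtual_space() simply reports the kernel
 * VA range the pmap reserved, so the MI bootstrap code can steal early
 * memory from it.  A standalone model with stand-in addresses:
 */
#include <stdio.h>

#define NBPG 4096UL

static unsigned long virtual_avail = 0x10000000UL;
static unsigned long virtual_end   = 0x20000000UL;

static void
pmap_virtual_space(unsigned long *vstartp, unsigned long *vendp)
{
	*vstartp = virtual_avail;
	*vendp = virtual_end;
}

int
main(void)
{
	unsigned long vstart, vend;

	pmap_virtual_space(&vstart, &vend);
	printf("%lu pages of bootstrap KVA available\n",
	    (vend - vstart) / NBPG);
	return 0;
}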
*/ +#if defined(MACHINE_NEW_NONCONTIG) + for (page_cnt = 0, bank = 0; bank < vm_nphysseg; bank++) + page_cnt += vm_physmem[bank].end - vm_physmem[bank].start; +#else /* not MACHINE_NEW_NONCONTIG */ #ifdef MACHINE_NONCONTIG - npages = atop(high[numranges - 1] - 1); + page_cnt = atop(high[numranges - 1] - 1); #else - npages = atop(phys_end - phys_start); + page_cnt = atop(phys_end - phys_start); #endif - s = (vm_size_t) (X68K_STSIZE + sizeof(struct pv_entry) * npages + npages); +#endif + s = X68K_STSIZE; /* Segtabzero */ + s += page_cnt * sizeof(struct pv_entry); /* pv table */ + s += page_cnt * sizeof(char); /* attribute table */ s = round_page(s); +#if defined(UVM) + addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s); + if (addr == 0) + panic("pmap_init: can't allocate data structures"); +#else addr = (vm_offset_t) kmem_alloc(kernel_map, s); +#endif + Segtabzero = (st_entry_t *) addr; Segtabzeropa = (st_entry_t *) pmap_extract(pmap_kernel(), addr); addr += X68K_STSIZE; + pv_table = (struct pv_entry *) addr; - addr += sizeof(struct pv_entry) * npages; + addr += page_cnt * sizeof(struct pv_entry); + pmap_attributes = (char *) addr; + #ifdef DEBUG if (pmapdebug & PDB_INIT) - printf("pmap_init: %lx bytes: npages %x s0 %p(%p) tbl %p atr %p\n", - s, npages, Segtabzero, Segtabzeropa, + printf("pmap_init: %lx bytes: page_cnt %x s0 %p(%p) tbl %p atr %p\n", + s, page_cnt, Segtabzero, Segtabzeropa, pv_table, pmap_attributes); #endif +#if defined(MACHINE_NEW_NONCONTIG) + /* + * Now that the pv and attribute tables have been allocated, + * assign them to the memory segments. + */ + pv = pv_table; + attr = pmap_attributes; + for (bank = 0; bank < vm_nphysseg; bank++) { + npages = vm_physmem[bank].end - vm_physmem[bank].start; + vm_physmem[bank].pmseg.pvent = pv; + vm_physmem[bank].pmseg.attrs = attr; + pv += npages; + attr += npages; + } +#endif + /* * Allocate physical memory for kernel PT pages and their management. * We need 1 PT page per possible task plus some slop. @@ -456,17 +589,35 @@ bogons: * Verify that space will be allocated in region for which * we already have kernel PT pages. */ +#if defined(UVM) + addr = 0; + rv = uvm_map(kernel_map, &addr, s, NULL, UVM_UNKNOWN_OFFSET, + UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE, UVM_INH_NONE, + UVM_ADV_RANDOM, UVM_FLAG_NOMERGE)); + if (rv != KERN_SUCCESS || (addr + s) >= (vm_offset_t)Sysmap) + panic("pmap_init: kernel PT too small"); + rv = uvm_unmap(kernel_map, addr, addr + s, FALSE); + if (rv != KERN_SUCCESS) + panic("pmap_init: uvm_unmap failed"); +#else addr = 0; rv = vm_map_find(kernel_map, NULL, 0, &addr, s, TRUE); if (rv != KERN_SUCCESS || addr + s >= (vm_offset_t)Sysmap) panic("pmap_init: kernel PT too small"); vm_map_remove(kernel_map, addr, addr + s); +#endif /* * Now allocate the space and link the pages together to * form the KPT free list. */ +#if defined(UVM) + addr = (vm_offset_t) uvm_km_zalloc(kernel_map, s); + if (addr == 0) + panic("pmap_init: cannot allocate KPT free list"); +#else addr = (vm_offset_t) kmem_alloc(kernel_map, s); +#endif s = ptoa(npages); addr2 = addr + s; kpt_pages = &((struct kpt_page *)addr2)[npages]; @@ -487,6 +638,29 @@ bogons: atop(s), addr, addr + s); #endif +#if defined(UVM) + /* + * Allocate the segment table map and the page table map. 
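/*
 * Aside (hedged sketch): the sizing code above makes one zeroed kernel
 * allocation and carves it into the prototype segment table, the pv
 * table and the attribute bytes; with MACHINE_NEW_NONCONTIG the last
 * two are then handed out slice by slice to each physical segment.
 * Userland model with stand-in sizes:
 */
#include <stdio.h>
#include <stdlib.h>

struct pv_entry { void *pv_next; };		/* stand-in */
#define STSIZE	1024				/* stand-in for X68K_STSIZE */

int
main(void)
{
	int page_cnt = 100;
	size_t s = STSIZE + page_cnt * sizeof(struct pv_entry) + page_cnt;
	char *addr = calloc(1, s);		/* role of uvm_km_zalloc() */

	if (addr == NULL)
		return 1;
	char *segtabzero = addr;
	struct pv_entry *pv_table = (struct pv_entry *)(addr + STSIZE);
	char *pmap_attributes = (char *)(pv_table + page_cnt);

	printf("%zu bytes: s0 %p tbl %p atr %p\n", s, (void *)segtabzero,
	    (void *)pv_table, (void *)pmap_attributes);
	free(addr);
	return 0;
}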
+ */ + s = maxproc * X68K_STSIZE; + st_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE, + FALSE, &st_map_store); + + addr = X68K_PTBASE; + if ((X68K_PTMAXSIZE / X68K_MAX_PTSIZE) < maxproc) { + s = X68K_PTMAXSIZE; + /* + * XXX We don't want to hang when we run out of + * page tables, so we lower maxproc so that fork() + * will fail instead. Note that root could still raise + * this value via sysctl(2). + */ + maxproc = (X68K_PTMAXSIZE / X68K_MAX_PTSIZE); + } else + s = (maxproc * X68K_MAX_PTSIZE); + pt_map = uvm_km_suballoc(kernel_map, &addr, &addr2, s, TRUE, + TRUE, &pt_map_store); +#else /* * Allocate the segment table map */ @@ -524,6 +698,7 @@ bogons: if (pmapdebug & PDB_INIT) printf("pmap_init: pt_map [%lx - %lx)\n", addr, addr2); #endif +#endif /* UVM */ #if defined(M68040) || defined(M68060) if (mmutype == MMU_68040) { @@ -536,7 +711,7 @@ bogons: /* * Now it is safe to enable pv_table recording. */ -#ifndef MACHINE_NONCONTIG +#if !defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG) vm_first_phys = phys_start; vm_last_phys = phys_end; #endif @@ -551,9 +726,15 @@ pmap_alloc_pv() int i; if (pv_nfree == 0) { +#if defined(UVM) + pvp = (struct pv_page *)uvm_km_zalloc(kernel_map, NBPG); + if (pvp == 0) + panic("pmap_alloc_pv: uvm_km_zalloc() failed"); +#else pvp = (struct pv_page *)kmem_alloc(kernel_map, NBPG); if (pvp == 0) panic("pmap_alloc_pv: kmem_alloc() failed"); +#endif pvp->pvp_pgi.pgi_freelist = pv = &pvp->pvp_pv[1]; for (i = NPVPPG - 2; i; i--, pv++) pv->pv_next = pv + 1; @@ -595,7 +776,11 @@ pmap_free_pv(pv) case NPVPPG: pv_nfree -= NPVPPG - 1; TAILQ_REMOVE(&pv_page_freelist, pvp, pvp_pgi.pgi_list); +#if defined(UVM) + uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG); +#else kmem_free(kernel_map, (vm_offset_t)pvp, NBPG); +#endif break; } } @@ -625,7 +810,7 @@ pmap_collect_pv() if (pv_page_collectlist.tqh_first == 0) return; - for (ph = &pv_table[npages - 1]; ph >= &pv_table[0]; ph--) { + for (ph = &pv_table[page_cnt - 1]; ph >= &pv_table[0]; ph--) { if (ph->pv_pmap == 0) continue; s = splimp(); @@ -653,7 +838,11 @@ pmap_collect_pv() for (pvp = pv_page_collectlist.tqh_first; pvp; pvp = npvp) { npvp = pvp->pvp_pgi.pgi_list.tqe_next; +#if defined(UVM) + uvm_km_free(kernel_map, (vm_offset_t)pvp, NBPG); +#else kmem_free(kernel_map, (vm_offset_t)pvp, NBPG); +#endif } } @@ -804,11 +993,21 @@ pmap_release(pmap) #endif if (pmap->pm_ptab) +#if defined(UVM) + uvm_km_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab, + X68K_MAX_PTSIZE); +#else kmem_free_wakeup(pt_map, (vm_offset_t)pmap->pm_ptab, X68K_MAX_PTSIZE); +#endif if (pmap->pm_stab != Segtabzero) +#if defined(UVM) + uvm_km_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab, + X68K_STSIZE); +#else kmem_free_wakeup(st_map, (vm_offset_t)pmap->pm_stab, X68K_STSIZE); +#endif } /* @@ -843,7 +1042,7 @@ pmap_activate(p) #ifdef DEBUG if (pmapdebug & (PDB_FOLLOW|PDB_SEGTAB)) - printf("pmap_activate(%p)\n"); + printf("pmap_activate(%p)\n", p); #endif PMAP_ACTIVATE(pmap, p == curproc); @@ -906,7 +1105,7 @@ pmap_remove(pmap, sva, eva) pte = pmap_pte(pmap, sva); while (sva < nssva) { if (pmap_pte_v(pte)) { -#ifdef HAVEVAC +#ifdef M68K_MMU_HP if (pmap_aliasmask) { /* * Purge kernel side of VAC to ensure @@ -941,7 +1140,7 @@ pmap_remove(pmap, sva, eva) */ if (firstpage) return; -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * In a couple of cases, we don't need to worry about flushing * the VAC: @@ -950,8 +1149,7 @@ pmap_remove(pmap, sva, eva) * 2. 
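/*
 * Aside (sketch with stand-in numbers): the pt_map sizing above caps
 * the submap at X68K_PTMAXSIZE and, if that is not enough for maxproc
 * page tables, lowers maxproc so fork() fails cleanly instead of the
 * kernel running out of page-table VA.
 */
#include <stdio.h>

#define PTMAXSIZE	(64UL * 1024 * 1024)	/* stand-in */
#define MAX_PTSIZE	(4UL * 1024 * 1024)	/* stand-in */

int
main(void)
{
	unsigned long maxproc = 532, s;

	if (PTMAXSIZE / MAX_PTSIZE < maxproc) {
		s = PTMAXSIZE;
		maxproc = PTMAXSIZE / MAX_PTSIZE;	/* lowered limit */
	} else
		s = maxproc * MAX_PTSIZE;
	printf("pt_map gets %lu bytes, maxproc clamped to %lu\n", s, maxproc);
	return 0;
}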
if it is a user mapping not for the current process, * it won't be there */ - if (pmap_aliasmask && - (pmap == pmap_kernel() || pmap != curproc->p_vmspace->vm_map.pmap)) + if (pmap_aliasmask && !active_user_pmap(pmap)) needcflush = FALSE; #ifdef DEBUG if (pmap_aliasmask && (pmapvacflush & PVF_REMOVE)) { @@ -994,16 +1192,11 @@ pmap_page_protect(pa, prot) #ifdef DEBUG if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) || - prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)) + (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))) printf("pmap_page_protect(%lx, %x)\n", pa, prot); #endif -#ifdef MACHINE_NONCONTIG - if (!pmap_valid_page(pa)) + if (!PAGE_IS_MANAGED(pa)) return; -#else - if (pa < vm_first_phys || pa >= vm_last_phys) - return; -#endif switch (prot) { case VM_PROT_READ|VM_PROT_WRITE: @@ -1101,7 +1294,7 @@ pmap_protect(pmap, sva, eva, prot) pte = pmap_pte(pmap, sva); while (sva < nssva) { if (pmap_pte_v(pte) && pmap_pte_prot_chg(pte, isro)) { -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * Purge kernel side of VAC to ensure we * get the correct state of any hardware @@ -1145,7 +1338,7 @@ pmap_protect(pmap, sva, eva, prot) sva += NBPG; } } -#if defined(HAVEVAC) && defined(DEBUG) +#if defined(M68K_MMU_HP) && defined(DEBUG) if (pmap_aliasmask && (pmapvacflush & PVF_PROTECT)) { if (pmapvacflush & PVF_TOTAL) DCIA(); @@ -1201,8 +1394,13 @@ pmap_enter(pmap, va, pa, prot, wired) * For user mapping, allocate kernel VM resources if necessary. */ if (pmap->pm_ptab == NULL) +#if defined(UVM) + pmap->pm_ptab = (pt_entry_t *) + uvm_km_valloc_wait(pt_map, X68K_MAX_PTSIZE); +#else pmap->pm_ptab = (pt_entry_t *) kmem_alloc_wait(pt_map, X68K_MAX_PTSIZE); +#endif /* * Segment table entry not valid, we need a new PT page @@ -1281,19 +1479,20 @@ pmap_enter(pmap, va, pa, prot, wired) * is a valid mapping in the page. */ if (pmap != pmap_kernel()) +#if defined(UVM) + (void) uvm_map_pageable(pt_map, trunc_page(pte), + round_page(pte+1), FALSE); +#else (void) vm_map_pageable(pt_map, trunc_page(pte), round_page(pte+1), FALSE); +#endif /* * Enter on the PV list if part of our managed memory * Note that we raise IPL while manipulating pv_table * since pmap_enter can be called at interrupt time. */ -#ifdef MACHINE_NONCONTIG - if (pmap_valid_page(pa)) { -#else - if (pa >= vm_first_phys && pa < vm_last_phys) { -#endif + if (PAGE_IS_MANAGED(pa)) { struct pv_entry *pv, *npv; int s; @@ -1343,7 +1542,7 @@ pmap_enter(pmap, va, pa, prot, wired) if (!npv->pv_next) enter_stats.secondpv++; #endif -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * Since there is another logical mapping for the * same page we may need to cache-inhibit the @@ -1415,7 +1614,7 @@ pmap_enter(pmap, va, pa, prot, wired) pmap->pm_stats.wired_count++; validate: -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * Purge kernel side of VAC to ensure we get correct state * of HW bits so we don't clobber them. 
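/*
 * Aside: active_user_pmap() (from the shared m68k pmap headers) stands
 * in for the open-coded test deleted above.  Judging from that removed
 * code its meaning is roughly "a user pmap belonging to the process
 * that is running now"; the stand-in below is an illustration, not the
 * real macro.
 */
#include <stdio.h>

struct pmap { int dummy; };
struct vm_map { struct pmap *pmap; };
struct vmspace { struct vm_map vm_map; };
struct proc { struct vmspace *p_vmspace; };

static struct pmap kernel_pmap_store;
#define pmap_kernel()	(&kernel_pmap_store)
static struct proc *curproc;

#define active_user_pmap(pm) \
	((pm) != pmap_kernel() && curproc != NULL && \
	 (pm) == curproc->p_vmspace->vm_map.pmap)

int
main(void)
{
	struct pmap upm;
	struct vmspace vs = { { &upm } };
	struct proc p = { &vs };

	curproc = &p;
	printf("user pmap active: %d, kernel pmap active: %d\n",
	    active_user_pmap(&upm), active_user_pmap(pmap_kernel()));
	return 0;
}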
@@ -1456,7 +1655,7 @@ validate: *pte = npte; if (!wired && active_pmap(pmap)) TBIS(va); -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * The following is executed if we are entering a second * (or greater) mapping for a physical page and the mappings @@ -1633,19 +1832,14 @@ void pmap_collect(pmap) pmap_t pmap; { -#ifdef MACHINE_NONCONTIG +#if defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG) return; -#else - vm_offset_t pa; - struct pv_entry *pv; - pt_entry_t *pte; - vm_offset_t kpa; +#else /* !defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG) */ int s; - -#ifdef DEBUG - st_entry_t *ste; - int opmapdebug; +#if defined(MACHINE_NEW_NONCONTIG) + int bank; #endif + if (pmap != pmap_kernel()) return; @@ -1657,7 +1851,45 @@ pmap_collect(pmap) kpt_stats.collectscans++; #endif s = splimp(); - for (pa = vm_first_phys; pa < vm_last_phys; pa += NBPG) { +#if defined(MACHINE_NEW_NONCONTIG) + for (bank = 0; bank < vm_nphysseg; bank++) + pmap_collect1(pmap, ptoa(vm_physmem[bank].start), + ptoa(vm_physmem[bank].end)); +#else + pmap_collect1(pmap, vm_first_phys, vm_last_phys); +#endif + splx(s); + +#ifdef notyet + /* Go compact and garbage-collect the pv_table. */ + pmap_collect_pv(); +#endif +#endif /* !defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG) */ +} + +#if !defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG) +/* + * Routine: pmap_collect1() + * + * Function: + * Helper function for pmap_collect(). Do the actual + * garbage-collection of range of physical addresses. + */ +void +pmap_collect1(pmap, startpa, endpa) + pmap_t pmap; + vm_offset_t startpa, endpa; +{ + vm_offset_t pa; + struct pv_entry *pv; + pt_entry_t *pte; + vm_offset_t kpa; +#ifdef DEBUG + st_entry_t *ste; + int opmapdebug = 0 /* XXX initialize to quiet gcc -Wall */; +#endif + + for (pa = startpa; pa < endpa; pa += NBPG) { struct kpt_page *kpt, **pkpt; /* @@ -1670,7 +1902,7 @@ pmap_collect(pmap) do { if (pv->pv_ptste && pv->pv_ptpmap == pmap_kernel()) break; - } while (pv = pv->pv_next); + } while ((pv = pv->pv_next)); if (pv == NULL) continue; #ifdef DEBUG @@ -1744,9 +1976,8 @@ ok: ste, *ste); #endif } - splx(s); -#endif /* MACHINE_NONCONTIG */ } +#endif /* !defined(MACHINE_NONCONTIG) || defined(MACHINE_NEW_NONCONTIG) */ /* * pmap_zero_page zeros the specified (machine independent) @@ -1856,11 +2087,7 @@ pmap_pageable(pmap, sva, eva, pageable) if (!pmap_ste_v(pmap, sva)) return; pa = pmap_pte_pa(pmap_pte(pmap, sva)); -#ifdef MACHINE_NONCONTIG - if (!pmap_valid_page(pa)) -#else - if (pa < vm_first_phys || pa >= vm_last_phys) -#endif + if (!PAGE_IS_MANAGED(pa)) return; pv = pa_to_pvh(pa); if (pv->pv_ptste == NULL) @@ -1980,6 +2207,7 @@ pmap_phys_address(ppn) * We implement this at the segment table level, the machine independent * VM knows nothing about it. */ +int pmap_mapmulti(pmap, va) pmap_t pmap; vm_offset_t va; @@ -1989,7 +2217,7 @@ pmap_mapmulti(pmap, va) #ifdef DEBUG if (pmapdebug & PDB_MULTIMAP) { ste = pmap_ste(pmap, HPMMBASEADDR(va)); - printf("pmap_mapmulti(%p, %lx): bste %x(%x)", + printf("pmap_mapmulti(%p, %lx): bste %p(%x)", pmap, va, ste, *ste); ste = pmap_ste(pmap, va); printf(" ste %p(%x)\n", ste, *ste); @@ -2045,7 +2273,7 @@ pmap_remove_mapping(pmap, va, pte, flags) if (*pte == PG_NV) return; } -#ifdef HAVEVAC +#ifdef M68K_MMU_HP if (pmap_aliasmask && (flags & PRM_CFLUSH)) { /* * Purge kernel side of VAC to ensure we get the correct @@ -2060,8 +2288,7 @@ pmap_remove_mapping(pmap, va, pte, flags) * flush the VAC. 
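/*
 * Aside (hedged sketch): the pmap_collect()/pmap_collect1() split above
 * drops the single [vm_first_phys, vm_last_phys) assumption; with
 * MACHINE_NEW_NONCONTIG the collector walks every physical segment and
 * hands its byte range to the helper.  Userland model with stand-in
 * segments:
 */
#include <stdio.h>

#define NBPG	4096UL
#define ptoa(x)	((x) * NBPG)

struct seg { unsigned long start, end; };	/* page frame numbers */

static void
collect1(unsigned long startpa, unsigned long endpa)
{
	/* the real helper scans this range for reclaimable KPT pages */
	printf("scanning 0x%lx-0x%lx\n", startpa, endpa);
}

int
main(void)
{
	struct seg vm_physmem[2] = { { 0x000, 0x100 }, { 0x800, 0x900 } };
	int bank;

	for (bank = 0; bank < 2; bank++)
		collect1(ptoa(vm_physmem[bank].start),
		    ptoa(vm_physmem[bank].end));
	return 0;
}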
Note that the kernel side was flushed * above so we don't worry about non-CI kernel mappings. */ - if (pmap == curproc->p_vmspace->vm_map.pmap && - !pmap_pte_ci(pte)) { + if (active_user_pmap(pmap) && !pmap_pte_ci(pte)) { DCIU(); #ifdef PMAPSTATS remove_stats.uflushes++; @@ -2102,8 +2329,13 @@ pmap_remove_mapping(pmap, va, pte, flags) * PT page. */ if (pmap != pmap_kernel()) { +#if defined(UVM) + (void) uvm_map_pageable(pt_map, trunc_page(pte), + round_page(pte+1), TRUE); +#else (void) vm_map_pageable(pt_map, trunc_page(pte), round_page(pte+1), TRUE); +#endif #ifdef DEBUG if (pmapdebug & PDB_WIRING) pmap_check_wiring("remove", trunc_page(pte)); @@ -2112,11 +2344,7 @@ pmap_remove_mapping(pmap, va, pte, flags) /* * If this isn't a managed page, we are all done. */ -#ifdef MACHINE_NONCONTIG - if (!pmap_valid_page(pa)) -#else - if (pa < vm_first_phys || pa >= vm_last_phys) -#endif + if (!PAGE_IS_MANAGED(pa)) return; /* * Otherwise remove it from the PV table @@ -2163,7 +2391,7 @@ pmap_remove_mapping(pmap, va, pte, flags) pmap_free_pv(npv); pv = pa_to_pvh(pa); } -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * If only one mapping left we no longer need to cache inhibit */ @@ -2227,9 +2455,15 @@ pmap_remove_mapping(pmap, va, pte, flags) printf("remove: free stab %p\n", ptpmap->pm_stab); #endif +#if defined(UVM) + uvm_km_free_wakeup(st_map, + (vm_offset_t)ptpmap->pm_stab, + X68K_STSIZE); +#else kmem_free_wakeup(st_map, (vm_offset_t)ptpmap->pm_stab, X68K_STSIZE); +#endif ptpmap->pm_stab = Segtabzero; ptpmap->pm_stpa = Segtabzeropa; #if defined(M68040) || defined(M68060) @@ -2265,7 +2499,7 @@ pmap_remove_mapping(pmap, va, pte, flags) /* * Update saved attributes for managed page */ - pmap_attributes[pmap_page_index(pa)] |= bits; + *pa_to_attribute(pa) |= bits; splx(s); } @@ -2279,11 +2513,7 @@ pmap_testbit(pa, bit) pt_entry_t *pte; int s; -#ifdef MACHINE_NONCONTIG - if (!pmap_valid_page(pa)) -#else - if (pa < vm_first_phys || pa >= vm_last_phys) -#endif + if (!PAGE_IS_MANAGED(pa)) return(FALSE); pv = pa_to_pvh(pa); @@ -2291,11 +2521,11 @@ pmap_testbit(pa, bit) /* * Check saved info first */ - if (pmap_attributes[pmap_page_index(pa)] & bit) { + if (*pa_to_attribute(pa) & bit) { splx(s); return(TRUE); } -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * Flush VAC to get correct state of any hardware maintained bits. */ @@ -2330,7 +2560,9 @@ pmap_changebit(pa, bit, setem) pt_entry_t *pte, npte; vm_offset_t va; int s; +#if defined(M68K_MMU_HP) || defined(M68040) || defined(M68060) boolean_t firstpage = TRUE; +#endif #ifdef PMAPSTATS struct chgstats *chgp; #endif @@ -2340,11 +2572,7 @@ pmap_changebit(pa, bit, setem) printf("pmap_changebit(%lx, %x, %s)\n", pa, bit, setem ? "set" : "clear"); #endif -#ifdef MACHINE_NONCONTIG - if (!pmap_valid_page(pa)) -#else - if (pa < vm_first_phys || pa >= vm_last_phys) -#endif + if (!PAGE_IS_MANAGED(pa)) return; #ifdef PMAPSTATS @@ -2360,7 +2588,7 @@ pmap_changebit(pa, bit, setem) * Clear saved attributes (modify, reference) */ if (!setem) - pmap_attributes[pmap_page_index(pa)] &= ~bit; + *pa_to_attribute(pa) &= ~bit; /* * Loop over all current mappings setting/clearing as appropos * If setting RO do we need to clear the VAC? 
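/*
 * Aside (hedged, partly inferred from the surrounding 4.4BSD-style
 * pmap): pmap_testbit() first consults the saved attribute byte (now
 * reached via pa_to_attribute()) and only falls back to walking the
 * page's pv list and reading live PTEs when the bit has not been
 * latched yet.  Simplified userland model:
 */
#include <stdio.h>

struct pv { unsigned int *pte; struct pv *next; };

static int
testbit(char *attr, struct pv *pvh, unsigned int bit)
{
	struct pv *pv;

	if (*attr & bit)			/* cheap: already recorded */
		return 1;
	for (pv = pvh; pv != NULL; pv = pv->next)
		if (pv->pte != NULL && (*pv->pte & bit))
			return 1;		/* hardware set it meanwhile */
	return 0;
}

int
main(void)
{
	unsigned int pte1 = 0x0, pte2 = 0x8;	/* pretend 0x8 = "modified" */
	struct pv b = { &pte2, NULL }, a = { &pte1, &b };
	char attr = 0;

	printf("page modified: %d\n", testbit(&attr, &a, 0x8));
	return 0;
}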
@@ -2379,14 +2607,19 @@ pmap_changebit(pa, bit, setem) * XXX don't write protect pager mappings */ if (bit == PG_RO) { +#if defined(UVM) + if (va >= uvm.pager_sva && va < uvm.pager_eva) + continue; +#else extern vm_offset_t pager_sva, pager_eva; if (va >= pager_sva && va < pager_eva) continue; +#endif } pte = pmap_pte(pv->pv_pmap, va); -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * Flush VAC to ensure we get correct state of HW bits * so we don't clobber them. @@ -2408,8 +2641,8 @@ pmap_changebit(pa, bit, setem) * flushed (but only once). */ if (firstpage && mmutype == MMU_68040 && - (bit == PG_RO && setem || - (bit & PG_CMASK))) { + ((bit == PG_RO && setem) || + (bit & PG_CMASK))) { firstpage = FALSE; DCFP(pa); ICPP(pa); @@ -2434,7 +2667,7 @@ pmap_changebit(pa, bit, setem) } #endif } -#if defined(HAVEVAC) && defined(DEBUG) +#if defined(M68K_MMU_HP) && defined(DEBUG) if (setem && bit == PG_RO && (pmapvacflush & PVF_PROTECT)) { if ((pmapvacflush & PVF_TOTAL) || toflush == 3) DCIA(); @@ -2474,8 +2707,13 @@ pmap_enter_ptpage(pmap, va) * reference count drops to zero. */ if (pmap->pm_stab == Segtabzero) { +#if defined(UVM) + pmap->pm_stab = (st_entry_t *) + uvm_km_zalloc(st_map, X68K_STSIZE); +#else pmap->pm_stab = (st_entry_t *) kmem_alloc(st_map, X68K_STSIZE); +#endif pmap->pm_stpa = (st_entry_t *) pmap_extract(pmap_kernel(), (vm_offset_t)pmap->pm_stab); #if defined(M68040) || defined(M68060) @@ -2598,11 +2836,20 @@ pmap_enter_ptpage(pmap, va) if (pmapdebug & (PDB_ENTER|PDB_PTPAGE)) printf("enter: about to fault UPT pg at %lx\n", va); #endif +#if defined(UVM) + s = uvm_fault(pt_map, va, 0, VM_PROT_READ|VM_PROT_WRITE); + if (s != KERN_SUCCESS) { + printf("uvm_fault(pt_map, 0x%lx, 0, RW) -> %d\n", + va, s); + panic("pmap_enter: uvm_fault failed"); + } +#else s = vm_fault(pt_map, va, VM_PROT_READ|VM_PROT_WRITE, FALSE); if (s != KERN_SUCCESS) { printf("vm_fault(pt_map, %lx, RW, 0) -> %d\n", va, s); panic("pmap_enter: vm_fault failed"); } +#endif ptpa = pmap_extract(pmap_kernel(), va); /* * Mark the page clean now to avoid its pageout (and @@ -2610,8 +2857,10 @@ pmap_enter_ptpage(pmap, va) * is wired; i.e. while it is on a paging queue. */ PHYS_TO_VM_PAGE(ptpa)->flags |= PG_CLEAN; +#if !defined(UVM) #ifdef DEBUG PHYS_TO_VM_PAGE(ptpa)->flags |= PG_PTPAGE; +#endif #endif } #if defined(M68040) || defined(M68060) @@ -2623,8 +2872,8 @@ pmap_enter_ptpage(pmap, va) if (dowriteback && dokwriteback) #endif if (mmutype == MMU_68040) { - pt_entry_t *pte = pmap_pte(pmap_kernel(), va); #ifdef DEBUG + pt_entry_t *pte = pmap_pte(pmap_kernel(), va); if ((pmapdebug & PDB_PARANOIA) && (*pte & PG_CCB) == 0) printf("%s PT no CCB: kva=%lx ptpa=%lx pte@%p=%x\n", pmap == pmap_kernel() ? "Kernel" : "User", @@ -2728,10 +2977,17 @@ pmap_check_wiring(str, va) !pmap_pte_v(pmap_pte(pmap_kernel(), va))) return; +#if defined(UVM) + if (!uvm_map_lookup_entry(pt_map, va, &entry)) { + printf("wired_check: entry for %lx not found\n", va); + return; + } +#else if (!vm_map_lookup_entry(pt_map, va, &entry)) { printf("wired_check: entry for %lx not found\n", va); return; } +#endif count = 0; for (pte = (pt_entry_t *)va; pte < (pt_entry_t *)(va + NBPG); pte++) if (*pte) @@ -2742,13 +2998,13 @@ pmap_check_wiring(str, va) } #endif +#if defined(MACHINE_NONCONTIG) && !defined(MACHINE_NEW_NONCONTIG) /* * LAK: These functions are from NetBSD/i386 and are used for * the non-contiguous memory machines. * See the functions in sys/vm that #ifdef MACHINE_NONCONTIG. 
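/*
 * Aside: the added parentheses in "(bit == PG_RO && setem) ||
 * (bit & PG_CMASK)" above do not change the result -- && already binds
 * tighter than || -- they only make the intent explicit and quiet
 * gcc -Wparentheses.  Quick standalone check with stand-in bit values:
 */
#include <stdio.h>

int
main(void)
{
	int bit, setem, PG_RO = 4, PG_CMASK = 0x60;	/* stand-ins */

	for (bit = 0; bit < 0x80; bit++)
		for (setem = 0; setem <= 1; setem++)
			if ((bit == PG_RO && setem || (bit & PG_CMASK)) !=
			    ((bit == PG_RO && setem) || (bit & PG_CMASK)))
				printf("differ at bit=%d setem=%d\n",
				    bit, setem);
	printf("both forms are equivalent\n");
	return 0;
}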
*/ -#ifdef MACHINE_NONCONTIG /* * pmap_free_pages() * @@ -2810,12 +3066,4 @@ pmap_page_index(pa) } return -1; } - -void -pmap_virtual_space(startp, endp) - vm_offset_t *startp, *endp; -{ - *startp = virtual_avail; - *endp = virtual_end; -} #endif diff --git a/sys/arch/x68k/x68k/pmap_bootstrap.c b/sys/arch/x68k/x68k/pmap_bootstrap.c index 78391f2236be..869b6a9e8e61 100644 --- a/sys/arch/x68k/x68k/pmap_bootstrap.c +++ b/sys/arch/x68k/x68k/pmap_bootstrap.c @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_bootstrap.c,v 1.11 1998/05/24 19:32:50 is Exp $ */ +/* $NetBSD: pmap_bootstrap.c,v 1.12 1998/06/30 11:59:13 msaitoh Exp $ */ /* * Copyright (c) 1991, 1993 @@ -47,7 +47,8 @@ #include -#define RELOC(v, t) *((t*)((u_int)&(v) + firstpa)) +#define RELOC(v, t) *((t*)((caddr_t)&(v) + firstpa)) +#define RELOCA(a, t) ((t)((caddr_t)(a) + firstpa)) extern char *etext; extern int Sysptsize; @@ -55,20 +56,24 @@ extern char *extiobase, *proc0paddr; extern st_entry_t *Sysseg; extern pt_entry_t *Sysptmap, *Sysmap; -extern int maxmem, physmem, avail_range; +extern int maxmem, physmem; extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end; +extern vm_size_t mem_size; +#if !defined(MACHINE_NEW_NONCONTIG) +extern int avail_range; extern vm_offset_t avail_next; -extern vm_size_t mem_size, avail_remaining; +extern vm_size_t avail_remaining; +#endif extern int protection_codes[]; -#ifdef HAVEVAC +#ifdef M68K_MMU_HP extern int pmap_aliasmask; #endif void pmap_bootstrap __P((vm_offset_t, vm_offset_t)); #ifdef MACHINE_NONCONTIG -static void setmemrange __P((void)); -#endif +static int mem_exists __P((caddr_t, u_long)); +static void setmemrange __P((vm_offset_t)); /* * These are used to map the non-contiguous memory. @@ -76,6 +81,7 @@ static void setmemrange __P((void)); int numranges; /* = 0 == don't use the ranges */ u_long low[8]; u_long high[8]; +#endif /* * Special purpose kernel virtual addresses, used for mapping @@ -156,12 +162,14 @@ pmap_bootstrap(nextpa, firstpa) p0upa = nextpa; nextpa += USPACE; #ifdef MACHINE_NONCONTIG - setmemrange(); + setmemrange(firstpa); +#if 0 if (nextpa > high[0]) { printf("Failure in BSD boot. nextpa=0x%lx, high[0]=0x%lx.\n", nextpa, high[0]); panic("You're hosed!\n"); } +#endif #endif /* * Initialize segment table and kernel page table map. @@ -328,11 +336,7 @@ pmap_bootstrap(nextpa, firstpa) */ pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)]; epte = &pte[m68k_btop(m68k_trunc_page(&etext))]; -#if defined(KGDB) || defined(DDB) - protopte = firstpa | PG_RW | PG_V; /* XXX RW for now */ -#else protopte = firstpa | PG_RO | PG_V; -#endif while (pte < epte) { *pte++ = protopte; protopte += NBPG; @@ -444,39 +448,54 @@ pmap_bootstrap(nextpa, firstpa) m68k_ptob(RELOC(maxmem, int)) /* XXX allow for msgbuf */ - m68k_round_page(MSGBUFSIZE); +#if !defined(MACHINE_NEW_NONCONTIG) RELOC(avail_next, vm_offset_t) = nextpa; +#endif +#ifdef MACHINE_NONCONTIG + { + int i; + vm_size_t av_rem = 0; + int av_rng = -1; + int nranges = RELOC(numranges, int); + u_long *l = RELOCA(low, u_long *); + u_long *h = RELOCA(high, u_long *); + + for (i = 0; i < nranges; i++) { + if (nextpa >= l[i] && nextpa < h[i]) { + av_rng = i; + av_rem = h[i] - nextpa; + } else if (av_rng != -1) { + av_rem += (h[i] - l[i]); + } + } + + RELOC(physmem, int) = m68k_btop(av_rem + nextpa - firstpa); + av_rem -= m68k_round_page(MSGBUFSIZE); + h[nranges - 1] -= m68k_round_page(MSGBUFSIZE); + /* XXX -- this doesn't look correct to me. 
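/*
 * Aside on the RELOC()/RELOCA() change above: pmap_bootstrap() runs
 * before the MMU maps the kernel at its linked address, so every global
 * must be reached through its physical address, i.e. its link-time
 * address plus the load offset firstpa; the new RELOCA() does the same
 * for a pointer (handy for the low[]/high[] arrays).  A userland
 * approximation -- the pointer arithmetic is only an illustration:
 */
#include <stdio.h>
#include <stddef.h>

static int loaded_copy;		/* where the data really sits ("physical") */
static int linked_symbol;	/* the symbol at its link-time address */

int
main(void)
{
	/* offset from link-time address to load address, like firstpa */
	ptrdiff_t firstpa = (char *)&loaded_copy - (char *)&linked_symbol;

#define RELOC(v, t)	(*(t *)((char *)&(v) + firstpa))

	RELOC(linked_symbol, int) = 3;	/* actually stores into loaded_copy */
	printf("relocated store landed in loaded_copy: %d\n", loaded_copy);
	return 0;
}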
*/ + while (h[nranges - 1] < l[nranges - 1]) { + RELOC(numranges, int) = --nranges; + h[nranges - 1] -= l[nranges] - h[nranges]; + } + av_rem = m68k_trunc_page(av_rem); + RELOC(avail_end, vm_offset_t) = nextpa + av_rem; +#if !defined(MACHINE_NEW_NONCONTIG) + RELOC(avail_range, int) = av_rng; + RELOC(avail_remaining, vm_size_t) = m68k_btop(av_rem); +#endif + } +#else +#if !defined(MACHINE_NEW_NONCONTIG) RELOC(avail_remaining, vm_size_t) = 0; RELOC(avail_range, int) = -1; -#ifdef MACHINE_NONCONTIG -{ -int i; - for (i = 0; i < numranges; i++) { - if (avail_next >= low[i] && avail_next < high[i]) { - avail_range = i; - avail_remaining = high[i] - avail_next; - } else if (avail_range != -1) { - avail_remaining += (high[i] - low[i]); - } - } -} - physmem = m68k_btop(avail_remaining + nextpa - firstpa); - avail_remaining -= m68k_round_page(MSGBUFSIZE); - high[numranges - 1] -= m68k_round_page(MSGBUFSIZE); - /* XXX -- this doesn't look correct to me. */ - while (high[numranges - 1] < low[numranges - 1]) { - numranges--; - high[numranges - 1] -= low[numranges] - high[numranges]; - } - avail_remaining = m68k_trunc_page(avail_remaining); - avail_end = avail_start + avail_remaining; - avail_remaining = m68k_btop(avail_remaining); +#endif #endif RELOC(mem_size, vm_size_t) = m68k_ptob(RELOC(physmem, int)); RELOC(virtual_avail, vm_offset_t) = VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa); RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS; -#ifdef HAVEVAC +#ifdef M68K_MMU_HP /* * Determine VA aliasing distance if any */ @@ -562,56 +581,161 @@ int i; } #ifdef MACHINE_NONCONTIG -static struct { +static struct memlist { caddr_t base; vm_size_t min; vm_size_t max; } memlist[] = { (caddr_t)0x01000000, 0x01000000, 0x01000000, /* TS-6BE16 16MB memory */ - (caddr_t)0x10000000, 0x00400000, 0x02000000, /* 060turbo SIMM slot (4--32MB) */ + (caddr_t)0x10000000, 0x00400000, 0x08000000, /* 060turbo SIMM slot (4--128MB) */ }; -static void -setmemrange() + +asm(" .text\n\ + .even\n\ +_badaddr_nommu:\n\ + movc vbr,a1\n\ + addql #8,a1 | bus error vector\n\ + movl a1@,d0 | save original vector\n\ + movl sp,d1 | save original sp\n\ + pea pc@(Laddrbad)\n\ + movl sp@+,a1@\n\ + tstw a0@ | test address\n\ + movl d0,a1@ | restore vector\n\ + clrl d0\n\ + rts | d0 == 0, ZF = 1\n\ +Laddrbad:\n\ + movl d1,sp | restore sp\n\ + movl d0,a1@ | restore vector\n\ + rts | d0 != 0, ZF = 0\n\ +"); + +#define badaddr_nommu(addr) \ + ({ int val asm("d0"); caddr_t a asm("a0") = addr; \ + asm("jbsr _badaddr_nommu" : \ + "=d"(val) : "a"(a) : "d1", "a1"); \ + val; }) + +/* + * check memory existency + */ +static int +mem_exists(mem, basemax) + caddr_t mem; + u_long basemax; { - int p, i; + /* most variables must be register! */ + register volatile unsigned char *m, *b; + register unsigned char save_m, save_b; + register int baseismem; + register int exists = 0; + caddr_t base; + caddr_t begin_check, end_check; + + if (badaddr_nommu(mem)) + return 0; + + /* only 24bits are significant on normal X680x0 systems */ + base = (caddr_t)((u_long)mem & 0x00FFFFFF); + + /* This is somewhat paranoid -- avoid overwriting myself */ + asm("lea pc@(begin_check_mem),%0" : "=a"(begin_check)); + asm("lea pc@(end_check_mem),%0" : "=a"(end_check)); + if (base >= begin_check && base < end_check) { + size_t off = end_check - begin_check; + + mem -= off; + base -= off; + } + + m = mem; + b = base; + + /* + * Can't check by writing if the corresponding + * base address isn't memory. + * + * I hope this would be no harm.... 
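/*
 * Aside (sketch with stand-in ranges): the bookkeeping added above
 * starts at the first free physical page (nextpa), finds which memory
 * range it falls into, credits that range's tail plus every later
 * range, and finally reserves the message buffer at the top of memory.
 */
#include <stdio.h>

int
main(void)
{
	unsigned long low[2]  = { 0x00000000, 0x10000000 };
	unsigned long high[2] = { 0x00C00000, 0x10400000 };
	unsigned long nextpa = 0x00300000, av_rem = 0, msgbufsz = 0x8000;
	int i, av_rng = -1, nranges = 2;

	for (i = 0; i < nranges; i++) {
		if (nextpa >= low[i] && nextpa < high[i]) {
			av_rng = i;
			av_rem = high[i] - nextpa;
		} else if (av_rng != -1)
			av_rem += high[i] - low[i];
	}
	av_rem -= msgbufsz;		/* carve out the message buffer */
	printf("nextpa is in range %d, 0x%lx bytes remain free\n",
	    av_rng, av_rem);
	return 0;
}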
+ */ + baseismem = base < (caddr_t)basemax; + + /* save original value (base must be saved first) */ + if (baseismem) + save_b = *b; + save_m = *m; + +asm("begin_check_mem:"); + /* + * stack and other data segment variables are unusable + * til end_check_mem, because they may be clobbered. + */ + + /* + * check memory by writing/reading + */ + if (baseismem) + *b = 0x55; + *m = 0xAA; + if ((baseismem && *b != 0x55) || *m != 0xAA) + goto out; + + *m = 0x55; + if (baseismem) + *b = 0xAA; + if (*m != 0x55 || (baseismem && *b != 0xAA)) + goto out; + + exists = 1; +out: + *m = save_m; + if (baseismem) + *b = save_b; + +asm("end_check_mem:"); + + return exists; +} + +static void +setmemrange(firstpa) + vm_offset_t firstpa; +{ + int i; vm_size_t s, min, max; - const volatile caddr_t base = 0x00000000; + u_long *l = RELOCA(low, u_long *); + u_long *h = RELOCA(high, u_long *); + struct memlist *mlist = RELOCA(memlist, struct memlist *); + int nranges; /* first, x68k base memory */ - numranges = 0; - low[numranges] = 0; - high[numranges] = *(u_long *)0x00ED0008; - numranges++; - - p = *base; + nranges = 0; + l[nranges] = 0x00000000; + h[nranges] = *(u_long *)0x00ED0008; + nranges++; /* second, discover extended memory */ for (i = 0; i < sizeof(memlist) / sizeof(memlist[0]); i++) { - min = memlist[i].min; - max = memlist[i].max; + min = mlist[i].min; + max = mlist[i].max; /* * Normally, x68k hardware is NOT 32bit-clean. * But some type of extended memory is in 32bit address space. - * Check weather. + * Check whether. */ - if (badaddr(memlist[i].base)) + if (!mem_exists(mlist[i].base, h[0])) continue; - *base = 0; - *(volatile caddr_t)memlist[i].base = 1; - if (*base == 0) { - low[numranges] = (u_long)memlist[i].base; - high[numranges] = 0; - /* range check */ - for (s = min; s <= max; s += 0x00100000) - if (!badaddr((caddr_t)low[numranges] + s - 4)) - high[numranges] = low[numranges] + s; - if (low[numranges] < high[numranges]) { - numranges++; - } + l[nranges] = (u_long)mlist[i].base; + h[nranges] = 0; + /* range check */ + for (s = min; s <= max; s += 0x00100000) { + if (!mem_exists(mlist[i].base + s - 4, h[0])) + break; + h[nranges] = (u_long)(mlist[i].base + s); } + if (l[nranges] < h[nranges]) + nranges++; } - *base = p; + RELOC(numranges, int) = nranges; } #endif diff --git a/sys/arch/x68k/x68k/trap.c b/sys/arch/x68k/x68k/trap.c index a639648c6f08..a72caadae3e4 100644 --- a/sys/arch/x68k/x68k/trap.c +++ b/sys/arch/x68k/x68k/trap.c @@ -1,4 +1,4 @@ -/* $NetBSD: trap.c,v 1.19 1998/06/25 23:59:17 thorpej Exp $ */ +/* $NetBSD: trap.c,v 1.20 1998/06/30 11:59:13 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. 
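/*
 * Aside (userland model of mem_exists() above): a 32-bit-clean
 * expansion address may merely alias the 24-bit base memory, so the
 * probe writes complementary patterns to the candidate location and to
 * its 24-bit alias and accepts the memory only if the two cells hold
 * independent values.  Two bytes stand in for the two addresses:
 */
#include <stdio.h>

static int
probe(volatile unsigned char *m, volatile unsigned char *b)
{
	unsigned char save_m = *m, save_b = *b;
	int exists = 0;

	*b = 0x55; *m = 0xAA;
	if (*b == 0x55 && *m == 0xAA) {
		*m = 0x55; *b = 0xAA;
		if (*m == 0x55 && *b == 0xAA)
			exists = 1;	/* distinct cells: memory is there */
	}
	*m = save_m; *b = save_b;
	return exists;
}

int
main(void)
{
	unsigned char cells[2] = { 0, 0 };

	printf("distinct: %d, aliased: %d\n",
	    probe(&cells[0], &cells[1]), probe(&cells[0], &cells[0]));
	return 0;
}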
@@ -43,6 +43,7 @@ */ #include "opt_ktrace.h" +#include "opt_uvm.h" #include "opt_compat_sunos.h" #include "opt_compat_hpux.h" @@ -69,6 +70,10 @@ #include #include +#if defined(UVM) +#include +#endif + #ifdef FPU_EMULATE #include #endif @@ -243,7 +248,7 @@ again: "pid %d(%s): writeback aborted in sigreturn, pc=%x\n", p->p_pid, p->p_comm, fp->f_pc, faultaddr); #endif - } else if (sig = writeback(fp, fromtrap)) { + } else if ((sig = writeback(fp, fromtrap))) { beenhere = 1; oticks = p->p_sticks; trapsignal(p, sig, faultaddr); @@ -273,7 +278,11 @@ trap(type, code, v, frame) u_int ucode; u_quad_t sticks; +#if defined(UVM) + uvmexp.traps++; +#else cnt.v_trap++; +#endif p = curproc; ucode = 0; @@ -311,7 +320,9 @@ trap(type, code, v, frame) #ifdef DDB (void)kdb_trap(type, (db_regs_t *)&frame); #endif +#ifdef KGDB kgdb_cont: +#endif splx(s); if (panicstr) { printf("trap during panic!\n"); @@ -376,12 +387,12 @@ trap(type, code, v, frame) case T_FPERR|T_USER: /* 68881 exceptions */ /* - * We pass along the 68881 status register which locore stashed + * We pass along the 68881 status which locore stashed * in code for us. Note that there is a possibility that the - * bit pattern of this register will conflict with one of the + * bit pattern of this will conflict with one of the * FPE_* codes defined in signal.h. Fortunately for us, the * only such codes we use are all in the range 1-7 and the low - * 3 bits of the status register are defined as 0 so there is + * 3 bits of the status are defined as 0 so there is * no clash. */ ucode = code; @@ -522,17 +533,29 @@ trap(type, code, v, frame) case T_SSIR|T_USER: if (ssir & SIR_NET) { siroff(SIR_NET); +#if defined(UVM) + uvmexp.softs++; +#else cnt.v_soft++; +#endif netintr(); } if (ssir & SIR_CLOCK) { siroff(SIR_CLOCK); +#if defined(UVM) + uvmexp.softs++; +#else cnt.v_soft++; +#endif softclock(); } if (ssir & SIR_SERIAL) { siroff(SIR_SERIAL); +#if defined(UVM) + uvmexp.softs++; +#else cnt.v_soft++; +#endif #include "zs.h" #if NZS > 0 zssoft(0); @@ -540,14 +563,22 @@ trap(type, code, v, frame) } if (ssir & SIR_KBD) { siroff(SIR_KBD); +#if defined(UVM) + uvmexp.softs++; +#else cnt.v_soft++; +#endif kbdsoftint(); } /* * If this was not an AST trap, we are all done. */ if (type != (T_ASTFLT|T_USER)) { +#if defined(UVM) + uvmexp.traps--; +#else cnt.v_trap--; +#endif return; } spl0(); @@ -611,23 +642,37 @@ trap(type, code, v, frame) #ifdef COMPAT_HPUX if (ISHPMMADDR(va)) { + int pmap_mapmulti __P((pmap_t, vm_offset_t)); vm_offset_t bva; rv = pmap_mapmulti(map->pmap, va); if (rv != KERN_SUCCESS) { bva = HPMMBASEADDR(va); +#if defined(UVM) + rv = uvm_fault(map, bva, 0, ftype); +#else rv = vm_fault(map, bva, ftype, FALSE); +#endif if (rv == KERN_SUCCESS) (void) pmap_mapmulti(map->pmap, va); } } else #endif +#if defined(UVM) + rv = uvm_fault(map, va, 0, ftype); +#ifdef DEBUG + if (rv && MDB_ISPID(p->p_pid)) + printf("uvm_fault(%p, 0x%lx, 0, 0x%x) -> 0x%x\n", + map, va, ftype, rv); +#endif +#else /* ! UVM */ rv = vm_fault(map, va, ftype, FALSE); #ifdef DEBUG if (rv && MDB_ISPID(p->p_pid)) - printf("vm_fault(%x, %lx, %x, 0) -> %x\n", + printf("vm_fault(%p, %lx, %x, 0) -> %x\n", map, va, ftype, rv); #endif +#endif /* UVM */ /* * If this was a stack access we keep track of the maximum * accessed stack size. 
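/*
 * Aside (simplified model): the T_SSIR hunks above keep the same
 * dispatch shape -- clear the pending bit, bump the soft-interrupt
 * counter (uvmexp.softs under UVM, cnt.v_soft otherwise), run the
 * handler -- only the counter changes.  Stand-in handlers:
 */
#include <stdio.h>

#define SIR_NET		0x1
#define SIR_CLOCK	0x2

static unsigned int ssir = SIR_NET | SIR_CLOCK;
static unsigned int softs;		/* plays the role of uvmexp.softs */

static void netintr(void)   { printf("network soft interrupt\n"); }
static void softclock(void) { printf("clock soft interrupt\n"); }

static void
dispatch(void)
{
	if (ssir & SIR_NET) {
		ssir &= ~SIR_NET;	/* siroff(SIR_NET) */
		softs++;
		netintr();
	}
	if (ssir & SIR_CLOCK) {
		ssir &= ~SIR_CLOCK;	/* siroff(SIR_CLOCK) */
		softs++;
		softclock();
	}
}

int
main(void)
{
	dispatch();
	printf("%u soft interrupts accounted\n", softs);
	return 0;
}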
Also, if vm_fault gets a protection @@ -659,8 +704,13 @@ trap(type, code, v, frame) if (type == T_MMUFLT) { if (p->p_addr->u_pcb.pcb_onfault) goto copyfault; - printf("vm_fault(%x, %lx, %x, 0) -> %x\n", +#if defined(UVM) + printf("uvm_fault(%p, 0x%lx, 0, 0x%x) -> 0x%x\n", + map, va, ftype, rv); +#else + printf("vm_fault(%p, %lx, %x, 0) -> %x\n", map, va, ftype, rv); +#endif printf(" type %x, code [mmu,,ssw]: %x\n", type, code); goto dopanic; @@ -993,7 +1043,11 @@ syscall(code, frame) register_t args[8], rval[2]; u_quad_t sticks; +#if defined(UVM) + uvmexp.syscalls++; +#else cnt.v_syscall++; +#endif if (!USERMODE(frame.f_sr)) panic("syscall"); p = curproc; diff --git a/sys/arch/x68k/x68k/vectors.s b/sys/arch/x68k/x68k/vectors.s index 97ff874ebe40..7ab560c20d46 100644 --- a/sys/arch/x68k/x68k/vectors.s +++ b/sys/arch/x68k/x68k/vectors.s @@ -1,4 +1,4 @@ -| $NetBSD: vectors.s,v 1.2 1997/01/13 14:05:05 oki Exp $ +| $NetBSD: vectors.s,v 1.3 1998/06/30 11:59:13 msaitoh Exp $ | Copyright (c) 1988 University of Utah | Copyright (c) 1990, 1993 @@ -38,7 +38,7 @@ #define _mfptrap _badtrap #define _scctrap _badtrap - .text + .data .globl _vectab,_buserr,_addrerr .globl _illinst,_zerodiv,_chkinst,_trapvinst,_privinst,_trace .globl _badtrap @@ -49,7 +49,7 @@ .globl _trap12 _vectab: - .long 0x4ef80400 /* 0: jmp 0x400:w (unused reset SSP) */ + .long 0x4ef80000 /* 0: jmp 0x0000:w (unused reset SSP) */ .long 0 /* 1: NOT USED (reset PC) */ .long _buserr /* 2: bus error */ .long _addrerr /* 3: address error */ diff --git a/sys/arch/x68k/x68k/vm_machdep.c b/sys/arch/x68k/x68k/vm_machdep.c index a9d63bf9eb19..5b8f33027ff4 100644 --- a/sys/arch/x68k/x68k/vm_machdep.c +++ b/sys/arch/x68k/x68k/vm_machdep.c @@ -1,4 +1,4 @@ -/* $NetBSD: vm_machdep.c,v 1.9 1998/06/25 23:59:18 thorpej Exp $ */ +/* $NetBSD: vm_machdep.c,v 1.10 1998/06/30 11:59:13 msaitoh Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -42,7 +42,7 @@ * @(#)vm_machdep.c 8.6 (Berkeley) 1/12/94 */ -#include "opt_compat_hpux.h" +#include "opt_uvm.h" #include #include @@ -54,13 +54,18 @@ #include #include -#include -#include - +#include #include #include #include +#include +#include + +#if defined(UVM) +#include +#endif + /* * Finish a fork operation, with process p2 nearly set up. * Copy and update the kernel stack and pcb, making the child @@ -80,7 +85,7 @@ cpu_fork(p1, p2) struct switchframe *sf; extern struct pcb *curpcb; - p2->p_md.md_flags = p1->p_md.md_flags & ~MDP_HPUXTRACE; + p2->p_md.md_flags = p1->p_md.md_flags; /* Sync curpcb (which is presumably p1's PCB) and copy it to p2. */ savectx(curpcb); @@ -106,7 +111,7 @@ cpu_set_kpc(p, pc) void (*pc) __P((struct proc *)); { - p->p_addr->u_pcb.pcb_regs[6] = (int)pc; /* A2 */ + p->p_addr->u_pcb.pcb_regs[6] = (int) pc; /* A2 */ } /* @@ -122,10 +127,18 @@ cpu_exit(p) struct proc *p; { +#if defined(UVM) + uvmspace_free(p->p_vmspace); +#else vmspace_free(p->p_vmspace); +#endif (void) splimp(); +#if defined(UVM) + uvmexp.swtch++; +#else cnt.v_swtch++; +#endif switch_exit(p); /* NOTREACHED */ } @@ -148,18 +161,6 @@ cpu_coredump(p, vp, cred, chdr) struct coreseg cseg; int error; -#ifdef COMPAT_HPUX - extern struct emul emul_hpux; - - /* - * If we loaded from an HP-UX format binary file we dump enough - * of an HP-UX style user struct so that the HP-UX debuggers can - * grok it. 
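/*
 * Aside (hedged userland model): cpu_set_kpc() above parks the child's
 * entry function in the pcb slot that holds the saved A2 register; the
 * machine-dependent fork trampoline calls through that slot the first
 * time the child is switched in.  Types and the pcb layout below are
 * stand-ins, not the real ones:
 */
#include <stdio.h>

struct proc;
typedef void (*kpc_t)(struct proc *);

struct pcb { kpc_t pcb_regs[12]; };		/* pretend save area */
struct proc { struct pcb u_pcb; const char *name; };

static void
cpu_set_kpc(struct proc *p, kpc_t pc)
{
	p->u_pcb.pcb_regs[6] = pc;		/* the "A2" slot */
}

static void
child_return(struct proc *p)
{
	printf("child %s running its kpc\n", p->name);
}

int
main(void)
{
	struct proc child = { { { 0 } }, "proc2" };

	cpu_set_kpc(&child, child_return);
	/* what the fork trampoline effectively does on first switch-in: */
	(*child.u_pcb.pcb_regs[6])(&child);
	return 0;
}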
- */ - if (p->p_emul == &emul_hpux) - return (hpux_dumpu(vp, cred)); -#endif - CORE_SETMAGIC(*chdr, COREMAGIC, MID_M68K, 0); chdr->c_hdrsize = ALIGN(sizeof(*chdr)); chdr->c_seghdrsize = ALIGN(sizeof(cseg)); @@ -170,10 +171,15 @@ cpu_coredump(p, vp, cred, chdr) if (error) return error; - /* Save floating point registers. */ - error = process_read_fpregs(p, &md_core.freg); - if (error) - return error; + if (fputype) { + /* Save floating point registers. */ + error = process_read_fpregs(p, &md_core.freg); + if (error) + return error; + } else { + /* Make sure these are clear. */ + bzero((caddr_t)&md_core.freg, sizeof(md_core.freg)); + } CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_M68K, CORE_CPU); cseg.c_addr = 0; @@ -306,7 +312,11 @@ vmapbuf(bp, len) uva = m68k_trunc_page(bp->b_saveaddr = bp->b_data); off = (vm_offset_t)bp->b_data - uva; len = m68k_round_page(off + len); +#if defined(UVM) + kva = uvm_km_valloc_wait(phys_map, len); +#else kva = kmem_alloc_wait(phys_map, len); +#endif bp->b_data = (caddr_t)(kva + off); upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map); @@ -344,7 +354,11 @@ vunmapbuf(bp, len) * pmap_remove() is unnecessary here, as kmem_free_wakeup() * will do it for us. */ +#if defined(UVM) + uvm_km_free_wakeup(phys_map, kva, len); +#else kmem_free_wakeup(phys_map, kva, len); +#endif bp->b_data = bp->b_saveaddr; bp->b_saveaddr = 0; }
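/*
 * Aside (standalone model): the cpu_coredump() hunk above only reads
 * the FP registers when an FPU is actually present (fputype != 0);
 * otherwise the FP area of the core header is zeroed so the dump
 * layout stays the same without leaking stack garbage.  Stand-in
 * register structure:
 */
#include <stdio.h>
#include <string.h>

struct fpframe { unsigned int regs[8 * 3]; };	/* stand-in for freg */

static int fputype = 0;				/* 0: no FPU fitted */

static int
read_fpregs(struct fpframe *f)
{
	memset(f, 0xff, sizeof(*f));		/* pretend hardware state */
	return 0;
}

int
main(void)
{
	struct fpframe freg;
	int error;

	if (fputype) {
		error = read_fpregs(&freg);	/* save FP registers */
		if (error)
			return error;
	} else
		memset(&freg, 0, sizeof(freg));	/* make sure these are clear */

	printf("first FP word written to the core: %#x\n", freg.regs[0]);
	return 0;
}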