1666 lines
40 KiB
C
1666 lines
40 KiB
C
/*
|
|
* Copyright (c) 1988 University of Utah.
|
|
* Copyright (c) 1982, 1986, 1990 The Regents of the University of California.
|
|
* All rights reserved.
|
|
*
|
|
* This code is derived from software contributed to Berkeley by
|
|
* the Systems Programming Group of the University of Utah Computer
|
|
* Science Department.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions
|
|
* are met:
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
* notice, this list of conditions and the following disclaimer.
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
* documentation and/or other materials provided with the distribution.
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
* must display the following acknowledgement:
|
|
* This product includes software developed by the University of
|
|
* California, Berkeley and its contributors.
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
* may be used to endorse or promote products derived from this software
|
|
* without specific prior written permission.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
* SUCH DAMAGE.
|
|
*
|
|
* from: Utah Hdr: machdep.c 1.63 91/04/24
|
|
* from: @(#)machdep.c 7.16 (Berkeley) 6/3/91
|
|
* $Id: machdep.c,v 1.35 1994/05/18 06:50:24 cgd Exp $
|
|
*/
|
|
|
|
#include <sys/param.h>
|
|
#include <sys/systm.h>
|
|
#include <sys/signalvar.h>
|
|
#include <sys/kernel.h>
|
|
#include <sys/map.h>
|
|
#include <sys/proc.h>
|
|
#include <sys/buf.h>
|
|
#include <sys/reboot.h>
|
|
#include <sys/conf.h>
|
|
#include <sys/file.h>
|
|
#include <sys/clist.h>
|
|
#include <sys/callout.h>
|
|
#include <sys/malloc.h>
|
|
#include <sys/mbuf.h>
|
|
#include <sys/msgbuf.h>
|
|
#include <sys/ioctl.h>
|
|
#include <sys/tty.h>
|
|
#include <sys/mount.h>
|
|
#include <sys/user.h>
|
|
#include <sys/exec.h>
|
|
#include <sys/vnode.h>
|
|
#include <sys/sysctl.h>
|
|
#ifdef SYSVMSG
|
|
#include <sys/msg.h>
|
|
#endif
|
|
#ifdef SYSVSEM
|
|
#include <sys/sem.h>
|
|
#endif
|
|
#ifdef SYSVSHM
|
|
#include <sys/shm.h>
|
|
#endif
|
|
#ifdef COMPAT_HPUX
|
|
#include <hp300/hpux/hpux.h>
|
|
#endif
|
|
|
|
#include <hp300/include/cpu.h>
|
|
#include <hp300/include/reg.h>
|
|
#include <hp300/include/psl.h>
|
|
#include <dev/cons.h>
|
|
#include <hp300/hp300/isr.h>
|
|
#include <hp300/include/pte.h>
|
|
#include <net/netisr.h>
|
|
|
|
#define MAXMEM 64*1024*CLSIZE /* XXX - from cmap.h */
|
|
#include <vm/vm_kern.h>
|
|
|
|
/* the following is used externally (sysctl_hw) */
char	machine[] = "hp300";		/* cpu "architecture" */

vm_map_t buffer_map;			/* submap for filesystem buffers (set in cpu_startup) */
extern vm_offset_t avail_end;		/* end of physical memory; pre-adjusted in pmap_bootstrap */

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;			/* swap buffer headers; 0 = auto-size in cpu_startup */
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;			/* file i/o buffers; 0 = auto-size in cpu_startup */
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;			/* pages of buffer cache; 0 = auto-size in cpu_startup */
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
int	maxmem;			/* max memory per process */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */
/*
 * safepri is a safe priority for sleep to set for a spin-wait
 * during autoconfiguration or after a panic.
 */
int	safepri = PSL_LOWIPL;

extern	u_int lowram;		/* NOTE(review): presumably lowest usable RAM address, set elsewhere -- confirm */
|
|
|
|
/*
|
|
* Console initialization: called early on from main,
|
|
* before vm init or startup. Do enough configuration
|
|
* to choose and initialize a console.
|
|
*/
|
|
void
|
|
consinit()
|
|
{
|
|
|
|
/*
|
|
* Set cpuspeed immediately since cninit() called routines
|
|
* might use delay.
|
|
*/
|
|
switch (machineid) {
|
|
case HP_320:
|
|
case HP_330:
|
|
case HP_340:
|
|
cpuspeed = MHZ_16;
|
|
break;
|
|
case HP_350:
|
|
case HP_360:
|
|
cpuspeed = MHZ_25;
|
|
break;
|
|
case HP_370:
|
|
cpuspeed = MHZ_33;
|
|
break;
|
|
case HP_375:
|
|
cpuspeed = MHZ_50;
|
|
break;
|
|
}
|
|
/*
|
|
* Find what hardware is attached to this machine.
|
|
*/
|
|
find_devs();
|
|
|
|
/*
|
|
* Initialize the console before we print anything out.
|
|
*/
|
|
cninit();
|
|
|
|
#ifdef DDB
|
|
ddb_init();
|
|
if (boothowto & RB_KDB)
|
|
Debugger();
|
|
#endif
|
|
}
|
|
|
|
/*
|
|
* cpu_startup: allocate memory for variable-sized tables,
|
|
* initialize cpu, and do autoconfiguration.
|
|
*/
|
|
void
|
|
cpu_startup()
|
|
{
|
|
register unsigned i;
|
|
register caddr_t v, firstaddr;
|
|
int base, residual;
|
|
extern long Usrptsize;
|
|
extern struct map *useriomap;
|
|
#ifdef DEBUG
|
|
extern int pmapdebug;
|
|
int opmapdebug = pmapdebug;
|
|
#endif
|
|
vm_offset_t minaddr, maxaddr;
|
|
vm_size_t size;
|
|
|
|
/*
|
|
* Initialize error message buffer (at end of core).
|
|
*/
|
|
#ifdef DEBUG
|
|
pmapdebug = 0;
|
|
#endif
|
|
/* avail_end was pre-decremented in pmap_bootstrap to compensate */
|
|
for (i = 0; i < btoc(sizeof (struct msgbuf)); i++)
|
|
pmap_enter(kernel_pmap, (vm_offset_t)msgbufp,
|
|
avail_end + i * NBPG, VM_PROT_ALL, TRUE);
|
|
msgbufmapped = 1;
|
|
|
|
/*
|
|
* Good {morning,afternoon,evening,night}.
|
|
*/
|
|
printf(version);
|
|
identifycpu();
|
|
printf("real mem = %d\n", ctob(physmem));
|
|
|
|
/*
|
|
* Allocate space for system data structures.
|
|
* The first available real memory address is in "firstaddr".
|
|
* The first available kernel virtual address is in "v".
|
|
* As pages of kernel virtual memory are allocated, "v" is incremented.
|
|
* As pages of memory are allocated and cleared,
|
|
* "firstaddr" is incremented.
|
|
* An index into the kernel page table corresponding to the
|
|
* virtual memory address maintained in "v" is kept in "mapaddr".
|
|
*/
|
|
/*
|
|
* Make two passes. The first pass calculates how much memory is
|
|
* needed and allocates it. The second pass assigns virtual
|
|
* addresses to the various data structures.
|
|
*/
|
|
firstaddr = 0;
|
|
again:
|
|
v = (caddr_t)firstaddr;
|
|
|
|
#define valloc(name, type, num) \
|
|
(name) = (type *)v; v = (caddr_t)((name)+(num))
|
|
#define valloclim(name, type, num, lim) \
|
|
(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
|
|
#ifdef notdef
|
|
valloc(cfree, struct cblock, nclist);
|
|
#endif
|
|
valloc(callout, struct callout, ncallout);
|
|
valloc(swapmap, struct map, nswapmap = maxproc * 2);
|
|
#ifdef SYSVSHM
|
|
valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
|
|
#endif
|
|
#ifdef SYSVSEM
|
|
valloc(sema, struct semid_ds, seminfo.semmni);
|
|
valloc(sem, struct sem, seminfo.semmns);
|
|
/* This is pretty disgusting! */
|
|
valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
|
|
#endif
|
|
#ifdef SYSVMSG
|
|
valloc(msgpool, char, msginfo.msgmax);
|
|
valloc(msgmaps, struct msgmap, msginfo.msgseg);
|
|
valloc(msghdrs, struct msg, msginfo.msgtql);
|
|
valloc(msqids, struct msqid_ds, msginfo.msgmni);
|
|
#endif
|
|
|
|
/*
|
|
* Determine how many buffers to allocate.
|
|
* Since HPs tend to be long on memory and short on disk speed,
|
|
* we allocate more buffer space than the BSD standard of
|
|
* use 10% of memory for the first 2 Meg, 5% of remaining.
|
|
* We just allocate a flat 10%. Insure a minimum of 16 buffers.
|
|
* We allocate 1/2 as many swap buffer headers as file i/o buffers.
|
|
*/
|
|
if (bufpages == 0)
|
|
bufpages = physmem / 10 / CLSIZE;
|
|
if (nbuf == 0) {
|
|
nbuf = bufpages;
|
|
if (nbuf < 16)
|
|
nbuf = 16;
|
|
}
|
|
if (nswbuf == 0) {
|
|
nswbuf = (nbuf / 2) &~ 1; /* force even */
|
|
if (nswbuf > 256)
|
|
nswbuf = 256; /* sanity */
|
|
}
|
|
valloc(swbuf, struct buf, nswbuf);
|
|
valloc(buf, struct buf, nbuf);
|
|
/*
|
|
* End of first pass, size has been calculated so allocate memory
|
|
*/
|
|
if (firstaddr == 0) {
|
|
size = (vm_size_t)(v - firstaddr);
|
|
firstaddr = (caddr_t) kmem_alloc(kernel_map, round_page(size));
|
|
if (firstaddr == 0)
|
|
panic("startup: no room for tables");
|
|
goto again;
|
|
}
|
|
/*
|
|
* End of second pass, addresses have been assigned
|
|
*/
|
|
if ((vm_size_t)(v - firstaddr) != size)
|
|
panic("startup: table size inconsistency");
|
|
|
|
/*
|
|
* Now allocate buffers proper. They are different than the above
|
|
* in that they usually occupy more virtual memory than physical.
|
|
*/
|
|
size = MAXBSIZE * nbuf;
|
|
buffer_map = kmem_suballoc(kernel_map, (vm_offset_t *)&buffers,
|
|
&maxaddr, size, FALSE);
|
|
minaddr = (vm_offset_t)buffers;
|
|
if (vm_map_find(buffer_map, vm_object_allocate(size), (vm_offset_t)0,
|
|
&minaddr, size, FALSE) != KERN_SUCCESS)
|
|
panic("startup: cannot allocate buffers");
|
|
base = bufpages / nbuf;
|
|
residual = bufpages % nbuf;
|
|
for (i = 0; i < nbuf; i++) {
|
|
vm_size_t curbufsize;
|
|
vm_offset_t curbuf;
|
|
|
|
/*
|
|
* First <residual> buffers get (base+1) physical pages
|
|
* allocated for them. The rest get (base) physical pages.
|
|
*
|
|
* The rest of each buffer occupies virtual space,
|
|
* but has no physical memory allocated for it.
|
|
*/
|
|
curbuf = (vm_offset_t)buffers + i * MAXBSIZE;
|
|
curbufsize = CLBYTES * (i < residual ? base+1 : base);
|
|
vm_map_pageable(buffer_map, curbuf, curbuf+curbufsize, FALSE);
|
|
vm_map_simplify(buffer_map, curbuf);
|
|
}
|
|
|
|
/*
|
|
* Allocate a submap for exec arguments. This map effectively
|
|
* limits the number of processes exec'ing at any time.
|
|
*/
|
|
exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
|
|
16*NCARGS, TRUE);
|
|
|
|
/*
|
|
* Allocate a submap for physio
|
|
*/
|
|
phys_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
|
|
VM_PHYS_SIZE, TRUE);
|
|
|
|
/*
|
|
* Finally, allocate mbuf pool. Since mclrefcnt is an off-size
|
|
* we use the more space efficient malloc in place of kmem_alloc.
|
|
*/
|
|
mclrefcnt = (char *)malloc(NMBCLUSTERS+CLBYTES/MCLBYTES,
|
|
M_MBUF, M_NOWAIT);
|
|
bzero(mclrefcnt, NMBCLUSTERS+CLBYTES/MCLBYTES);
|
|
mb_map = kmem_suballoc(kernel_map, (vm_offset_t *)&mbutl, &maxaddr,
|
|
VM_MBUF_SIZE, FALSE);
|
|
/*
|
|
* Initialize callouts
|
|
*/
|
|
callfree = callout;
|
|
for (i = 1; i < ncallout; i++)
|
|
callout[i-1].c_next = &callout[i];
|
|
|
|
#ifdef DEBUG
|
|
pmapdebug = opmapdebug;
|
|
#endif
|
|
printf("avail mem = %d\n", ptoa(cnt.v_free_count));
|
|
printf("using %d buffers containing %d bytes of memory\n",
|
|
nbuf, bufpages * CLBYTES);
|
|
/*
|
|
* Set up CPU-specific registers, cache, etc.
|
|
*/
|
|
initcpu();
|
|
|
|
/*
|
|
* Set up buffers, so they can be used to read disk labels.
|
|
*/
|
|
bufinit();
|
|
|
|
/*
|
|
* Configure the system.
|
|
*/
|
|
configure();
|
|
}
|
|
|
|
/*
 * Set registers on exec.
 * XXX Should clear registers except sp, pc,
 * but would break init; should be fixed soon.
 */
void
setregs(p, entry, stack, retval)
	register struct proc *p;
	u_long entry;
	u_long stack;
	int retval[2];
{
	/* force an even entry address (clear low bit of PC) */
	p->p_md.md_regs[PC] = entry & ~1;
	p->p_md.md_regs[SP] = stack;
#ifdef FPCOPROC
	/* restore a null state frame */
	p->p_addr->u_pcb.pcb_fpregs.fpf_null = 0;
	m68881_restore(&p->p_addr->u_pcb.pcb_fpregs);
#endif
#ifdef COMPAT_HPUX
	if (p->p_emul == EMUL_HPUX) {

		p->p_md.md_regs[A0] = 0;/* not 68010 (bit 31), no FPA (30) */
		retval[0] = 0;		/* no float card */
#ifdef FPCOPROC
		retval[1] = 1;		/* yes 68881 */
#else
		retval[1] = 0;		/* no 68881 */
#endif
	}
	/*
	 * Ensure we perform the right action on traps type 1 and 2:
	 * If our parent is an HPUX process and we are being traced, turn
	 * on HPUX style interpretation.  Else if we were using the HPUX
	 * style interpretation, revert to the BSD interpretation.
	 *
	 * XXX This doesn't have much to do with setting registers but
	 * I didn't want to muck up kern_exec.c with this code, so I
	 * stuck it here.
	 */
	if ((p->p_pptr->p_emul == EMUL_HPUX) &&
	    (p->p_flag & P_TRACED)) {
		tweaksigcode(1);
		p->p_addr->u_pcb.pcb_flags |= PCB_HPUXTRACE;
	} else if (p->p_addr->u_pcb.pcb_flags & PCB_HPUXTRACE) {
		tweaksigcode(0);
		p->p_addr->u_pcb.pcb_flags &= ~PCB_HPUXTRACE;
	}
#endif
}
|
|
|
|
/*
 * Info for CTL_HW
 */
char	cpu_model[120];		/* model description string, built by identifycpu() */
extern	char version[];		/* kernel version banner, printed in cpu_startup() */
|
|
|
|
/*
 * identifycpu: build and print the cpu_model string
 * ("HP9000/<model> (<cpu-MHz>Mhz <CPU/MMU>, <fpu-MHz>Mhz <FPU>,
 * <cache-size> <cache-type> cache)") and panic if the machine or
 * MMU type is unknown or not configured into this kernel.
 */
identifycpu()
{

	strcpy(cpu_model, "HP9000/");
	/* model number and CPU clock */
	switch (machineid) {
	case HP_320:
		strcat(cpu_model, "320 (16.67");
		break;
	case HP_330:
		strcat(cpu_model, "318/319/330 (16.67");
		break;
	case HP_340:
		strcat(cpu_model, "340 (16.67");
		break;
	case HP_350:
		strcat(cpu_model, "350 (25");
		break;
	case HP_360:
		strcat(cpu_model, "360 (25");
		break;
	case HP_370:
		strcat(cpu_model, "370 (33.33");
		break;
	case HP_375:
		strcat(cpu_model, "345/375 (50");
		break;
	default:
		printf("unknown machine type %d\n", machineid);
		panic("startup");
	}
	/* CPU and MMU variant (closes the "<MHz>" begun above) */
	switch (mmutype) {
	case MMU_68030:
		strcat(cpu_model, "Mhz MC68030 CPU+MMU, ");
		break;
	case MMU_68851:
		strcat(cpu_model, "Mhz MC68020 CPU, MC68851 MMU, ");
		break;
	case MMU_HP:
		strcat(cpu_model, "Mhz MC68020 CPU, HP MMU, ");
		break;
	default:
		printf("unknown MMU type %d\n", mmutype);
		panic("startup");
	}
	/*
	 * FPU clock.  NOTE(review): HP_350 appends "20" here while its
	 * CPU clock above is "25" -- presumably the FPU really runs
	 * slower on that model; confirm before "fixing".
	 */
	switch (machineid) {
	case HP_320:
	case HP_330:
	case HP_340:
		strcat(cpu_model, "16.67");
		break;
	case HP_350:
		strcat(cpu_model, "20");
		break;
	case HP_360:
		strcat(cpu_model, "25");
		break;
	case HP_370:
		strcat(cpu_model, "33.33");
		break;
	case HP_375:
		strcat(cpu_model, "50");
		break;
	}
	/* FPU type: 68882 pairs with the 68030, 68881 with the 68020 */
	if (mmutype == MMU_68030)
		strcat(cpu_model, "Mhz MC68882 FPU, ");
	else
		strcat(cpu_model, "Mhz MC68881 FPU, ");
	/* external cache size */
	switch (machineid) {
	case HP_320:
		strcat(cpu_model, "16K");
		break;
	default:
		strcat(cpu_model, "32K");
		break;
	case HP_370:
		strcat(cpu_model, "64K");
		break;
	}
	/* external cache type */
	switch (ectype) {
	case EC_VIRT:
		strcat(cpu_model, " virtual-address cache");
		break;
	case EC_PHYS:
		strcat(cpu_model, " physical-address cache");
		break;
	}
	strcat(cpu_model, ")");
	printf("%s\n", cpu_model);
	/*
	 * Now that we have told the user what they have,
	 * let them know if that machine type isn't configured.
	 */
	switch (machineid) {
	case -1:		/* keep compilers happy */
#if !defined(HP320) && !defined(HP350)
	case HP_320:
	case HP_350:
#endif
#ifndef HP330
	case HP_330:
#endif
#if !defined(HP360) && !defined(HP370)
	case HP_340:
	case HP_360:
	case HP_370:
#endif
		panic("CPU type not configured");
	default:
		break;
	}
}
|
|
|
|
/*
|
|
* machine dependent system variables.
|
|
*/
|
|
cpu_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
|
|
int *name;
|
|
u_int namelen;
|
|
void *oldp;
|
|
size_t *oldlenp;
|
|
void *newp;
|
|
size_t newlen;
|
|
struct proc *p;
|
|
{
|
|
dev_t consdev;
|
|
|
|
/* all sysctl names at this level are terminal */
|
|
if (namelen != 1)
|
|
return (ENOTDIR); /* overloaded */
|
|
|
|
switch (name[0]) {
|
|
case CPU_CONSDEV:
|
|
if (cn_tab != NULL)
|
|
consdev = cn_tab->cn_dev;
|
|
else
|
|
consdev = NODEV;
|
|
return (sysctl_rdstruct(oldp, oldlenp, newp, &consdev,
|
|
sizeof consdev));
|
|
default:
|
|
return (EOPNOTSUPP);
|
|
}
|
|
/* NOTREACHED */
|
|
}
|
|
|
|
#ifdef COMPAT_HPUX
|
|
tweaksigcode(ishpux)
|
|
{
|
|
static short *sigtrap = NULL;
|
|
extern short sigcode[], esigcode[];
|
|
|
|
/* locate trap instruction in pcb_sigc */
|
|
if (sigtrap == NULL) {
|
|
sigtrap = esigcode;
|
|
while (--sigtrap >= sigcode)
|
|
if ((*sigtrap & 0xFFF0) == 0x4E40)
|
|
break;
|
|
if (sigtrap < sigcode)
|
|
panic("bogus sigcode\n");
|
|
}
|
|
*sigtrap = ishpux ? 0x4E42 : 0x4E41;
|
|
}
|
|
#endif
|
|
|
|
/* ss_flags bits: which parts of struct sigstate are valid */
#define SS_RTEFRAME	1	/* ss_frame holds a long (RTE) exception frame */
#define SS_FPSTATE	2	/* ss_fpstate holds saved FP coprocessor state */
#define SS_USERREGS	4	/* ss_frame.f_regs holds the user registers */

/* hardware state saved by sendsig and restored by sigreturn */
struct sigstate {
	int	ss_flags;		/* which of the following are valid */
	struct	frame ss_frame;		/* original exception frame */
	struct	fpframe ss_fpstate;	/* 68881/68882 state info */
};
|
|
|
|
/*
 * WARNING: code in locore.s assumes the layout shown for sf_signum
 * thru sf_handler so... don't screw with them!
 */
/* frame pushed on the user stack by sendsig */
struct sigframe {
	int	sf_signum;		/* signo for handler */
	int	sf_code;		/* additional info for handler */
	struct	sigcontext *sf_scp;	/* context ptr for handler */
	sig_t	sf_handler;		/* handler addr for u_sigc */
	struct	sigstate sf_state;	/* state of the hardware */
	struct	sigcontext sf_sc;	/* actual context */
};
|
|
|
|
#ifdef COMPAT_HPUX
|
|
struct hpuxsigcontext {
|
|
int hsc_syscall;
|
|
char hsc_action;
|
|
char hsc_pad1;
|
|
char hsc_pad2;
|
|
char hsc_onstack;
|
|
int hsc_mask;
|
|
int hsc_sp;
|
|
short hsc_ps;
|
|
int hsc_pc;
|
|
/* the rest aren't part of the context but are included for our convenience */
|
|
short hsc_pad;
|
|
u_int hsc_magic; /* XXX sigreturn: cookie */
|
|
struct sigcontext *hsc_realsc; /* XXX sigreturn: ptr to BSD context */
|
|
};
|
|
|
|
/*
|
|
* For an HP-UX process, a partial hpuxsigframe follows the normal sigframe.
|
|
* Tremendous waste of space, but some HP-UX applications (e.g. LCL) need it.
|
|
*/
|
|
struct hpuxsigframe {
|
|
int hsf_signum;
|
|
int hsf_code;
|
|
struct sigcontext *hsf_scp;
|
|
struct hpuxsigcontext hsf_sc;
|
|
int hsf_regs[15];
|
|
};
|
|
#endif
|
|
|
|
#ifdef DEBUG
|
|
int sigdebug = 0;
|
|
int sigpid = 0;
|
|
#define SDB_FOLLOW 0x01
|
|
#define SDB_KSTACK 0x02
|
|
#define SDB_FPSTATE 0x04
|
|
#endif
|
|
|
|
/*
 * Send an interrupt to process.
 *
 * Builds a sigframe (and, for HP-UX processes, a trailing hpuxsigframe)
 * in kernel memory, copies it out to the user stack (or signal stack),
 * and redirects the user PC to the signal trampoline.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct proc *p = curproc;
	register struct sigframe *fp, *kfp;	/* user / kernel frame copies */
	register struct frame *frame;
	register struct sigacts *psp = p->p_sigacts;
	register short ft;			/* hardware frame format */
	int oonstack, fsize;
	extern short exframesize[];		/* per-format frame sizes */
	extern char sigcode[], esigcode[];	/* trampoline bounds */

	frame = (struct frame *)p->p_md.md_regs;
	ft = frame->f_format;
	oonstack = psp->ps_sigstk.ss_flags & SA_ONSTACK;
	/*
	 * Allocate and validate space for the signal handler
	 * context.  Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
#ifdef COMPAT_HPUX
	if (p->p_emul == EMUL_HPUX)
		fsize = sizeof(struct sigframe) + sizeof(struct hpuxsigframe);
	else
#endif
	fsize = sizeof(struct sigframe);
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		/* deliver on the alternate signal stack */
		fp = (struct sigframe *)(psp->ps_sigstk.ss_base +
		    psp->ps_sigstk.ss_size - fsize);
		psp->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else
		fp = (struct sigframe *)(frame->f_regs[SP] - fsize);
	if ((unsigned)fp <= USRSTACK - ctob(p->p_vmspace->vm_ssize))
		(void)grow(p, (unsigned)fp);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d ssp %x usp %x scp %x ft %d\n",
		       p->p_pid, sig, &oonstack, fp, &fp->sf_sc, ft);
#endif
	if (useracc((caddr_t)fp, fsize, B_WRITE) == 0) {
#ifdef DEBUG
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig(%d): useracc failed on sig %d\n",
			       p->p_pid, sig);
#endif
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);	/* note: sig reused as a mask here */
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	kfp = (struct sigframe *)malloc((u_long)fsize, M_TEMP, M_WAITOK);
	/*
	 * Build the argument list for the signal handler.
	 */
	kfp->sf_signum = sig;
	kfp->sf_code = code;
	kfp->sf_scp = &fp->sf_sc;
	kfp->sf_handler = catcher;
	/*
	 * Save necessary hardware state.  Currently this includes:
	 *	- general registers
	 *	- original exception frame (if not a "normal" frame)
	 *	- FP coprocessor state
	 */
	kfp->sf_state.ss_flags = SS_USERREGS;
	bcopy((caddr_t)frame->f_regs,
	    (caddr_t)kfp->sf_state.ss_frame.f_regs, sizeof frame->f_regs);
	if (ft >= FMT9) {
#ifdef DEBUG
		if (ft != FMT9 && ft != FMTA && ft != FMTB)
			panic("sendsig: bogus frame type");
#endif
		kfp->sf_state.ss_flags |= SS_RTEFRAME;
		kfp->sf_state.ss_frame.f_format = frame->f_format;
		kfp->sf_state.ss_frame.f_vector = frame->f_vector;
		bcopy((caddr_t)&frame->F_u,
		    (caddr_t)&kfp->sf_state.ss_frame.F_u, exframesize[ft]);
		/*
		 * Leave an indicator that we need to clean up the kernel
		 * stack.  We do this by setting the "pad word" above the
		 * hardware stack frame to the amount the stack must be
		 * adjusted by.
		 *
		 * N.B. we increment rather than just set f_stackadj in
		 * case we are called from syscall when processing a
		 * sigreturn.  In that case, f_stackadj may be non-zero.
		 */
		frame->f_stackadj += exframesize[ft];
		frame->f_format = frame->f_vector = 0;
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sendsig(%d): copy out %d of frame %d\n",
			       p->p_pid, exframesize[ft], ft);
#endif
	}
#ifdef FPCOPROC
	kfp->sf_state.ss_flags |= SS_FPSTATE;
	m68881_save(&kfp->sf_state.ss_fpstate);
#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&kfp->sf_state.ss_fpstate)
		printf("sendsig(%d): copy out FP state (%x) to %x\n",
		       p->p_pid, *(u_int *)&kfp->sf_state.ss_fpstate,
		       &kfp->sf_state.ss_fpstate);
#endif
#endif
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	kfp->sf_sc.sc_onstack = oonstack;
	kfp->sf_sc.sc_mask = mask;
	kfp->sf_sc.sc_sp = frame->f_regs[SP];
	kfp->sf_sc.sc_fp = frame->f_regs[A6];
	kfp->sf_sc.sc_ap = (int)&fp->sf_state;
	kfp->sf_sc.sc_pc = frame->f_pc;
	kfp->sf_sc.sc_ps = frame->f_sr;
#ifdef COMPAT_HPUX
	/*
	 * Create an HP-UX style sigcontext structure and associated goo
	 */
	if (p->p_emul == EMUL_HPUX) {
		register struct hpuxsigframe *hkfp;

		/* the HP-UX frame sits immediately after the BSD one */
		hkfp = (struct hpuxsigframe *)&kfp[1];
		hkfp->hsf_signum = bsdtohpuxsig(kfp->sf_signum);
		hkfp->hsf_code = kfp->sf_code;
		hkfp->hsf_scp = (struct sigcontext *)
		    &((struct hpuxsigframe *)(&fp[1]))->hsf_sc;
		hkfp->hsf_sc.hsc_syscall = 0;	/* XXX */
		hkfp->hsf_sc.hsc_action = 0;	/* XXX */
		hkfp->hsf_sc.hsc_pad1 = hkfp->hsf_sc.hsc_pad2 = 0;
		hkfp->hsf_sc.hsc_onstack = kfp->sf_sc.sc_onstack;
		hkfp->hsf_sc.hsc_mask = kfp->sf_sc.sc_mask;
		hkfp->hsf_sc.hsc_sp = kfp->sf_sc.sc_sp;
		hkfp->hsf_sc.hsc_ps = kfp->sf_sc.sc_ps;
		hkfp->hsf_sc.hsc_pc = kfp->sf_sc.sc_pc;
		hkfp->hsf_sc.hsc_pad = 0;
		/* cookie lets sigreturn recognize a frame we built */
		hkfp->hsf_sc.hsc_magic = 0xdeadbeef;
		hkfp->hsf_sc.hsc_realsc = kfp->sf_scp;
		bcopy((caddr_t)frame->f_regs, (caddr_t)hkfp->hsf_regs,
		    sizeof (hkfp->hsf_regs));

		/* hand the handler HP-UX signo and context pointer */
		kfp->sf_signum = hkfp->hsf_signum;
		kfp->sf_scp = hkfp->hsf_scp;
	}
#endif
	(void) copyout((caddr_t)kfp, (caddr_t)fp, fsize);
	frame->f_regs[SP] = (int)fp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sendsig(%d): sig %d scp %x fp %x sc_sp %x sc_ap %x\n",
		       p->p_pid, sig, kfp->sf_scp, fp,
		       kfp->sf_sc.sc_sp, kfp->sf_sc.sc_ap);
#endif
	/*
	 * Signal trampoline code is at base of user stack.
	 */
	frame->f_pc = (int)(((char *)PS_STRINGS) - (esigcode - sigcode));
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sendsig(%d): sig %d returns\n",
		       p->p_pid, sig);
#endif
	free((caddr_t)kfp, M_TEMP);
}
|
|
|
|
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper priviledges or to cause
 * a machine fault.
 */
struct sigreturn_args {
	struct sigcontext *sigcntxp;	/* user address of the context */
};

sigreturn(p, uap, retval)
	struct proc *p;
	struct sigreturn_args *uap;
	int *retval;
{
	register struct sigcontext *scp;
	register struct frame *frame;
	register int rf;		/* user address of saved sigstate */
	struct sigcontext tsigc;	/* kernel copy of user context */
	struct sigstate tstate;		/* kernel copy of hardware state */
	int flags;
	extern short exframesize[];	/* per-format frame sizes */

	scp = uap->sigcntxp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn: pid %d, scp %x\n", p->p_pid, scp);
#endif
	/* context pointer must be word aligned */
	if ((int)scp & 1)
		return (EINVAL);
#ifdef COMPAT_HPUX
	/*
	 * Grab context as an HP-UX style context and determine if it
	 * was one that we contructed in sendsig.
	 */
	if (p->p_emul == EMUL_HPUX) {
		struct hpuxsigcontext *hscp = (struct hpuxsigcontext *)scp;
		struct hpuxsigcontext htsigc;

		if (useracc((caddr_t)hscp, sizeof (*hscp), B_WRITE) == 0 ||
		    copyin((caddr_t)hscp, (caddr_t)&htsigc, sizeof htsigc))
			return (EINVAL);
		/*
		 * If not generated by sendsig or we cannot restore the
		 * BSD-style sigcontext, just restore what we can -- state
		 * will be lost, but them's the breaks.
		 */
		hscp = &htsigc;
		if (hscp->hsc_magic != 0xdeadbeef ||
		    (scp = hscp->hsc_realsc) == 0 ||
		    useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
		    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc)) {
			if (hscp->hsc_onstack & 01)
				p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
			else
				p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
			p->p_sigmask = hscp->hsc_mask &~ sigcantmask;
			frame = (struct frame *) p->p_md.md_regs;
			frame->f_regs[SP] = hscp->hsc_sp;
			frame->f_pc = hscp->hsc_pc;
			/* strip privileged bits from the user-supplied psl */
			frame->f_sr = hscp->hsc_ps &~ PSL_USERCLR;
			return (EJUSTRETURN);
		}
		/*
		 * Otherwise, overlay BSD context with possibly modified
		 * HP-UX values.
		 */
		tsigc.sc_onstack = hscp->hsc_onstack;
		tsigc.sc_mask = hscp->hsc_mask;
		tsigc.sc_sp = hscp->hsc_sp;
		tsigc.sc_ps = hscp->hsc_ps;
		tsigc.sc_pc = hscp->hsc_pc;
	} else
#endif
	/*
	 * Test and fetch the context structure.
	 * We grab it all at once for speed.
	 */
	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0 ||
	    copyin((caddr_t)scp, (caddr_t)&tsigc, sizeof tsigc))
		return (EINVAL);
	scp = &tsigc;
	/* reject psl values that would grant supervisor privilege */
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_S)) != 0)
		return (EINVAL);
	/*
	 * Restore the user supplied information
	 */
	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;
	p->p_sigmask = scp->sc_mask &~ sigcantmask;
	frame = (struct frame *) p->p_md.md_regs;
	frame->f_regs[SP] = scp->sc_sp;
	frame->f_regs[A6] = scp->sc_fp;
	frame->f_pc = scp->sc_pc;
	frame->f_sr = scp->sc_ps;
	/*
	 * Grab pointer to hardware state information.
	 * If zero, the user is probably doing a longjmp.
	 */
	if ((rf = scp->sc_ap) == 0)
		return (EJUSTRETURN);
	/*
	 * See if there is anything to do before we go to the
	 * expense of copying in close to 1/2K of data
	 */
	flags = fuword((caddr_t)rf);
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW)
		printf("sigreturn(%d): sc_ap %x flags %x\n",
		       p->p_pid, rf, flags);
#endif
	/*
	 * fuword failed (bogus sc_ap value).
	 */
	if (flags == -1)
		return (EINVAL);
	if (flags == 0 || copyin((caddr_t)rf, (caddr_t)&tstate, sizeof tstate))
		return (EJUSTRETURN);
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
		printf("sigreturn(%d): ssp %x usp %x scp %x ft %d\n",
		       p->p_pid, &flags, scp->sc_sp, uap->sigcntxp,
		       (flags&SS_RTEFRAME) ? tstate.ss_frame.f_format : -1);
#endif
	/*
	 * Restore most of the users registers except for A6 and SP
	 * which were handled above.
	 */
	if (flags & SS_USERREGS)
		bcopy((caddr_t)tstate.ss_frame.f_regs,
		    (caddr_t)frame->f_regs, sizeof(frame->f_regs)-2*NBPW);
	/*
	 * Restore long stack frames.  Note that we do not copy
	 * back the saved SR or PC, they were picked up above from
	 * the sigcontext structure.
	 */
	if (flags & SS_RTEFRAME) {
		register int sz;

		/* grab frame type and validate */
		sz = tstate.ss_frame.f_format;
		if (sz > 15 || (sz = exframesize[sz]) < 0)
			return (EINVAL);
		frame->f_stackadj -= sz;
		frame->f_format = tstate.ss_frame.f_format;
		frame->f_vector = tstate.ss_frame.f_vector;
		bcopy((caddr_t)&tstate.ss_frame.F_u, (caddr_t)&frame->F_u, sz);
#ifdef DEBUG
		if (sigdebug & SDB_FOLLOW)
			printf("sigreturn(%d): copy in %d of frame type %d\n",
			       p->p_pid, sz, tstate.ss_frame.f_format);
#endif
	}
#ifdef FPCOPROC
	/*
	 * Finally we restore the original FP context
	 */
	if (flags & SS_FPSTATE)
		m68881_restore(&tstate.ss_fpstate);
#ifdef DEBUG
	if ((sigdebug & SDB_FPSTATE) && *(char *)&tstate.ss_fpstate)
		printf("sigreturn(%d): copied in FP state (%x) at %x\n",
		       p->p_pid, *(u_int *)&tstate.ss_fpstate,
		       &tstate.ss_fpstate);
#endif
#endif
#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sigreturn(%d): returns\n", p->p_pid);
#endif
	return (EJUSTRETURN);
}
|
|
|
|
int	waittime = -1;		/* < 0 until boot() starts syncing disks (sync once) */
|
|
|
|
/*
 * boot: halt or reboot the machine per the RB_* bits in howto,
 * syncing disks first unless RB_NOSYNC is set or a sync already ran.
 */
void
boot(howto)
	register int howto;
{
	/* take a snap shot before clobbering any registers */
	if (curproc)
		savectx(curproc->p_addr, 0);

	boothowto = howto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;		/* only sync once, even if re-entered */
		(void) spl0();
		printf("syncing disks... ");
		/*
		 * Release vnodes held by texts before sync.
		 */
		if (panicstr == 0)
			vnode_pager_umount(NULL);
#ifdef notdef
#include "fd.h"
#if NFD > 0
		fdshutdown();
#endif
#endif
		sync(&proc0, (void *)NULL, (int *)NULL);

		/* wait (bounded) for outstanding buffer I/O to drain */
		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		/*
		 * If we've been adjusting the clock, the todr
		 * will be out of synch; adjust it now.
		 */
		resettodr();
	}
	splhigh();			/* extreme priority */
	if (howto&RB_HALT) {
		printf("halted\n\n");
		asm(" stop #0x2700");
	} else {
		if (howto & RB_DUMP)
			dumpsys();
		doboot();
		/*NOTREACHED*/
	}
	for (;;) ;	/* should not be necessary, but here to quiet compiler */
	/*NOTREACHED*/
}
|
|
|
|
u_int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* pages to dump; also for savecore */
long	dumplo = 0;		/* starting disk block on dumpdev */
|
|
|
|
/*
 * Size and position the crash dump on dumpdev: dump all of physical
 * memory if the partition (as reported by the driver's d_psize entry)
 * is big enough, otherwise only as much as fits; by default place the
 * dump at the tail end of the partition.
 */
dumpconf()
{
	int nblks;	/* size of dump device, in disk blocks */

	dumpsize = physmem;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			/* whole of memory fits: dump at end of partition */
			dumplo = nblks - btodb(ctob(physmem));
	}
	/*
	 * Don't dump on the first CLBYTES (why CLBYTES?)
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < btodb(CLBYTES))
		dumplo = btodb(CLBYTES);
}
|
|
|
|
/*
|
|
* Doadump comes here after turning off memory management and
|
|
* getting on the dump stack, either when called above, or by
|
|
* the auto-restart code.
|
|
*/
|
|
/*
 * Write a crash dump to dumpdev via the driver's d_dump entry and
 * report the outcome on the console.  Silently returns if no dump
 * device is configured or dumplo was never set up.
 */
dumpsys()
{

	msgbufmapped = 0;	/* stop sending console output to msgbuf */
	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if dump device has already configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	/* the driver's d_dump entry performs the actual transfer */
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	default:
		printf("succeeded\n");
		break;
	}
}
|
|
|
|
/*
|
|
* Return the best possible estimate of the time in the timeval
|
|
* to which tvp points. We do this by returning the current time
|
|
* plus the amount of time since the last clock interrupt (clock.c:clkread).
|
|
*
|
|
* Check that this time is no less than any previously-reported time,
|
|
* which could happen around the time of a clock adjustment. Just for fun,
|
|
* we guarantee that the time will be greater than the value obtained by a
|
|
* previous call.
|
|
*/
|
|
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();		/* keep time/clkread consistent */
	static struct timeval lasttime;	/* last value handed out */

	*tvp = time;
	/* add microseconds elapsed since the last clock interrupt */
	tvp->tv_usec += clkread();
	while (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	/*
	 * Enforce monotonicity: if we would not report a time strictly
	 * later than the previous call (same second, usec not advanced),
	 * hand out lasttime + 1us instead.  The assignment embedded in
	 * the third conjunct is deliberate; if it pushes usec past a
	 * second, carry into tv_sec.
	 */
	if (tvp->tv_sec == lasttime.tv_sec &&
	    tvp->tv_usec <= lasttime.tv_usec &&
	    (tvp->tv_usec = lasttime.tv_usec + 1) > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	lasttime = *tvp;
	splx(s);
}
|
|
|
|
/*
 * Machine-dependent CPU setup: all that is needed here is to turn
 * on memory parity-error detection (if the hardware supports it).
 */
initcpu()
{
	parityenable();
}
|
|
|
|
/*
 * Catch-all for traps with no registered handler: log the vector
 * offset and faulting PC, then carry on.
 */
straytrap(pc, evec)
	int pc;
	u_short evec;
{
	printf("unexpected trap (vector offset %x) from %x\n",
	    evec & 0xFFF, pc);
}
|
|
|
|
/*
 * When non-zero, points at a label_t the bus-error handler will
 * longjmp() to instead of treating the fault as fatal (used by the
 * address-probe and parity routines below).
 */
int *nofault;
|
/*
 * Probe addr with a word-sized read, catching any bus error through
 * the nofault/setjmp hook.  Returns 1 if the access faulted (bad
 * address), 0 if the read succeeded.
 */
badaddr(addr)
	register caddr_t addr;
{
	register int i;
	label_t faultbuf;

#ifdef lint
	i = *addr; if (i) return(0);
#endif
	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* arrived here via longjmp from the bus-error handler */
		nofault = (int *) 0;
		return(1);
	}
	i = *(volatile short *)addr;	/* the probe itself */
	nofault = (int *) 0;
	return(0);
}
|
|
|
|
/*
 * Like badaddr(), but probes with a byte-sized read (for devices
 * that only answer byte accesses).  Returns 1 on fault, 0 otherwise.
 */
badbaddr(addr)
	register caddr_t addr;
{
	register int i;
	label_t faultbuf;

#ifdef lint
	i = *addr; if (i) return(0);
#endif
	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* arrived here via longjmp from the bus-error handler */
		nofault = (int *) 0;
		return(1);
	}
	i = *(volatile char *)addr;	/* byte-wide probe */
	nofault = (int *) 0;
	return(0);
}
|
|
|
|
/*
 * Software network interrupt: for each protocol whose bit is set in
 * netisr, clear the bit first (so a request posted while we run is
 * not lost) and then call its input routine.
 */
netintr()
{
#ifdef INET
	if (netisr & (1 << NETISR_ARP)) {
		netisr &= ~(1 << NETISR_ARP);
		arpintr();
	}
	if (netisr & (1 << NETISR_IP)) {
		netisr &= ~(1 << NETISR_IP);
		ipintr();
	}
#endif
#ifdef NS
	if (netisr & (1 << NETISR_NS)) {
		netisr &= ~(1 << NETISR_NS);
		nsintr();
	}
#endif
#ifdef ISO
	if (netisr & (1 << NETISR_ISO)) {
		netisr &= ~(1 << NETISR_ISO);
		clnlintr();
	}
#endif
}
|
|
|
|
/*
 * Dispatch an auto-vectored interrupt: extract the priority level
 * from the saved status register and walk that level's isrqueue
 * list until a handler claims the interrupt (returns non-zero).
 * Only levels 3-5 are dispatched here; anything else is unexpected.
 */
intrhand(sr)
	int sr;
{
	register struct isr *isr;
	register int found = 0;	/* set once a handler claims the interrupt */
	register int ipl;
	extern struct isr isrqueue[];

	ipl = (sr >> 8) & 7;	/* interrupt priority from the SR */
	switch (ipl) {

	case 3:
	case 4:
	case 5:
		ipl = ISRIPL(ipl);
		isr = isrqueue[ipl].isr_forw;
		for (; isr != &isrqueue[ipl]; isr = isr->isr_forw) {
			if ((isr->isr_intr)(isr->isr_arg)) {
				found++;
				break;
			}
		}
		if (found == 0)
			printf("stray interrupt, sr 0x%x\n", sr);
		break;

	case 0:
	case 1:
	case 2:
	case 6:
	case 7:
		printf("intrhand: unexpected sr 0x%x\n", sr);
		break;
	}
}
|
|
|
|
/* debug kernels get the "panic button" (double keyboard NMI) by default */
#if (defined(DDB) || defined(DEBUG)) && !defined(PANICBUTTON)
#define PANICBUTTON
#endif

#ifdef PANICBUTTON
int panicbutton = 1;	/* non-zero if panic buttons are enabled */
int crashandburn = 0;	/* set by first NMI; second one within the delay panics */
int candbdelay = 50;	/* give em half a second */

/*
 * Timeout routine: the second NMI did not arrive in time, so cancel
 * the pending crash-and-burn request.
 */
void
candbtimer(arg)
	void *arg;
{
	crashandburn = 0;
}
#endif
|
|
|
|
/*
 * Level 7 interrupts can be caused by the keyboard or parity errors.
 */
nmihand(frame)
	struct frame frame;
{
	if (kbdnmi()) {
#ifdef PANICBUTTON
		static int innmihand = 0;

		/*
		 * Attempt to reduce the window of vulnerability for recursive
		 * NMIs (e.g. someone holding down the keyboard reset button).
		 */
		if (innmihand == 0) {
			innmihand = 1;
			printf("Got a keyboard NMI\n");
			innmihand = 0;
		}
#ifdef DDB
		Debugger();
#else
		if (panicbutton) {
			if (crashandburn) {
				/* second NMI within candbdelay: force a panic */
				crashandburn = 0;
				panic(panicstr ?
				    "forced crash, nosync" : "forced crash");
			}
			crashandburn++;
			timeout(candbtimer, NULL, candbdelay);
		}
#endif
#endif
		return;
	}
	if (parityerror(&frame))
		return;
	/* panic?? */
	printf("unexpected level 7 interrupt ignored\n");
}
|
|
|
|
/*
 * Parity error section.  Contains magic.
 */
/* memory-mapped parity control register */
#define PARREG ((volatile short *)IIOV(0x5B0000))
static int gotparmem = 0;	/* set by parityenable() if parity memory exists */
#ifdef DEBUG
int ignorekperr = 0;	/* ignore kernel parity errors */
#endif
|
|
|
|
/*
 * Enable parity detection.  Probe PARREG under the nofault hook:
 * if the write faults there is no parity hardware and gotparmem
 * stays clear, disabling the rest of the parity machinery.
 */
parityenable()
{
	label_t faultbuf;

	nofault = (int *) &faultbuf;
	if (setjmp((label_t *)nofault)) {
		/* write faulted: no parity control register present */
		nofault = (int *) 0;
#ifdef DEBUG
		printf("No parity memory\n");
#endif
		return;
	}
	*PARREG = 1;
	nofault = (int *) 0;
	gotparmem = 1;
#ifdef DEBUG
	printf("Parity detection enabled\n");
#endif
}
|
|
|
|
/*
 * Determine if level 7 interrupt was caused by a parity error
 * and deal with it if it was.  Returns 1 if it was a parity error.
 */
parityerror(fp)
	struct frame *fp;
{
	if (!gotparmem)
		return(0);
	/* toggle the control register to clear and rearm the error latch */
	*PARREG = 0;
	DELAY(10);
	*PARREG = 1;
	if (panicstr) {
		printf("parity error after panic ignored\n");
		return(1);
	}
	if (!findparerror())
		printf("WARNING: transient parity error ignored\n");
	else if (USERMODE(fp->f_sr)) {
		/* error hit in user mode: kill the offending process */
		printf("pid %d: parity error\n", curproc->p_pid);
		uprintf("sorry, pid %d killed due to memory parity error\n",
		    curproc->p_pid);
		psignal(curproc, SIGKILL);
#ifdef DEBUG
	} else if (ignorekperr) {
		printf("WARNING: kernel parity error ignored\n");
#endif
	} else {
		/* kernel-mode parity error is fatal */
		regdump(fp->f_regs, 128);
		panic("kernel parity error");
	}
	return(1);
}
|
|
|
|
/*
 * Yuk! There has got to be a better way to do this!
 * Searching all of memory with interrupts blocked can lead to disaster.
 *
 * Locate the parity error by re-reading every longword of physical
 * memory until the fault recurs; returns 1 if the bad location was
 * found (and printed), 0 if the error did not reproduce.
 */
findparerror()
{
	static label_t parcatch;
	static int looking = 0;	/* non-zero while the scan below is running */
	volatile struct pte opte;
	volatile int pg, o, s;
	register volatile int *ip;
	register int i;
	int found;

#ifdef lint
	ip = &found;
	i = o = pg = 0; if (i) return(0);
#endif
	/*
	 * If looking is true we are searching for a known parity error
	 * and it has just occurred.  All we do is return to the higher
	 * level invocation.
	 */
	if (looking)
		longjmp(&parcatch);
	s = splhigh();
	/*
	 * If setjmp returns true, the parity error we were searching
	 * for has just occurred (longjmp above) at the current pg+o
	 */
	if (setjmp(&parcatch)) {
		printf("Parity error at 0x%x\n", ctob(pg)|o);
		found = 1;
		goto done;
	}
	/*
	 * If we get here, a parity error has occurred for the first time
	 * and we need to find it.  We turn off any external caches and
	 * loop thru memory, testing every longword til a fault occurs and
	 * we regain control at setjmp above.  Note that because of the
	 * setjmp, pg and o need to be volatile or their values will be lost.
	 */
	looking = 1;
	ecacheoff();
	for (pg = btoc(lowram); pg < btoc(lowram)+physmem; pg++) {
		/* map each physical page at vmmap and touch every word */
		pmap_enter(kernel_pmap, (vm_offset_t)vmmap, ctob(pg),
		    VM_PROT_READ, TRUE);
		for (o = 0; o < NBPG; o += sizeof(int))
			i = *(int *)(&vmmap[o]);
	}
	/*
	 * Getting here implies no fault was found.  Should never happen.
	 */
	printf("Couldn't locate parity error\n");
	found = 0;
done:
	looking = 0;
	pmap_remove(kernel_pmap, (vm_offset_t)vmmap, (vm_offset_t)&vmmap[NBPG]);
	ecacheon();
	splx(s);
	return(found);
}
|
|
|
|
/*
 * Print a register dump (d0-d7/a0-a7, pc, ps, sfc/dfc) followed by
 * up to sbytes of the appropriate (kernel or user) stack.  The
 * doingdump flag guards against recursion if a fault occurs in here.
 */
regdump(rp, sbytes)
	int *rp; /* must not be register */
	int sbytes;
{
	static int doingdump = 0;
	register int i;
	int s;
	extern char *hexstr();

	if (doingdump)
		return;
	s = splhigh();
	doingdump = 1;
	printf("pid = %d, pc = %s, ", curproc->p_pid, hexstr(rp[PC], 8));
	printf("ps = %s, ", hexstr(rp[PS], 4));
	printf("sfc = %s, ", hexstr(getsfc(), 4));
	printf("dfc = %s\n", hexstr(getdfc(), 4));
	printf("Registers:\n ");
	for (i = 0; i < 8; i++)
		printf(" %d", i);
	printf("\ndreg:");
	for (i = 0; i < 8; i++)
		printf(" %s", hexstr(rp[i], 8));
	printf("\nareg:");
	for (i = 0; i < 8; i++)
		printf(" %s", hexstr(rp[i+8], 8));
	if (sbytes > 0) {
		if (rp[PS] & PSL_S) {
			/* supervisor mode: dump from our own stack frame */
			printf("\n\nKernel stack (%s):",
			    hexstr((int)(((int *)&rp)-1), 8));
			dumpmem(((int *)&rp)-1, sbytes, 0);
		} else {
			printf("\n\nUser stack (%s):", hexstr(rp[SP], 8));
			dumpmem((int *)rp[SP], sbytes, 1);
		}
	}
	doingdump = 0;
	splx(s);
}
|
|
|
|
extern char kstack[];
/* start of the top page of the kernel stack; bounds kernel dumpmem() reads */
#define KSADDR ((int *)&(kstack[(UPAGES-1)*NBPG]))
|
|
dumpmem(ptr, sz, ustack)
|
|
register int *ptr;
|
|
int sz;
|
|
{
|
|
register int i, val;
|
|
extern char *hexstr();
|
|
|
|
for (i = 0; i < sz; i++) {
|
|
if ((i & 7) == 0)
|
|
printf("\n%s: ", hexstr((int)ptr, 6));
|
|
else
|
|
printf(" ");
|
|
if (ustack == 1) {
|
|
if ((val = fuword(ptr++)) == -1)
|
|
break;
|
|
} else {
|
|
if (ustack == 0 &&
|
|
(ptr < KSADDR || ptr > KSADDR+(NBPG/4-1)))
|
|
break;
|
|
val = *ptr++;
|
|
}
|
|
printf("%s", hexstr(val, 8));
|
|
}
|
|
printf("\n");
|
|
}
|
|
|
|
/*
 * Convert val to a fixed-width (len digits, 0 <= len <= 8) upper-case
 * hexadecimal string.  Returns a pointer to a static buffer that is
 * overwritten on every call (not reentrant); returns the empty string
 * when len is out of range.  Note: len was previously undeclared and
 * unchecked for negative values, which would have indexed nbuf out of
 * bounds.
 */
char *
hexstr(val, len)
	register int val;
	int len;	/* was undeclared: relied on K&R implicit int */
{
	static char nbuf[9];	/* 8 digits + NUL, reused every call */
	register int x, i;

	if (len < 0 || len > 8)
		return("");
	nbuf[len] = '\0';
	/* fill from the least significant nibble backwards */
	for (i = len-1; i >= 0; --i) {
		x = val & 0xF;
		if (x > 9)
			nbuf[i] = x - 10 + 'A';
		else
			nbuf[i] = x + '0';
		val >>= 4;
	}
	return(nbuf);
}
|
|
|
|
/*
|
|
* cpu_exec_aout_makecmds():
|
|
* cpu-dependent a.out format hook for execve().
|
|
*
|
|
* Determine of the given exec package refers to something which we
|
|
* understand and, if so, set up the vmcmds for it.
|
|
*
|
|
* XXX what are the special cases for the hp300?
|
|
* XXX why is this COMPAT_NOMID? was something generating
|
|
* hp300 binaries with an a_mid of 0? i thought that was only
|
|
* done on little-endian machines... -- cgd
|
|
*/
|
|
int
|
|
cpu_exec_aout_makecmds(p, epp)
|
|
struct proc *p;
|
|
struct exec_package *epp;
|
|
{
|
|
#ifdef COMPAT_NOMID
|
|
u_long midmag, magic;
|
|
u_short mid;
|
|
int error;
|
|
struct exec *execp = epp->ep_hdr;
|
|
|
|
midmag = ntohl(execp->a_midmag);
|
|
mid = (midmag >> 16) & 0xffff;
|
|
magic = midmag & 0xffff;
|
|
|
|
midmag = mid << 16 | magic;
|
|
|
|
switch (midmag) {
|
|
case (MID_ZERO << 16) | ZMAGIC:
|
|
error = cpu_exec_aout_prep_oldzmagic(p, epp);
|
|
break;
|
|
default:
|
|
error = ENOEXEC;
|
|
}
|
|
|
|
return error;
|
|
#else /* ! COMPAT_NOMID */
|
|
return ENOEXEC;
|
|
#endif
|
|
}
|
|
|
|
#ifdef COMPAT_NOMID
|
|
/*
|
|
* cpu_exec_aout_prep_oldzmagic():
|
|
* Prepare the vmcmds to build a vmspace for an old
|
|
* (i.e. USRTEXT == 0) binary.
|
|
*
|
|
* Cloned from exec_aout_prep_zmagic() in kern/exec_aout.c; a more verbose
|
|
* description of operation is there.
|
|
*/
|
|
int
|
|
cpu_exec_aout_prep_oldzmagic(p, epp)
|
|
struct proc *p;
|
|
struct exec_package *epp;
|
|
{
|
|
struct exec *execp = epp->ep_hdr;
|
|
|
|
epp->ep_taddr = 0;
|
|
epp->ep_tsize = execp->a_text;
|
|
epp->ep_daddr = epp->ep_taddr + execp->a_text;
|
|
epp->ep_dsize = execp->a_data + execp->a_bss;
|
|
epp->ep_entry = execp->a_entry;
|
|
|
|
/*
|
|
* check if vnode is in open for writing, because we want to * demand-page out of it. if it is, don't do it, for various
|
|
* reasons
|
|
*/
|
|
if ((execp->a_text != 0 || execp->a_data != 0) &&
|
|
epp->ep_vp->v_writecount != 0) {
|
|
#ifdef DIAGNOSTIC
|
|
if (epp->ep_vp->v_flag & VTEXT)
|
|
panic("exec: a VTEXT vnode has writecount != 0\n");
|
|
#endif
|
|
return ETXTBSY;
|
|
}
|
|
epp->ep_vp->v_flag |= VTEXT;
|
|
|
|
/* set up command for text segment */
|
|
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_text,
|
|
epp->ep_taddr, epp->ep_vp, NBPG, /* XXX - should NBPG be CLBYTES? */
|
|
VM_PROT_READ|VM_PROT_EXECUTE);
|
|
|
|
/* set up command for data segment */
|
|
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_pagedvn, execp->a_data,
|
|
epp->ep_daddr, epp->ep_vp,
|
|
execp->a_text + NBPG, /* XXX - should NBPG be CLBYTES? */
|
|
VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
|
|
|
|
/* set up command for bss segment */
|
|
NEW_VMCMD(&epp->ep_vmcmds, vmcmd_map_zero, execp->a_bss,
|
|
epp->ep_daddr + execp->a_data, NULLVP, 0,
|
|
VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE);
|
|
|
|
return exec_aout_setup_stack(p, epp);
|
|
}
|
|
#endif /* COMPAT_NOMID */
|