mostly-machine-independent switch, and changes to match. also, hack init_main

cgd 1994-05-18 05:12:10 +00:00
parent c7468280da
commit 21a03cca53
11 changed files with 118 additions and 79 deletions

View File

@ -37,7 +37,7 @@
* SUCH DAMAGE.
*
* from: @(#)locore.s 7.3 (Berkeley) 5/13/91
* $Id: locore.s,v 1.67 1994/05/13 00:50:41 cgd Exp $
* $Id: locore.s,v 1.68 1994/05/18 05:12:10 cgd Exp $
*/
/*
@ -441,10 +441,10 @@ reloc_gdt:
/*
* Some BIOSes leave trash in the spare segment registers. We need to
* clear them so we don't get a protection fault in swtch() later on.
* Since the kernel itself does not use these except in copyin/out, it
* seems best to make them null selectors so we get a trap if they are
* accidentally referenced.
* clear them so we don't get a protection fault in cpu_switch() later
* on. Since the kernel itself does not use these except in
* copyin/out, it seems best to make them null selectors so we get a
* trap if they are accidentally referenced.
*/
xorl %ecx,%ecx
movl %cx,%fs
@ -1446,7 +1446,7 @@ ENTRY(remrq)
#endif
/*
* When no processes are on the runq, swtch() branches to here to wait for
* When no processes are on the runq, cpu_switch() branches to here to wait for
* something to come ready.
*/
ENTRY(idle)
@ -1463,20 +1463,20 @@ ENTRY(idle)
jmp 1b
#ifdef DIAGNOSTIC
ENTRY(swtch_error)
ENTRY(switch_error)
pushl $1f
call _panic
/*NOTREACHED*/
1: .asciz "swtch"
1: .asciz "cpu_switch"
#endif
/*
* swtch(void);
* cpu_switch(void);
* Find a runnable process and switch to it. Wait if necessary. If the new
* process is the same as the old one, we short-circuit the context save and
* restore.
*/
ENTRY(swtch)
ENTRY(cpu_switch)
pushl %ebx
pushl %esi
pushl %edi
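
The renamed cpu_switch() keeps the structure of the old swtch(): scan the whichqs bitmask for the lowest-numbered non-empty run queue (the bsfl in the following hunks), unlink the process at the head of that queue, and clear the queue's bit once the queue drains. A rough, self-contained C model of that selection step is sketched below; the types and names are simplified stand-ins for the kernel's whichqs/qs/prochd, and the real routine runs with interrupts blocked and branches to idle() rather than returning a null pointer.

#include <stddef.h>
#include <strings.h>			/* ffs() */

#define NQS 32				/* one run queue per bit, as in the kernel */

struct pq {				/* stand-in for the kernel's struct prochd */
	struct pq *ph_link, *ph_rlink;
};

static unsigned int whichqs;		/* bit q set => qs[q] is non-empty */
static struct pq qs[NQS];		/* circular doubly-linked run queues */

/*
 * Roughly what the switch_search/sw1 code does: find the lowest set bit
 * (bsfl), take the entry at the front of that queue, and clear the bit
 * when the queue becomes empty.  Returns NULL where the assembly would
 * fall into idle().
 */
static struct pq *
pick_next_runnable(void)
{
	int q;
	struct pq *head, *p;

	if (whichqs == 0)
		return NULL;			/* nothing runnable: idle() */
	q = ffs((int)whichqs) - 1;		/* bsfl %ecx,%ebx */
	head = &qs[q];
	p = head->ph_link;			/* P_FORW of the queue head */
	head->ph_link = p->ph_link;		/* unlink from the front */
	p->ph_link->ph_rlink = head;
	if (head->ph_link == head)
		whichqs &= ~(1U << q);		/* queue now empty: clear its bit */
	return p;
}
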
@ -1488,7 +1488,7 @@ ENTRY(swtch)
movl _curproc,%esi
movl $0,_curproc
swtch_search:
switch_search:
/*
* First phase: find new process.
*
@ -1513,7 +1513,7 @@ sw1: bsfl %ecx,%ebx # find a full q
movl P_FORW(%eax),%edi # unlink from front of process q
#ifdef DIAGNOSTIC
cmpl %edi,%eax # linked to self (e.g. nothing queued)?
je _swtch_error # not possible
je _switch_error # not possible
#endif
movl P_FORW(%edi),%edx
movl %edx,P_FORW(%eax)
@ -1533,9 +1533,9 @@ sw1: bsfl %ecx,%ebx # find a full q
#ifdef DIAGNOSTIC
cmpl %eax,P_WCHAN(%edi) # Waiting for something?
jne _swtch_error # Yes; shouldn't be queued.
jne _switch_error # Yes; shouldn't be queued.
cmpb $SRUN,P_STAT(%edi) # In run state?
jne _swtch_error # No; shouldn't be queued.
jne _switch_error # No; shouldn't be queued.
#endif
/* Isolate process. XXX Is this necessary? */
@ -1546,11 +1546,11 @@ sw1: bsfl %ecx,%ebx # find a full q
/* Skip context switch if same process. */
cmpl %edi,%esi
je swtch_return
je switch_return
/* If old process exited, don't bother. */
testl %esi,%esi
jz swtch_exited
jz switch_exited
/*
* Second phase: save old context.
@ -1585,7 +1585,7 @@ sw1: bsfl %ecx,%ebx # find a full q
1:
#endif
swtch_exited:
switch_exited:
/*
* Third phase: restore saved context.
*
@ -1634,7 +1634,7 @@ swtch_exited:
/* Interrupts are okay again. */
sti
swtch_return:
switch_return:
/* Record new process. */
movl %edi,_curproc
@ -1649,12 +1649,12 @@ swtch_return:
ret
/*
* swtch_exit(struct proc *p);
* switch_exit(struct proc *p);
* Switch to proc0's saved context and deallocate the address space and kernel
* stack for p. Then jump into swtch(), as if we were in proc0 all along.
* stack for p. Then jump into cpu_switch(), as if we were in proc0 all along.
*/
.globl _proc0,_vmspace_free,_kernel_map,_kmem_free
ENTRY(swtch_exit)
ENTRY(switch_exit)
movl 4(%esp),%edi # old process
movl $_proc0,%ebx
@ -1695,15 +1695,15 @@ ENTRY(swtch_exit)
call _kmem_free
addl $16,%esp
/* Jump into swtch() with the right state. */
/* Jump into cpu_switch() with the right state. */
movl %ebx,%esi
movl $0,_curproc
jmp swtch_search
jmp switch_search
/*
* savectx(struct pcb *pcb, int altreturn);
* Update pcb, saving current processor state and arranging for alternate
* return in swtch() if altreturn is true.
* return in cpu_switch() if altreturn is true.
*/
ENTRY(savectx)
pushl %ebx
@ -1768,7 +1768,7 @@ ENTRY(savectx)
call _bcopy
addl $12,%esp
1: /* This is the parent. The child will return from swtch(). */
1: /* This is the parent. The child will return from cpu_switch(). */
xorl %eax,%eax # return 0
addl $4,%esp # drop saved _cpl on the floor
popl %edi

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)trap.c 7.4 (Berkeley) 5/13/91
* $Id: trap.c,v 1.41 1994/05/16 09:46:10 cgd Exp $
* $Id: trap.c,v 1.42 1994/05/18 05:12:14 cgd Exp $
*/
/*
@ -80,7 +80,7 @@ userret(p, pc, oticks)
int pc;
u_quad_t oticks;
{
int sig;
int sig, s;
/* take pending signals */
while ((sig = CURSIG(p)) != 0)
@ -92,14 +92,14 @@ userret(p, pc, oticks)
* change our priority without changing run queues
* (the running process is not kept on a run queue).
* If this happened after we setrunqueue ourselves but
* before we swtch()'ed, we might not be on the queue
* before we switch()'ed, we might not be on the queue
* indicated by our priority.
*/
(void) splclock();
s = splstatclock();
setrunqueue(p);
p->p_stats->p_ru.ru_nivcsw++;
swtch();
(void) spl0();
mi_switch();
splx(s);
while ((sig = CURSIG(p)) != 0)
postsig(sig);
}
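
This hunk is the caller-side half of the commit's "mostly-machine-independent switch": userret() now raises to splstatclock(), queues itself, and calls mi_switch(), and the saved spl is restored with splx(s) instead of being dropped to spl0(). The layering that implies is sketched below; the body of the outline function stands only for the division of labor, not for the kern_synch.c source.

void	cpu_switch(void);	/* MD half: the locore.s routine renamed above */

void
mi_switch_outline(void)
{
	/*
	 * MI half: charge the outgoing process for the CPU time it used,
	 * enforce its CPU-time limit, and recompute its scheduling
	 * priority.  (Details omitted; they live in kern_synch.c.)
	 */
	cpu_switch();		/* MD half: pick the next runnable proc and swap contexts */
}
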

View File

@ -94,8 +94,8 @@ cpu_fork(p1, p2)
/*
* Copy the stack.
*
* When we first swtch() to the child, this will return from swtch()
* rather than savectx(). swtch() returns a pointer to the current
* When we first switch to the child, this will return from cpu_switch()
* rather than savectx(). cpu_switch returns a pointer to the current
* process; savectx() returns 0. Thus we can look for a non-zero
* return value to indicate that we're in the child.
*/
@ -111,10 +111,10 @@ cpu_fork(p1, p2)
/*
* cpu_exit is called as the last action during exit.
*
* We clean up a little and then call swtch_exit() with the old proc as an
* argument. swtch_exit() first switches to proc0's context, then does the
* We clean up a little and then call switch_exit() with the old proc as an
* argument. switch_exit() first switches to proc0's context, then does the
* vmspace_free() and kmem_free() that we don't do here, and finally jumps
* into swtch() to wait for another process to wake up.
* into switch() to wait for another process to wake up.
*/
void
cpu_exit(p)
@ -141,7 +141,7 @@ cpu_exit(p)
if (vm->vm_refcnt == 1)
vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
swtch_exit(p);
switch_exit(p);
}
void
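
Taken together with the locore.s hunks (the .globl _proc0,_vmspace_free,_kernel_map,_kmem_free line, movl $0,_curproc, and jmp switch_search), switch_exit()'s job reads roughly as the C outline below. The declarations are simplified stand-ins so the outline stands alone; the real calls are vmspace_free() on the dead process's vmspace and kmem_free() of its u-area/kernel stack out of kernel_map, both made only after the CPU is safely on proc0's context.

struct proc;					/* stand-ins, not kernel prototypes */
struct vm_map;
extern struct proc	*curproc;
extern struct vm_map	*kernel_map;
void	release_vmspace(struct proc *p);	/* stands in for vmspace_free(p->p_vmspace) */
void	release_kstack(struct vm_map *map, struct proc *p);	/* stands in for the kmem_free() call */
void	cpu_switch(void);

void
switch_exit_outline(struct proc *p)
{
	/* 1. already running on proc0's saved context (done in assembly) */

	/* 2. free the dead process's address space and kernel stack */
	release_vmspace(p);
	release_kstack(kernel_map, p);

	/* 3. forget the old process and fall into the search loop */
	curproc = (struct proc *)0;		/* movl $0,_curproc */
	cpu_switch();				/* really: jmp switch_search */
}
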

View File

@ -1 +1 @@
revision 1.42 intentionally removed
revision 1.43 intentionally removed

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 1989 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 1989, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@ -30,12 +30,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* from: @(#)kern_ktrace.c 7.15 (Berkeley) 6/21/91
* $Id: kern_ktrace.c,v 1.8 1994/05/05 05:38:13 cgd Exp $
* from: @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93
* $Id: kern_ktrace.c,v 1.9 1994/05/18 05:12:37 cgd Exp $
*/
#ifdef KTRACE
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
@ -44,8 +45,6 @@
#include <sys/malloc.h>
#include <sys/syslog.h>
void ktrwrite __P((struct vnode *vp, struct ktr_header *kth));
struct ktr_header *
ktrgetheader(type)
int type;
@ -62,16 +61,18 @@ ktrgetheader(type)
return (kth);
}
void
ktrsyscall(vp, code, narg, args)
struct vnode *vp;
int code, narg, args[];
{
struct ktr_header *kth = ktrgetheader(KTR_SYSCALL);
struct ktr_header *kth;
struct ktr_syscall *ktp;
register len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
struct proc *p = curproc; /* XXX */
int *argp, i;
p->p_traceflag |= KTRFAC_ACTIVE;
kth = ktrgetheader(KTR_SYSCALL);
MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
ktp->ktr_code = code;
ktp->ktr_narg = narg;
@ -83,16 +84,19 @@ ktrsyscall(vp, code, narg, args)
ktrwrite(vp, kth);
FREE(ktp, M_TEMP);
FREE(kth, M_TEMP);
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
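
Every ktr* routine is now bracketed by setting and clearing KTRFAC_ACTIVE in p->p_traceflag. The reason is re-entrancy: the vnode I/O done by ktrwrite() can itself pass through traced code paths, and the KTRPOINT() test that guards trace call sites refuses to fire while the flag is set, so trace output never generates further trace records. The guard looks approximately like the macro below (reconstructed from the usual <sys/ktrace.h> form; treat the exact expression as an approximation rather than a quote of the header).

/*
 * Trace only if the facility bit for this record type is enabled for the
 * process AND no ktrace routine is currently active on its behalf.
 */
#define KTRPOINT(p, type) \
	(((p)->p_traceflag & (1 << (type))) && \
	 !((p)->p_traceflag & KTRFAC_ACTIVE))
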
void
ktrsysret(vp, code, error, retval)
struct vnode *vp;
int code, error, retval;
{
struct ktr_header *kth = ktrgetheader(KTR_SYSRET);
struct ktr_header *kth;
struct ktr_sysret ktp;
struct proc *p = curproc; /* XXX */
p->p_traceflag |= KTRFAC_ACTIVE;
kth = ktrgetheader(KTR_SYSRET);
ktp.ktr_code = code;
ktp.ktr_error = error;
ktp.ktr_retval = retval; /* what about val2 ? */
@ -102,23 +106,26 @@ ktrsysret(vp, code, error, retval)
ktrwrite(vp, kth);
FREE(kth, M_TEMP);
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
void
ktrnamei(vp, path)
struct vnode *vp;
char *path;
{
struct ktr_header *kth = ktrgetheader(KTR_NAMEI);
struct ktr_header *kth;
struct proc *p = curproc; /* XXX */
p->p_traceflag |= KTRFAC_ACTIVE;
kth = ktrgetheader(KTR_NAMEI);
kth->ktr_len = strlen(path);
kth->ktr_buf = path;
ktrwrite(vp, kth);
FREE(kth, M_TEMP);
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
void
ktrgenio(vp, fd, rw, iov, len, error)
struct vnode *vp;
int fd;
@ -126,13 +133,16 @@ ktrgenio(vp, fd, rw, iov, len, error)
register struct iovec *iov;
int len, error;
{
struct ktr_header *kth = ktrgetheader(KTR_GENIO);
struct ktr_header *kth;
register struct ktr_genio *ktp;
register caddr_t cp;
register int resid = len, cnt;
struct proc *p = curproc; /* XXX */
if (error)
return;
p->p_traceflag |= KTRFAC_ACTIVE;
kth = ktrgetheader(KTR_GENIO);
MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
M_TEMP, M_WAITOK);
ktp->ktr_fd = fd;
@ -154,17 +164,21 @@ ktrgenio(vp, fd, rw, iov, len, error)
done:
FREE(kth, M_TEMP);
FREE(ktp, M_TEMP);
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
void
ktrpsig(vp, sig, action, mask, code)
struct vnode *vp;
sig_t action;
int sig, mask, code;
struct vnode *vp;
int sig;
sig_t action;
int mask, code;
{
struct ktr_header *kth = ktrgetheader(KTR_PSIG);
struct ktr_header *kth;
struct ktr_psig kp;
struct proc *p = curproc; /* XXX */
p->p_traceflag |= KTRFAC_ACTIVE;
kth = ktrgetheader(KTR_PSIG);
kp.signo = (char)sig;
kp.action = action;
kp.mask = mask;
@ -174,6 +188,27 @@ ktrpsig(vp, sig, action, mask, code)
ktrwrite(vp, kth);
FREE(kth, M_TEMP);
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
ktrcsw(vp, out, user)
struct vnode *vp;
int out, user;
{
struct ktr_header *kth;
struct ktr_csw kc;
struct proc *p = curproc; /* XXX */
p->p_traceflag |= KTRFAC_ACTIVE;
kth = ktrgetheader(KTR_CSW);
kc.out = out;
kc.user = user;
kth->ktr_buf = (caddr_t)&kc;
kth->ktr_len = sizeof (struct ktr_csw);
ktrwrite(vp, kth);
FREE(kth, M_TEMP);
p->p_traceflag &= ~KTRFAC_ACTIVE;
}
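
ktrcsw() is new in this commit and records context-switch events (KTR_CSW); "out" marks whether the process is being switched out or resumed, and "user" records where the switch originated. The diff does not show its callers, but a call site in the sleep/switch path would presumably look like the fragment below, following the usual ktrace pattern; the placement, the p_tracep trace vnode, and the argument values are assumptions, not taken from this commit.

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);	/* about to switch out */
#endif
	mi_switch();				/* give up the CPU */
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);	/* switched back in */
#endif
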
/* Interface and common routines */
@ -181,16 +216,13 @@ ktrpsig(vp, sig, action, mask, code)
/*
* ktrace system call
*/
struct ktrace_args {
char *fname;
int ops;
int facs;
int pid;
};
/* ARGSUSED */
int
ktrace(curp, uap, retval)
struct proc *curp;
register struct ktrace_args *uap;
@ -206,18 +238,27 @@ ktrace(curp, uap, retval)
int error = 0;
struct nameidata nd;
curp->p_traceflag |= KTRFAC_ACTIVE;
if (ops != KTROP_CLEAR) {
/*
* an operation which requires a file argument.
*/
#ifdef notyet
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->fname, curp);
if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
#else
nd.ni_segflg = UIO_USERSPACE;
nd.ni_dirp = uap->fname;
if (error = vn_open(&nd, curp, FREAD|FWRITE, 0))
nd.ni_dirp = uap->fname;
if (error = vn_open(&nd, curp, FREAD|FWRITE, 0)) {
#endif
curp->p_traceflag &= ~KTRFAC_ACTIVE;
return (error);
}
vp = nd.ni_vp;
VOP_UNLOCK(vp);
if (vp->v_type != VREG) {
(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
curp->p_traceflag &= ~KTRFAC_ACTIVE;
return (EACCES);
}
}
@ -282,12 +323,13 @@ ktrace(curp, uap, retval)
done:
if (vp != NULL)
(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
curp->p_traceflag &= ~KTRFAC_ACTIVE;
return (error);
}
int
ktrops(curp, p, ops, facs, vp)
struct proc *curp, *p;
struct proc *p, *curp;
int ops, facs;
struct vnode *vp;
{
@ -322,7 +364,6 @@ ktrops(curp, p, ops, facs, vp)
return (1);
}
int
ktrsetchildren(curp, top, ops, facs, vp)
struct proc *curp, *top;
int ops, facs;
@ -358,7 +399,6 @@ ktrsetchildren(curp, top, ops, facs, vp)
/*NOTREACHED*/
}
void
ktrwrite(vp, kth)
struct vnode *vp;
register struct ktr_header *kth;
@ -413,7 +453,6 @@ ktrwrite(vp, kth)
*
* TODO: check groups. use caller effective gid.
*/
int
ktrcanset(callp, targetp)
struct proc *callp, *targetp;
{
@ -430,3 +469,5 @@ ktrcanset(callp, targetp)
return (0);
}
#endif

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)kern_resource.c 7.13 (Berkeley) 5/9/91
* $Id: kern_resource.c,v 1.17 1994/05/17 04:21:59 cgd Exp $
* $Id: kern_resource.c,v 1.18 1994/05/18 05:12:39 cgd Exp $
*/
#include <sys/param.h>
@ -432,8 +432,6 @@ getrusage(p, uap, retval)
switch (uap->who) {
case RUSAGE_SELF: {
int s;
rup = &p->p_stats->p_ru;
calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
break;

View File

@ -1 +1 @@
revision 1.23 intentionally removed
revision 1.24 intentionally removed

View File

@ -1 +1 @@
revision 1.24 intentionally removed
revision 1.25 intentionally removed

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)vfs_conf.c 7.3 (Berkeley) 6/28/90
* $Id: vfs_conf.c,v 1.18 1994/05/11 18:51:18 chopps Exp $
* $Id: vfs_conf.c,v 1.19 1994/05/18 05:12:43 cgd Exp $
*/
#include <sys/param.h>
@ -41,7 +41,7 @@
* These define the root filesystem and device.
*/
struct mount *rootfs;
struct vnode *rootdir;
struct vnode *rootvnode;
/*
* Set up the filesystem operations for vnodes.

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)vfs_lookup.c 7.32 (Berkeley) 5/21/91
* $Id: vfs_lookup.c,v 1.10 1994/05/17 04:22:02 cgd Exp $
* $Id: vfs_lookup.c,v 1.11 1994/05/18 05:12:44 cgd Exp $
*/
#include <sys/param.h>
@ -114,7 +114,7 @@ namei(ndp, p)
* Get starting point for the translation.
*/
if ((ndp->ni_rootdir = fdp->fd_rdir) == NULL)
ndp->ni_rootdir = rootdir;
ndp->ni_rootdir = rootvnode;
dp = fdp->fd_cdir;
VREF(dp);
for (;;) {
@ -331,7 +331,7 @@ dirloop:
*/
if (ndp->ni_isdotdot) {
for (;;) {
if ((dp == ndp->ni_rootdir) || (dp == rootdir)) {
if ((dp == ndp->ni_rootdir) || (dp == rootvnode)) {
ndp->ni_dvp = dp;
ndp->ni_vp = dp;
VREF(dp);

View File

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode.h 7.39 (Berkeley) 6/27/91
* $Id: vnode.h,v 1.19 1994/05/11 18:50:41 chopps Exp $
* $Id: vnode.h,v 1.20 1994/05/18 05:12:55 cgd Exp $
*/
#ifndef _SYS_VNODE_H_
@ -299,7 +299,7 @@ struct vnodeops {
/*
* Global vnode data.
*/
extern struct vnode *rootdir; /* root (i.e. "/") vnode */
extern struct vnode *rootvnode; /* root (i.e. "/") vnode */
extern long desiredvnodes; /* number of vnodes desired */
extern struct vattr va_null; /* predefined null vattr structure */