Turn the proclist lock into a read/write spinlock. Update proclist locking

calls to reflect this.  Also, block statclock rather than softclock
in the proclist locking functions, to address a problem reported on
current-users by Sean Doran.
This commit is contained in:
thorpej 1999-07-25 06:30:33 +00:00
parent 50f9f26fe1
commit ea8fb3e04a
10 changed files with 36 additions and 56 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_ktrace.c,v 1.36 1999/07/22 21:08:31 thorpej Exp $ */
/* $NetBSD: kern_ktrace.c,v 1.37 1999/07/25 06:30:34 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -335,7 +335,7 @@ sys_fktrace(curp, v, retval)
* Clear all uses of the tracefile
*/
if (KTROP(ops) == KTROP_CLEARFILE) {
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_tracep == fp) {
if (ktrcanset(curp, p))
@ -442,7 +442,7 @@ sys_ktrace(curp, v, retval)
* Clear all uses of the tracefile
*/
if (KTROP(ops) == KTROP_CLEARFILE) {
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_tracep == vp &&
!ktrops(curp, p, KTROP_CLEAR, ~0, vp))
@ -621,7 +621,7 @@ ktrwrite(p, v, kth)
*/
log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
error);
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_tracep == v)
ktrderef(p);

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_proc.c,v 1.33 1999/07/22 21:08:31 thorpej Exp $ */
/* $NetBSD: kern_proc.c,v 1.34 1999/07/25 06:30:34 thorpej Exp $ */
/*-
* Copyright (c) 1999 The NetBSD Foundation, Inc.
@ -173,7 +173,7 @@ procinit()
for (pd = proclists; pd->pd_list != NULL; pd++)
LIST_INIT(pd->pd_list);
lockinit(&proclist_lock, PZERO, "proclk", 0, 0);
spinlockinit(&proclist_lock, "proclk", 0);
LIST_INIT(&deadproc);
simple_lock_init(&deadproc_slock);
@ -198,27 +198,16 @@ procinit()
* Acquire a read lock on the proclist.
*/
void
proclist_lock_read(flags)
int flags;
proclist_lock_read()
{
int error, s;
/* Block schedcpu() while we acquire the lock. */
s = splsoftclock();
/*
* We spin here if called with LK_NOWAIT; schedcpu() uses that
* to prevent sleeping.
*/
do {
error = lockmgr(&proclist_lock, LK_SHARED | flags, NULL);
s = splstatclock();
error = spinlockmgr(&proclist_lock, LK_SHARED, NULL);
#ifdef DIAGNOSTIC
if (error != 0 && error != EBUSY)
panic("proclist_lock_read: failed to acquire lock");
if (error)
panic("proclist_lock_read: failed to acquire lock");
#endif
} while (error != 0);
/* Let schedcpu() back in. */
splx(s);
}
@ -230,12 +219,8 @@ proclist_unlock_read()
{
int s;
/* Block schedcpu() while we release the lock. */
s = splsoftclock();
(void) lockmgr(&proclist_lock, LK_RELEASE, NULL);
/* Let schedcpu() back in. */
s = splstatclock();
(void) spinlockmgr(&proclist_lock, LK_RELEASE, NULL);
splx(s);
}
@ -247,15 +232,12 @@ proclist_lock_write()
{
int error, s;
/* Block schedcpu() while lock is held. */
s = splsoftclock();
error = lockmgr(&proclist_lock, LK_EXCLUSIVE, NULL);
s = splstatclock();
error = spinlockmgr(&proclist_lock, LK_EXCLUSIVE, NULL);
#ifdef DIAGNOSTIC
if (error != 0)
panic("proclist_lock: failed to acquire lock");
#endif
return (s);
}
@ -267,9 +249,7 @@ proclist_unlock_write(s)
int s;
{
(void) lockmgr(&proclist_lock, LK_RELEASE, NULL);
/* Let schedcpu() back in. */
(void) spinlockmgr(&proclist_lock, LK_RELEASE, NULL);
splx(s);
}
@ -334,7 +314,7 @@ pfind(pid)
{
struct proc *p;
proclist_lock_read(0);
proclist_lock_read();
for (p = PIDHASH(pid)->lh_first; p != 0; p = p->p_hash.le_next)
if (p->p_pid == pid)
goto out;

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_resource.c,v 1.51 1999/07/22 21:08:31 thorpej Exp $ */
/* $NetBSD: kern_resource.c,v 1.52 1999/07/25 06:30:34 thorpej Exp $ */
/*-
* Copyright (c) 1982, 1986, 1991, 1993
@ -104,7 +104,7 @@ sys_getpriority(curp, v, retval)
case PRIO_USER:
if (SCARG(uap, who) == 0)
SCARG(uap, who) = curp->p_ucred->cr_uid;
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
if (p->p_ucred->cr_uid == SCARG(uap, who) &&
p->p_nice < low)
@ -167,7 +167,7 @@ sys_setpriority(curp, v, retval)
case PRIO_USER:
if (SCARG(uap, who) == 0)
SCARG(uap, who) = curp->p_ucred->cr_uid;
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next)
if (p->p_ucred->cr_uid == SCARG(uap, who)) {
error = donice(curp, p, SCARG(uap, prio));

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_sig.c,v 1.91 1999/07/22 21:08:31 thorpej Exp $ */
/* $NetBSD: kern_sig.c,v 1.92 1999/07/25 06:30:34 thorpej Exp $ */
/*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
@ -631,7 +631,7 @@ killpg1(cp, signum, pgid, all)
/*
* broadcast
*/
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_pid <= 1 || p->p_flag & P_SYSTEM ||
p == cp || !CANSIGNAL(cp, pc, p, signum))

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_synch.c,v 1.61 1999/07/22 21:08:32 thorpej Exp $ */
/* $NetBSD: kern_synch.c,v 1.62 1999/07/25 06:30:35 thorpej Exp $ */
/*-
* Copyright (c) 1982, 1986, 1990, 1991, 1993
@ -186,7 +186,7 @@ schedcpu(arg)
register unsigned int newcpu;
wakeup((caddr_t)&lbolt);
proclist_lock_read(LK_NOWAIT);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
/*
* Increment time in/out of memory and sleep time

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_sysctl.c,v 1.49 1999/07/22 21:08:32 thorpej Exp $ */
/* $NetBSD: kern_sysctl.c,v 1.50 1999/07/25 06:30:35 thorpej Exp $ */
/*-
* Copyright (c) 1982, 1986, 1989, 1993
@ -726,7 +726,7 @@ sysctl_doproc(name, namelen, where, sizep)
if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL))
return (EINVAL);
proclist_lock_read(0);
proclist_lock_read();
pd = proclists;
again:

View File

@ -1,4 +1,4 @@
/* $NetBSD: vfs_syscalls.c,v 1.143 1999/07/22 23:00:27 thorpej Exp $ */
/* $NetBSD: vfs_syscalls.c,v 1.144 1999/07/25 06:30:35 thorpej Exp $ */
/*
* Copyright (c) 1989, 1993
@ -362,7 +362,7 @@ checkdirs(olddp)
return;
if (VFS_ROOT(olddp->v_mountedhere, &newdp))
panic("mount: lost mount");
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
cwdi = p->p_cwdi;
if (cwdi->cwdi_cdir == olddp) {

View File

@ -1,4 +1,4 @@
/* $NetBSD: proc.h,v 1.80 1999/07/22 21:08:32 thorpej Exp $ */
/* $NetBSD: proc.h,v 1.81 1999/07/25 06:30:33 thorpej Exp $ */
/*-
* Copyright (c) 1986, 1989, 1991, 1993
@ -375,7 +375,7 @@ void cpu_wait __P((struct proc *));
void cpu_exit __P((struct proc *));
int proc_isunder __P((struct proc *, struct proc*));
void proclist_lock_read __P((int));
void proclist_lock_read __P((void));
void proclist_unlock_read __P((void));
int proclist_lock_write __P((void));
void proclist_unlock_write __P((int));

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_glue.c,v 1.28 1999/07/22 22:58:38 thorpej Exp $ */
/* $NetBSD: uvm_glue.c,v 1.29 1999/07/25 06:30:36 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -447,7 +447,7 @@ loop:
#endif
pp = NULL; /* process to choose */
ppri = INT_MIN; /* its priority */
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
/* is it a runnable swapped out process? */
@ -548,7 +548,7 @@ uvm_swapout_threads()
*/
outp = outp2 = NULL;
outpri = outpri2 = 0;
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (!swappable(p))
continue;

View File

@ -1,4 +1,4 @@
/* $NetBSD: uvm_meter.c,v 1.9 1999/07/22 22:58:38 thorpej Exp $ */
/* $NetBSD: uvm_meter.c,v 1.10 1999/07/25 06:30:36 thorpej Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@ -95,7 +95,7 @@ uvm_loadav(avg)
int i, nrun;
struct proc *p;
proclist_lock_read(LK_NOWAIT);
proclist_lock_read();
for (nrun = 0, p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
switch (p->p_stat) {
case SSLEEP:
@ -172,7 +172,7 @@ uvm_total(totalp)
* calculate process statistics
*/
proclist_lock_read(0);
proclist_lock_read();
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if (p->p_flag & P_SYSTEM)
continue;