- Disallow setting of affinity for zombie LWPs.

- Fix the possible NULL dereference when LWP exiting.
- Fix the inheritance of affinity.
This commit is contained in:
rmind 2008-07-14 01:19:37 +00:00
parent d489642431
commit 4f91cff093
2 changed files with 27 additions and 12 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_lwp.c,v 1.121 2008/07/02 19:53:12 rmind Exp $ */
/* $NetBSD: kern_lwp.c,v 1.122 2008/07/14 01:19:37 rmind Exp $ */
/*-
* Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@ -206,7 +206,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.121 2008/07/02 19:53:12 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.122 2008/07/14 01:19:37 rmind Exp $");
#include "opt_ddb.h"
#include "opt_lockdebug.h"
@ -649,9 +649,16 @@ lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, bool inmem, int flags,
/* Inherit a processor-set */
l2->l_psid = l1->l_psid;
/* Inherit an affinity */
if (l1->l_affinity) {
kcpuset_use(l1->l_affinity);
l2->l_affinity = l1->l_affinity;
if (l1->l_flag & LW_AFFINITY) {
proc_t *p = l1->l_proc;
mutex_enter(p->p_lock);
if (l1->l_flag & LW_AFFINITY) {
kcpuset_use(l1->l_affinity);
l2->l_affinity = l1->l_affinity;
l2->l_flag |= LW_AFFINITY;
}
mutex_exit(p->p_lock);
}
/* Look for a CPU to start */
l2->l_cpu = sched_takecpu(l2);
@ -747,11 +754,6 @@ lwp_exit(struct lwp *l)
kauth_cred_free(l->l_cred);
callout_destroy(&l->l_timeout_ch);
if (l->l_affinity) {
kcpuset_unuse(l->l_affinity, NULL);
l->l_affinity = NULL;
}
/*
* While we can still block, mark the LWP as unswappable to
* prevent conflicts with the with the swapper.
@ -805,6 +807,8 @@ lwp_exit(struct lwp *l)
l->l_stat = LSZOMB;
if (l->l_name != NULL)
strcpy(l->l_name, "(zombie)");
if (l->l_flag & LW_AFFINITY)
l->l_flag &= ~LW_AFFINITY;
lwp_unlock(l);
p->p_nrlwps--;
cv_broadcast(&p->p_lwpcv);
@ -812,6 +816,12 @@ lwp_exit(struct lwp *l)
l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
mutex_exit(p->p_lock);
/* Safe without lock since LWP is in zombie state */
if (l->l_affinity) {
kcpuset_unuse(l->l_affinity, NULL);
l->l_affinity = NULL;
}
/*
* We can no longer block. At this point, lwp_free() may already
* be gunning for us. On a multi-CPU system, we may be off p_lwps.

View File

@ -1,4 +1,4 @@
/* $NetBSD: sys_sched.c,v 1.26 2008/06/22 00:06:36 christos Exp $ */
/* $NetBSD: sys_sched.c,v 1.27 2008/07/14 01:19:37 rmind Exp $ */
/*
* Copyright (c) 2008, Mindaugas Rasiukevicius <rmind at NetBSD org>
@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.26 2008/06/22 00:06:36 christos Exp $");
__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.27 2008/07/14 01:19:37 rmind Exp $");
#include <sys/param.h>
@ -387,6 +387,11 @@ sys__sched_setaffinity(struct lwp *l,
if (lid && lid != t->l_lid)
continue;
lwp_lock(t);
/* It is not allowed to set the affinity for zombie LWPs */
if (t->l_stat == LSZOMB) {
lwp_unlock(t);
continue;
}
if (cpuset) {
/* Set the affinity flag and new CPU set */
t->l_flag |= LW_AFFINITY;