Remove LW_AFFINITY flag and fix some bugs in affinity mask handling.

rmind, 2011-08-07 21:13:05 +00:00
parent 5deb799841
commit 501dd321fb

7 changed files with 58 additions and 60 deletions
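
The core of the change: the LW_AFFINITY flag bit duplicated state already carried by the l_affinity pointer, and the two could drift apart. After this commit a non-NULL l_affinity is the sole indication that an affinity mask is assigned. A minimal sketch of the idea in plain C (illustrative names only, not NetBSD code):

#include <stdbool.h>
#include <stddef.h>

struct cpuset;				/* opaque, refcounted CPU mask */

struct thread {
	unsigned	 flags;		/* no "affinity assigned" bit anymore */
	struct cpuset	*affinity;	/* NULL <=> no affinity mask */
};

/* Before: (t->flags & HAS_AFFINITY) could disagree with t->affinity. */
static bool
thread_has_affinity(const struct thread *t)
{
	return t->affinity != NULL;	/* the pointer is authoritative */
}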

sys/kern/kern_cpu.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_cpu.c,v 1.48 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: kern_cpu.c,v 1.49 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
@@ -56,7 +56,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.48 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.49 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -311,13 +311,12 @@ cpu_xc_offline(struct cpu_info *ci)
 			lwp_unlock(l);
 			continue;
 		}
-		/* Normal case - no affinity */
-		if ((l->l_flag & LW_AFFINITY) == 0) {
+		/* Regular case - no affinity. */
+		if (l->l_affinity == NULL) {
 			lwp_migrate(l, target_ci);
 			continue;
 		}
-		/* Affinity is set, find an online CPU in the set */
-		KASSERT(l->l_affinity != NULL);
+		/* Affinity is set, find an online CPU in the set. */
 		for (CPU_INFO_FOREACH(cii, mci)) {
 			mspc = &mci->ci_schedstate;
 			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
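
cpu_xc_offline() above must evict every LWP from the CPU going offline; for an LWP with affinity it scans for another online CPU inside the mask. A reduced sketch of that selection loop, assuming hypothetical helpers cpu_online() and cpu_in_set():

#include <stdbool.h>
#include <stddef.h>

#define NCPU 8				/* assumption: fixed CPU count */

struct cpuset;
extern bool cpu_online(int);			/* hypothetical helpers */
extern bool cpu_in_set(const struct cpuset *, int);

/* Return an online CPU allowed by the mask, or -1 if none exists. */
static int
pick_migration_target(const struct cpuset *affinity)
{
	for (int ci = 0; ci < NCPU; ci++) {
		if (!cpu_online(ci))
			continue;
		/* NULL mask: no affinity, any online CPU will do. */
		if (affinity == NULL || cpu_in_set(affinity, ci))
			return ci;
	}
	return -1;
}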

sys/kern/kern_lwp.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_lwp.c,v 1.161 2011/07/30 17:01:04 christos Exp $	*/
+/*	$NetBSD: kern_lwp.c,v 1.162 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009 The NetBSD Foundation, Inc.
@@ -211,7 +211,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.161 2011/07/30 17:01:04 christos Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_lwp.c,v 1.162 2011/08/07 21:13:05 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_lockdebug.h"
@@ -803,18 +803,19 @@ lwp_create(lwp_t *l1, proc_t *p2, vaddr_t uaddr, int flags,
 	p2->p_nlwps++;
 	p2->p_nrlwps++;
 
+	KASSERT(l2->l_affinity == NULL);
+
 	if ((p2->p_flag & PK_SYSTEM) == 0) {
-		/* Inherit an affinity */
-		if (l1->l_flag & LW_AFFINITY) {
+		/* Inherit the affinity mask. */
+		if (l1->l_affinity) {
 			/*
 			 * Note that we hold the state lock while inheriting
 			 * the affinity to avoid race with sched_setaffinity().
 			 */
 			lwp_lock(l1);
-			if (l1->l_flag & LW_AFFINITY) {
+			if (l1->l_affinity) {
 				kcpuset_use(l1->l_affinity);
 				l2->l_affinity = l1->l_affinity;
-				l2->l_flag |= LW_AFFINITY;
 			}
 			lwp_unlock(l1);
 		}
@@ -987,12 +988,8 @@ lwp_exit(struct lwp *l)
 	lwp_lock(l);
 	l->l_stat = LSZOMB;
-	if (l->l_name != NULL)
+	if (l->l_name != NULL) {
 		strcpy(l->l_name, "(zombie)");
-	if (l->l_flag & LW_AFFINITY) {
-		l->l_flag &= ~LW_AFFINITY;
-	} else {
-		KASSERT(l->l_affinity == NULL);
 	}
 	lwp_unlock(l);
 	p->p_nrlwps--;
@@ -1001,12 +998,6 @@ lwp_exit(struct lwp *l)
 		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;
 	mutex_exit(p->p_lock);
 
-	/* Safe without lock since LWP is in zombie state */
-	if (l->l_affinity) {
-		kcpuset_unuse(l->l_affinity, NULL);
-		l->l_affinity = NULL;
-	}
-
 	/*
 	 * We can no longer block.  At this point, lwp_free() may already
 	 * be gunning for us.  On a multi-CPU system, we may be off p_lwps.
@@ -1102,6 +1093,17 @@ lwp_free(struct lwp *l, bool recycle, bool last)
 	ksiginfo_queue_drain(&kq);
 	cv_destroy(&l->l_sigcv);
 
+	/*
+	 * Free lwpctl structure and affinity.
+	 */
+	if (l->l_lwpctl) {
+		lwp_ctl_free(l);
+	}
+	if (l->l_affinity) {
+		kcpuset_unuse(l->l_affinity, NULL);
+		l->l_affinity = NULL;
+	}
+
 	/*
 	 * Free the LWP's turnstile and the LWP structure itself unless the
 	 * caller wants to recycle them.  Also, free the scheduler specific
@@ -1112,8 +1114,6 @@ lwp_free(struct lwp *l, bool recycle, bool last)
 	 *
 	 * We don't recycle the VM resources at this time.
 	 */
 
-	if (l->l_lwpctl != NULL)
-		lwp_ctl_free(l);
 	if (!recycle && l->l_ts != &turnstile0)
 		pool_cache_put(turnstile_cache, l->l_ts);
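
With the flag gone, the mask's lifetime is managed purely by reference counting: lwp_create() takes a reference when inheriting, and the release now happens in exactly one place, lwp_free(), rather than being split between lwp_exit() and flag bookkeeping. A sketch of what kcpuset_use()/kcpuset_unuse() amount to (illustrative; the real kcpuset_unuse() can also defer destruction to a caller-supplied list):

#include <stdatomic.h>
#include <stdlib.h>

struct mask {
	atomic_uint	 refcnt;	/* shared by all LWPs using the mask */
	unsigned long	 bits[1];
};

static void
mask_use(struct mask *m)		/* cf. kcpuset_use() */
{
	atomic_fetch_add(&m->refcnt, 1);
}

static void
mask_unuse(struct mask *m)		/* cf. kcpuset_unuse(m, NULL) */
{
	if (atomic_fetch_sub(&m->refcnt, 1) == 1)
		free(m);		/* dropped the last reference */
}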

sys/kern/kern_runq.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_runq.c,v 1.31 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: kern_runq.c,v 1.32 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*
  * Copyright (c) 2007, 2008 Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.31 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_runq.c,v 1.32 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/kernel.h>
@@ -346,15 +346,15 @@ sched_migratable(const struct lwp *l, struct cpu_info *ci)
 	const struct schedstate_percpu *spc = &ci->ci_schedstate;
 
 	KASSERT(lwp_locked(__UNCONST(l), NULL));
 
-	/* CPU is offline */
+	/* Is CPU offline? */
 	if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
 		return false;
 
-	/* Affinity bind */
-	if (__predict_false(l->l_flag & LW_AFFINITY))
+	/* Is affinity set? */
+	if (__predict_false(l->l_affinity))
 		return kcpuset_isset(l->l_affinity, cpu_index(ci));
 
-	/* Processor-set */
+	/* Is there a processor-set? */
 	return (spc->spc_psid == l->l_psid);
 }

sys/kern/subr_kcpuset.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: subr_kcpuset.c,v 1.1 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: subr_kcpuset.c,v 1.2 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2011 The NetBSD Foundation, Inc.
@@ -41,7 +41,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.1 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kcpuset.c,v 1.2 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/types.h>
@@ -216,17 +216,16 @@ kcpuset_create(kcpuset_t **retkcp)
 void
 kcpuset_destroy(kcpuset_t *kcp)
 {
-	kcpuset_impl_t *kc, *nkc;
+	kcpuset_impl_t *kc;
 
 	KASSERT(kc_initialised);
 	KASSERT(kcp != NULL);
 
-	kc = KC_GETSTRUCT(kcp);
 	do {
-		nkc = KC_GETSTRUCT(kc->kc_next);
+		kc = KC_GETSTRUCT(kcp);
+		kcp = kc->kc_next;
 		pool_cache_put(kc_cache, kc);
-		kc = nkc;
-	} while (kc);
+	} while (kcp);
 }
 
 /*
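
The kcpuset_destroy() rewrite fixes one of the bugs named in the commit message. KC_GETSTRUCT() maps an exposed kcpuset_t pointer back to its enclosing implementation record (a container-of operation), so applying it to a NULL kc_next, as the old loop did before testing for the end of the chain, produced a bogus non-NULL pointer. The new loop advances the public pointer first and tests it directly. A reduced sketch with stand-in names:

#include <stddef.h>
#include <stdlib.h>

typedef struct { unsigned long bits; } set_t;	/* exposed type */

typedef struct impl {
	set_t	*next;		/* cf. kc_next: chained storage */
	set_t	 set;		/* the part callers see */
} impl_t;

/* container-of: recover the impl record from the exposed pointer */
#define GETSTRUCT(p)	((impl_t *)((char *)(p) - offsetof(impl_t, set)))

static void
destroy_chain(set_t *p)			/* p != NULL, as KASSERTed above */
{
	do {
		impl_t *kc = GETSTRUCT(p);	/* map before advancing */
		p = kc->next;			/* NULL ends the chain cleanly */
		free(kc);
	} while (p != NULL);
}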

sys/kern/sys_pset.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_pset.c,v 1.16 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: sys_pset.c,v 1.17 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*
  * Copyright (c) 2008, Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -36,7 +36,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_pset.c,v 1.16 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_pset.c,v 1.17 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
@@ -366,10 +366,11 @@ sys_pset_assign(struct lwp *l, const struct sys_pset_assign_args *uap,
 	 * with this target CPU in it.
 	 */
 	LIST_FOREACH(t, &alllwp, l_list) {
-		if ((t->l_flag & LW_AFFINITY) == 0)
+		if (t->l_affinity == NULL) {
 			continue;
+		}
 		lwp_lock(t);
-		if ((t->l_flag & LW_AFFINITY) == 0) {
+		if (t->l_affinity == NULL) {
 			lwp_unlock(t);
 			continue;
 		}
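
The loop above is a standard cheap-skip pattern: test l_affinity unlocked to pass over the (common) unbound LWPs, then re-test after taking the LWP lock, since the field may have been cleared in the meantime. In reduced form, with pthread locking standing in for the kernel's LWP lock:

#include <pthread.h>
#include <stddef.h>

struct cpuset;

struct thread {
	pthread_mutex_t	 lock;
	struct cpuset	*affinity;	/* stable only while lock is held */
};

static void
visit_if_bound(struct thread *t, void (*fn)(struct thread *))
{
	if (t->affinity == NULL)	/* unlocked peek: may be stale */
		return;
	pthread_mutex_lock(&t->lock);
	if (t->affinity == NULL) {	/* recheck under the lock */
		pthread_mutex_unlock(&t->lock);
		return;
	}
	fn(t);				/* affinity cannot change here */
	pthread_mutex_unlock(&t->lock);
}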

sys/kern/sys_sched.c

@@ -1,4 +1,4 @@
-/*	$NetBSD: sys_sched.c,v 1.36 2011/08/07 13:33:01 rmind Exp $	*/
+/*	$NetBSD: sys_sched.c,v 1.37 2011/08/07 21:13:05 rmind Exp $	*/
 
 /*
  * Copyright (c) 2008, 2011 Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -42,7 +42,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.36 2011/08/07 13:33:01 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.37 2011/08/07 21:13:05 rmind Exp $");
 
 #include <sys/param.h>
@@ -425,32 +425,33 @@ sys__sched_setaffinity(struct lwp *l,
 	}
 #endif
 
-	/* Find the LWP(s) */
+	/* Iterate through LWP(s). */
 	lcnt = 0;
 	lid = SCARG(uap, lid);
 	LIST_FOREACH(t, &p->p_lwps, l_sibling) {
-		if (lid && lid != t->l_lid)
+		if (lid && lid != t->l_lid) {
 			continue;
+		}
 		lwp_lock(t);
-		/* It is not allowed to set the affinity for zombie LWPs */
+		/* No affinity for zombie LWPs. */
 		if (t->l_stat == LSZOMB) {
 			lwp_unlock(t);
 			continue;
 		}
+		/* First, release existing affinity, if any. */
+		if (t->l_affinity) {
+			kcpuset_unuse(t->l_affinity, &kcpulst);
+		}
 		if (kcset) {
-			/* Set the affinity flag and new CPU set */
-			t->l_flag |= LW_AFFINITY;
+			/*
+			 * Hold a reference on affinity mask, assign mask to
+			 * LWP and migrate it to another CPU (unlocks LWP).
+			 */
 			kcpuset_use(kcset);
-			if (t->l_affinity != NULL)
-				kcpuset_unuse(t->l_affinity, &kcpulst);
 			t->l_affinity = kcset;
-			/* Migrate to another CPU, unlocks LWP */
 			lwp_migrate(t, ci);
 		} else {
-			/* Unset the affinity flag */
-			t->l_flag &= ~LW_AFFINITY;
-			if (t->l_affinity != NULL)
-				kcpuset_unuse(t->l_affinity, &kcpulst);
+			/* Old affinity mask is released, just clear. */
 			t->l_affinity = NULL;
 			lwp_unlock(t);
 		}
@@ -511,8 +512,7 @@ sys__sched_getaffinity(struct lwp *l,
 		goto out;
 	}
 	lwp_lock(t);
-	if (t->l_flag & LW_AFFINITY) {
-		KASSERT(t->l_affinity != NULL);
+	if (t->l_affinity) {
 		kcpuset_copy(kcset, t->l_affinity);
 	} else {
 		kcpuset_zero(kcset);
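
The setaffinity rework also simplifies the update order: any old mask reference is dropped once, up front, after which the new mask is either installed with its own reference or the field is simply cleared. In outline (stand-in names for the kcpuset calls):

struct cpuset;
void mask_use(struct cpuset *);		/* cf. kcpuset_use() */
void mask_unuse(struct cpuset *);	/* cf. kcpuset_unuse() */

struct thread {
	struct cpuset *affinity;
};

/* Caller holds the thread lock, as sys__sched_setaffinity() does. */
static void
set_affinity_locked(struct thread *t, struct cpuset *newset)
{
	/* First, release the existing affinity, if any. */
	if (t->affinity != NULL)
		mask_unuse(t->affinity);

	if (newset != NULL) {
		mask_use(newset);	/* reference held by the thread */
		t->affinity = newset;
	} else {
		t->affinity = NULL;	/* old mask already released */
	}
}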

sys/sys/lwp.h

@@ -1,4 +1,4 @@
-/*	$NetBSD: lwp.h,v 1.154 2011/08/07 14:03:16 rmind Exp $	*/
+/*	$NetBSD: lwp.h,v 1.155 2011/08/07 21:13:06 rmind Exp $	*/
 
 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2009, 2010
@@ -227,7 +227,6 @@ extern lwp_t lwp0;		/* LWP for proc0. */
 #define	LW_BATCH	0x00040000 /* LWP tends to hog CPU */
 #define	LW_WCORE	0x00080000 /* Stop for core dump on return to user */
 #define	LW_WEXIT	0x00100000 /* Exit before return to user */
-#define	LW_AFFINITY	0x00200000 /* Affinity is assigned to the thread */
 #define	LW_SA_UPCALL	0x00400000 /* SA upcall is pending */
 #define	LW_SA_BLOCKING	0x00800000 /* Blocking in tsleep() */
 #define	LW_PENDSIG	0x01000000 /* Pending signal for us */