- Avoid the race with CPU online/offline state changes when setting the
  affinity (cpu_lock protects these operations now).
- Disallow setting a CPU offline if there are bound LWPs that would have
  no CPU to migrate to.
- Disallow setting the affinity of an LWP if all CPUs in the dynamic
  CPU-set are offline.
- sched_setaffinity: fix invalid check of kcpuset_isset().
- Rename cpu_setonline() to cpu_setstate().

Should fix PR/39349.
rmind 2008-10-31 00:36:22 +00:00
parent 9b24696c04
commit 8f1873ea3b
5 changed files with 102 additions and 58 deletions
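Before the per-file diffs: the "invalid check of kcpuset_isset()" above refers to treating a boolean predicate's return value as an error code. Below is a standalone mock of the corrected scan; mock_isset, the arrays, and main() are illustrative stand-ins, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

#define NCPU 4

/*
 * Stand-in for the kernel's kcpuset_isset(): returns non-zero when the
 * CPU's bit is set, 0 when it is not.  A boolean predicate, not an
 * error code, which is exactly what the old check got wrong.
 */
static int
mock_isset(unsigned cpu, const bool *set)
{
	return set[cpu];
}

int
main(void)
{
	bool set[NCPU]    = { false, true, false, true };	/* affinity set */
	bool online[NCPU] = { true, false, true, true };	/* CPU 1 offline */
	bool offline_in_set = false;
	int cpu, found = -1;

	/* Corrected scan: 0 means "not in the set", then check the state. */
	for (cpu = 0; cpu < NCPU; cpu++) {
		if (mock_isset(cpu, set) == 0)
			continue;
		if (!online[cpu]) {
			offline_in_set = true;	/* in the set, but offline */
			continue;
		}
		found = cpu;
		break;
	}
	if (found < 0)
		printf("%s\n", offline_in_set ?
		    "all CPUs in the set are offline -> EPERM" : "empty set");
	else
		printf("first online CPU in the set: %d\n", found);	/* 3 */
	return 0;
}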


@@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.57 2008/10/15 08:13:17 ad Exp $ */
/* $NetBSD: cpu.c,v 1.58 2008/10/31 00:36:22 rmind Exp $ */

/*-
 * Copyright (c) 2000, 2006, 2007, 2008 The NetBSD Foundation, Inc.
@@ -62,7 +62,7 @@
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.57 2008/10/15 08:13:17 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.58 2008/10/31 00:36:22 rmind Exp $");

#include "opt_ddb.h"
#include "opt_mpbios.h"		/* for MPDEBUG */
@@ -1002,7 +1002,7 @@ cpu_suspend(device_t dv PMF_FN_ARGS)
	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setonline(ci, false);
		err = cpu_setstate(ci, false);
		mutex_exit(&cpu_lock);
		if (err)
@@ -1028,7 +1028,7 @@ cpu_resume(device_t dv PMF_FN_ARGS)
	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setonline(ci, true);
		err = cpu_setstate(ci, true);
		mutex_exit(&cpu_lock);
	}


@@ -1,4 +1,4 @@
/* $NetBSD: cpu.c,v 1.28 2008/08/22 10:25:58 bouyer Exp $ */
/* $NetBSD: cpu.c,v 1.29 2008/10/31 00:36:22 rmind Exp $ */
/* NetBSD: cpu.c,v 1.18 2004/02/20 17:35:01 yamt Exp */

/*-
@@ -66,7 +66,7 @@
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.28 2008/08/22 10:25:58 bouyer Exp $");
__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.29 2008/10/31 00:36:22 rmind Exp $");

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
@@ -1012,7 +1012,7 @@ cpu_suspend(device_t dv PMF_FN_ARGS)
	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setonline(ci, false);
		err = cpu_setstate(ci, false);
		mutex_exit(&cpu_lock);
		if (err)
@@ -1038,7 +1038,7 @@ cpu_resume(device_t dv PMF_FN_ARGS)
	if (sc->sc_wasonline) {
		mutex_enter(&cpu_lock);
		err = cpu_setonline(ci, true);
		err = cpu_setstate(ci, true);
		mutex_exit(&cpu_lock);
	}


@@ -1,4 +1,4 @@
/* $NetBSD: kern_cpu.c,v 1.36 2008/10/15 08:13:17 ad Exp $ */
/* $NetBSD: kern_cpu.c,v 1.37 2008/10/31 00:36:22 rmind Exp $ */

/*-
 * Copyright (c) 2007, 2008 The NetBSD Foundation, Inc.
@@ -56,8 +56,7 @@
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.36 2008/10/15 08:13:17 ad Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.37 2008/10/31 00:36:22 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
@@ -174,7 +173,7 @@ cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setonline(ci, cs->cs_online);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
@@ -235,45 +234,72 @@ static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *mci;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread which sent unicast (separate context) is holding
	 * the cpu_lock for us.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration */
	for (CPU_INFO_FOREACH(cii, mci)) {
		mspc = &mci->ci_schedstate;
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(mci != NULL);
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.
	 *
	 * Please note, that this runs from the xcall thread, thus handling
	 * of LSONPROC is not needed. Threads which change the state will
	 * be handled by sched_takecpu().
	 * Migrate all non-bound threads to the other CPU. Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if ((l->l_pflag & LP_BOUND) == 0 && l->l_cpu == ci) {
			lwp_migrate(l, mci);
		} else {
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Normal case - no affinity */
		if ((l->l_flag & LW_AFFINITY) == 0) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set */
		KASSERT(l->l_affinity != NULL);
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(cpu_index(mci), l->l_affinity))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;

fail:
	/* Just unset the SPCF_OFFLINE flag, caller will check */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}
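The migration loop above encodes a small per-LWP policy. Here is a self-contained restatement of the decision it makes; mock_lwp, pick_target, and the bitmaps are illustrative mock types, not kernel code.

#include <stdbool.h>

#define NCPU 4

/* Illustrative stand-in for lwp_t; not the kernel structure. */
struct mock_lwp {
	int	cpu;		/* CPU the LWP currently runs on */
	bool	bound;		/* LP_BOUND or LP_INTR: never migrated */
	bool	has_affinity;	/* LW_AFFINITY */
	bool	affinity[NCPU];	/* l_affinity as a plain bitmap */
};

/*
 * Decide where an LWP goes when `offlining' is marked SPCF_OFFLINE.
 * online[offlining] is assumed already false, mirroring the kernel,
 * which sets SPCF_OFFLINE before scanning.  Returns -1 when no online
 * CPU in the affinity set remains; the caller must then undo the
 * offline operation, as the `fail:' path above does.
 */
static int
pick_target(const struct mock_lwp *l, int offlining, const bool *online,
    int default_target)
{
	int cpu;

	if (l->cpu != offlining || l->bound)
		return l->cpu;		/* unaffected, or pinned */
	if (!l->has_affinity)
		return default_target;	/* normal case: first online CPU */
	for (cpu = 0; cpu < NCPU; cpu++)
		if (online[cpu] && l->affinity[cpu])
			return cpu;	/* online CPU within the set */
	return -1;			/* migration impossible */
}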
static void

@@ -289,7 +315,7 @@ cpu_xc_online(struct cpu_info *ci)
}

int
cpu_setonline(struct cpu_info *ci, bool online)
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;

@@ -332,10 +358,11 @@ cpu_setonline(struct cpu_info *ci, bool online)
	xc_wait(where);

	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else {
		KASSERT(spc->spc_flags & SPCF_OFFLINE);
	} else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
		/* If was not set offline, then it is busy */
		return EBUSY;
	}
	spc->spc_lastmod = time_second;
	return 0;
}
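For reference, the calling convention the renamed function expects, as the cpu_suspend() and cpuctl_ioctl() hunks above use it. This is a kernel-style sketch; the function name is illustrative.

/*
 * Calling convention after the rename: take cpu_lock around the call,
 * and be prepared for an offline request to fail.
 */
int
example_set_cpu_offline(struct cpu_info *ci)
{
	int error;

	mutex_enter(&cpu_lock);		/* serializes online/offline changes */
	error = cpu_setstate(ci, false);
	mutex_exit(&cpu_lock);

	/*
	 * EBUSY now means the cross call could not migrate every LWP
	 * away (e.g. an LWP whose affinity set has no other online
	 * CPU); SPCF_OFFLINE has already been rolled back by then.
	 */
	return error;
}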


@@ -1,4 +1,4 @@
/* $NetBSD: sys_sched.c,v 1.30 2008/10/18 19:24:04 rmind Exp $ */
/* $NetBSD: sys_sched.c,v 1.31 2008/10/31 00:36:22 rmind Exp $ */

/*
 * Copyright (c) 2008, Mindaugas Rasiukevicius <rmind at NetBSD org>
@@ -29,13 +29,20 @@
/*
 * System calls relating to the scheduler.
 *
 * Lock order:
 *
 *	cpu_lock ->
 *	    proc_lock ->
 *		proc_t::p_lock ->
 *		    lwp_t::lwp_lock
 *
 * TODO:
 *  - Handle pthread_setschedprio() as defined by POSIX;
 *  - Handle sched_yield() case for SCHED_FIFO as defined by POSIX;
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.30 2008/10/18 19:24:04 rmind Exp $");
__KERNEL_RCSID(0, "$NetBSD: sys_sched.c,v 1.31 2008/10/31 00:36:22 rmind Exp $");

#include <sys/param.h>
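The new lock-order comment corresponds to the nesting sys__sched_setaffinity() uses later in this diff. Schematically, as a sketch with an illustrative function name, lookups and error paths elided:

static void
lock_order_sketch(struct proc *p, struct lwp *t)
{
	mutex_enter(&cpu_lock);		/* 1. CPU online/offline state */
	mutex_enter(proc_lock);		/* 2. to find the process */
	mutex_enter(p->p_lock);		/* 3. the target process */
	mutex_exit(proc_lock);		/*    may be dropped once p is held */
	lwp_lock(t);			/* 4. the target LWP */
	/* ... update t->l_affinity ... */
	lwp_unlock(t);
	mutex_exit(p->p_lock);
	mutex_exit(&cpu_lock);
}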
@@ -204,9 +211,8 @@ sys__sched_setparam(struct lwp *l, const struct sys__sched_setparam_args *uap,
	error = do_sched_setparam(SCARG(uap, pid), SCARG(uap, lid),
	    SCARG(uap, policy), &params);
out:
	return (error);
out:
	return error;
}
int
@@ -279,12 +285,13 @@ sys__sched_getparam(struct lwp *l, const struct sys__sched_getparam_args *uap,
	error = copyout(&params, SCARG(uap, params), sizeof(params));
	if (error == 0 && SCARG(uap, policy) != NULL)
		error = copyout(&policy, SCARG(uap, policy), sizeof(int));
out:
	return (error);
out:
	return error;
}
/* Allocate the CPU set, and get it from userspace */
/*
 * Allocate the CPU set, and get it from userspace.
 */
static int
genkcpuset(kcpuset_t **dset, const cpuset_t *sset, size_t size)
{
@@ -315,25 +322,41 @@ sys__sched_setaffinity(struct lwp *l,
	struct proc *p;
	struct lwp *t;
	CPU_INFO_ITERATOR cii;
	bool offline_in_set;
	lwpid_t lid;
	u_int lcnt;
	int error;

	if ((error = genkcpuset(&cpuset, SCARG(uap, cpuset), SCARG(uap, size))))
	error = genkcpuset(&cpuset, SCARG(uap, cpuset), SCARG(uap, size));
	if (error)
		return error;

	/* Look for a CPU in the set */
	/*
	 * Look for a CPU in the set, however, skip offline CPUs.
	 *
	 * To avoid the race with CPU online/offline calls, cpu_lock will
	 * be locked for the entire operation.
	 */
	offline_in_set = false;
	mutex_enter(&cpu_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		error = kcpuset_isset(cpu_index(ci), cpuset);
		if (error) {
			if (error == -1) {
				error = E2BIG;
				goto out;
			}
			break;
		struct schedstate_percpu *spc;

		if (kcpuset_isset(cpu_index(ci), cpuset) == 0)
			continue;
		spc = &ci->ci_schedstate;
		if (spc->spc_flags & SPCF_OFFLINE) {
			offline_in_set = true;
			continue;
		}
		break;
	}
	if (ci == NULL) {
		if (offline_in_set) {
			/* All CPUs in the set are offline */
			error = EPERM;
			goto out;
		}
		/* Empty set */
		kcpuset_unuse(cpuset, NULL);
		cpuset = NULL;
@@ -373,15 +396,7 @@ sys__sched_setaffinity(struct lwp *l,
	}

#ifdef KERN_SA
	/*
	 * Don't permit changing the affinity of an SA process. The only
	 * thing that would make sense wold be to set the affinity of
	 * a VP and all threads running on it. But we don't support that
	 * now, so just don't permit it.
	 *
	 * Test is here so that caller gets auth errors before SA
	 * errors.
	 */
	/* Changing the affinity of a SA process is not supported */
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		error = EINVAL;
@@ -424,6 +439,7 @@ sys__sched_setaffinity(struct lwp *l,
	if (lcnt == 0)
		error = ESRCH;
out:
	mutex_exit(&cpu_lock);
	if (cpuset != NULL)
		kcpuset_unuse(cpuset, &cpulst);
	kcpuset_destroy(cpulst);
@@ -447,7 +463,8 @@ sys__sched_getaffinity(struct lwp *l,
	kcpuset_t *cpuset;
	int error;

	if ((error = genkcpuset(&cpuset, SCARG(uap, cpuset), SCARG(uap, size))))
	error = genkcpuset(&cpuset, SCARG(uap, cpuset), SCARG(uap, size));
	if (error)
		return error;

	/* Locks the LWP */
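To see the new failure mode from userland, here is a hedged sketch. It assumes NetBSD's affinity(3) wrappers (cpuset_create(), cpuset_set(), cpuset_size(), sched_setaffinity_np()), which sit above the syscall changed here but are not part of this diff.

#include <err.h>
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	cpuset_t *set;

	if ((set = cpuset_create()) == NULL)
		err(EXIT_FAILURE, "cpuset_create");
	/* Suppose CPU 1 has been set offline with cpuctl(8). */
	cpuset_set(1, set);

	if (sched_setaffinity_np(getpid(), cpuset_size(set), set) == -1) {
		/* EPERM: every CPU in the set is offline (the new check). */
		fprintf(stderr, "setaffinity: %s\n", strerror(errno));
	}
	cpuset_destroy(set);
	return EXIT_SUCCESS;
}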


@@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.23 2008/10/15 08:13:17 ad Exp $ */
/* $NetBSD: cpu.h,v 1.24 2008/10/31 00:36:22 rmind Exp $ */

/*-
 * Copyright (c) 2007 YAMAMOTO Takashi,
@@ -72,7 +72,7 @@ void cpu_offline_md(void);
lwp_t	*cpu_switchto(lwp_t *, lwp_t *, bool);
struct cpu_info *cpu_lookup(u_int);
int	cpu_setonline(struct cpu_info *, bool);
int	cpu_setstate(struct cpu_info *, bool);
bool	cpu_intr_p(void);
bool	cpu_kpreempt_enter(uintptr_t, int);
void	cpu_kpreempt_exit(uintptr_t);