Maintain a bitmap of idle CPUs, and add idle_pick() to find an idle CPU and remove it from the bitmap.
This commit is contained in:
ad 2008-04-04 17:21:22 +00:00
parent a3fc79baa9
commit 61a0a96054
2 changed files with 56 additions and 3 deletions

View File

@ -1,4 +1,4 @@
/* $NetBSD: kern_idle.c,v 1.12 2008/03/10 22:20:14 martin Exp $ */
/* $NetBSD: kern_idle.c,v 1.13 2008/04/04 17:21:22 ad Exp $ */
/*-
* Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
@ -28,7 +28,7 @@
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.12 2008/03/10 22:20:14 martin Exp $");
__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.13 2008/04/04 17:21:22 ad Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@ -37,15 +37,24 @@ __KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.12 2008/03/10 22:20:14 martin Exp $"
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#if MAXCPUS > 32
#error fix this code
#endif
static volatile uint32_t *idle_cpus;
void
idle_loop(void *dummy)
{
struct cpu_info *ci = curcpu();
struct lwp *l = curlwp;
unsigned mask = (1 << cpu_index(ci));
bool set = false;
/* Update start time for this thread. */
lwp_lock(l);
@ -66,9 +75,17 @@ idle_loop(void *dummy)
if (sched_curcpu_runnable_p()) {
goto schedule;
}
if (!set) {
set = true;
atomic_or_32(idle_cpus, mask);
}
uvm_pageidlezero();
}
if (!sched_curcpu_runnable_p()) {
if (!set) {
set = true;
atomic_or_32(idle_cpus, mask);
}
cpu_idle();
if (!sched_curcpu_runnable_p() &&
!ci->ci_want_resched) {
@ -76,6 +93,10 @@ idle_loop(void *dummy)
}
}
schedule:
if (set) {
set = false;
atomic_and_32(idle_cpus, ~mask);
}
KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
lwp_lock(l);
mi_switch(l);
@ -84,12 +105,41 @@ schedule:
}
}
/*
 * Pick an idle CPU and clear its bit in the idle bitmask.  The
 * bitmask is not always accurate, but is "good enough" for the
 * purpose of finding a CPU to run a job on.
 *
 * Returns the chosen CPU, or NULL if no CPU is currently marked idle.
 */
struct cpu_info *
idle_pick(void)
{
	uint32_t snap;
	u_int idx;

	/* Lock-free claim: snapshot the mask, pick the lowest set bit,
	 * and retry if another CPU changed the mask underneath us. */
	for (;;) {
		snap = *idle_cpus;
		if (snap == 0)
			return NULL;
		idx = ffs(snap) - 1;
		if (atomic_cas_32(idle_cpus, snap, snap ^ (1 << idx)) == snap)
			break;
	}
	return cpu_lookup_byindex(idx);
}
int
create_idle_lwp(struct cpu_info *ci)
{
static bool again;
uintptr_t addr;
lwp_t *l;
int error;
if (!again) {
again = true;
addr = (uintptr_t)kmem_alloc(coherency_unit * 2, KM_SLEEP);
addr = roundup(addr, coherency_unit);
idle_cpus = (uint32_t *)addr;
}
KASSERT(ci->ci_data.cpu_idlelwp == NULL);
error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);

View File

@ -1,4 +1,4 @@
/* $NetBSD: cpu.h,v 1.16 2008/03/15 14:55:22 ad Exp $ */
/* $NetBSD: cpu.h,v 1.17 2008/04/04 17:21:22 ad Exp $ */
/*-
* Copyright (c) 2007 YAMAMOTO Takashi,
@ -67,8 +67,11 @@ struct cpu_info *cpu_lookup_byindex(u_int);
int cpu_setonline(struct cpu_info *, bool);
bool cpu_intr_p(void);
CIRCLEQ_HEAD(cpuqueue, cpu_info);
extern kmutex_t cpu_lock;
extern u_int maxcpus;
extern struct cpuqueue cpu_queue;
static inline u_int
cpu_index(struct cpu_info *ci)