Back out __HAVE_CHOOSEPROC stuff.

gmcgarry 2002-09-29 21:11:36 +00:00
parent 9dd94a1878
commit 395d77f3dc
1 changed file with 24 additions and 138 deletions


@@ -1,4 +1,4 @@
-/*	$NetBSD: kern_synch.c,v 1.113 2002/09/22 05:36:48 gmcgarry Exp $	*/
+/*	$NetBSD: kern_synch.c,v 1.114 2002/09/29 21:11:36 gmcgarry Exp $	*/
 /*-
  * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
@@ -78,7 +78,7 @@
  */
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.113 2002/09/22 05:36:48 gmcgarry Exp $");
+__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.114 2002/09/29 21:11:36 gmcgarry Exp $");
 #include "opt_ddb.h"
 #include "opt_ktrace.h"
@@ -717,31 +717,6 @@ wakeup_one(void *ident)
 	SCHED_UNLOCK(s);
 }
-#if defined(__HAVE_CHOOSEPROC)
-/*
- * Remove the next process of the highest priority from the run queue.
- * If the queue is empty, then call cpu_idle() and wait until one is
- * available. Set curproc to NULL to avoid the process accumulating
- * time while we idle.
- */
-struct proc *
-chooseproc(void)
-{
-	struct proc *oldp, *newp;
-	oldp = curproc;
-	curproc = NULL;
-	for (;;) {
-		newp = nextrunqueue();
-		if (newp != NULL)
-			break;
-		cpu_idle();
-	}
-	curproc = oldp;
-	return (newp);
-}
-#endif
 /*
  * General yield call. Puts the current process back on its run queue and
  * performs a voluntary context switch.
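The chooseproc() removed above wraps one idea: clear curproc so idle time is not charged to the outgoing process, then loop over nextrunqueue() and cpu_idle() until something becomes runnable. A minimal user-space sketch of that wait-for-work loop, assuming hypothetical stand-ins (struct task, pop_task(), wait_for_interrupt()) for struct proc, nextrunqueue() and cpu_idle(), and omitting the curproc bookkeeping:

```c
/*
 * Sketch of the wait-for-work loop implemented by the removed
 * chooseproc().  All names below are invented stand-ins, not kernel API.
 */
#include <stddef.h>
#include <stdio.h>

struct task { int id; struct task *next; };

static struct task *task_queue;		/* head of a toy run queue */

static struct task *
pop_task(void)				/* analogue of nextrunqueue() */
{
	struct task *t = task_queue;

	if (t != NULL)
		task_queue = t->next;
	return t;
}

static void
wait_for_interrupt(void)		/* analogue of cpu_idle() */
{
	static struct task woken = { 42, NULL };

	task_queue = &woken;		/* pretend an interrupt made one task runnable */
}

static struct task *
choose_task(void)			/* analogue of the removed chooseproc() */
{
	struct task *t;

	for (;;) {
		t = pop_task();
		if (t != NULL)
			return t;
		wait_for_interrupt();	/* queue empty: idle until work appears */
	}
}

int
main(void)
{
	printf("next task: %d\n", choose_task()->id);
	return 0;
}
```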
@@ -814,10 +789,7 @@ mi_switch(struct proc *p, struct proc *newp)
 	KDASSERT(p->p_cpu != NULL);
 	KDASSERT(p->p_cpu == curcpu());
-#if !defined(__HAVE_CHOOSEPROC)
-	KDASSERT(newp == NULL);
-#endif
 	spc = &p->p_cpu->ci_schedstate;
@@ -842,6 +814,8 @@ mi_switch(struct proc *p, struct proc *newp)
 		u -= 1000000;
 		s++;
 	}
+	p->p_rtime.tv_usec = u;
+	p->p_rtime.tv_sec = s;
 	/*
 	 * Check if the process exceeds its cpu resource allocation.
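The two added lines store the run time accumulated just above them; the surrounding context shows the microsecond carry into seconds. A small stand-alone illustration of that normalization, with add_runtime() as a hypothetical helper rather than kernel code:

```c
/* Sketch of the microsecond-carry arithmetic used when accumulating
 * p->p_rtime; add_runtime() is an invented helper, not kern_synch.c code. */
#include <stdio.h>
#include <sys/time.h>

static void
add_runtime(struct timeval *rtime, const struct timeval *delta)
{
	long s = rtime->tv_sec + delta->tv_sec;
	long u = rtime->tv_usec + delta->tv_usec;

	if (u < 0) {		/* clock stepped backwards between samples */
		u += 1000000;
		s--;
	}
	if (u >= 1000000) {	/* carry microseconds into seconds */
		u -= 1000000;
		s++;
	}
	rtime->tv_usec = u;
	rtime->tv_sec = s;
}

int
main(void)
{
	struct timeval rt = { 3, 900000 };	/* 3.900000 s accumulated so far */
	struct timeval delta = { 0, 250000 };	/* 0.250000 s just elapsed */

	add_runtime(&rt, &delta);
	printf("%ld.%06ld\n", (long)rt.tv_sec, (long)rt.tv_usec);	/* prints 4.150000 */
	return 0;
}
```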
@@ -878,87 +852,34 @@ mi_switch(struct proc *p, struct proc *newp)
 	kstack_check_magic(p);
 #endif
-#if defined(__HAVE_CHOOSEPROC)
 	/*
-	 * If we haven't been told which process to switch to, then
-	 * call nextrunqueue() to select the next process from the run
-	 * queue.
+	 * If we are using h/w performance counters, save context.
 	 */
-	if (newp == NULL)
-		newp = nextrunqueue();
-	/*
-	 * If we're switching to ourself then don't bother reloading
-	 * the address space or recalculating the process execution
-	 * time. Just short-circuit out of here.
-	 */
-	if (newp == p) {
-		p->p_stat = SONPROC;
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-		sched_unlock_idle();
-#endif
-		goto out;
-	}
-#endif /* __HAVE_CHOOSEPROC */
-	/*
-	 * We won't be short-circuiting our path out of here, so
-	 * update the outgoing process CPU usage.
-	 */
-	p->p_rtime.tv_usec = u;
-	p->p_rtime.tv_sec = s;
-#if defined(__HAVE_CHOOSEPROC)
-	/*
-	 * If newp == NULL, then nextrunqueue() couldn't find a
-	 * runnable process. We must invoke chooseproc() to wait for
-	 * one to become available.
-	 */
-	if (newp == NULL)
-		newp = chooseproc();
-	/*
-	 * Check if we're switching to ourself. If we're not, then
-	 * call cpu_switch() to switch to the new current process.
-	 * We must have idled so the process CPU time has to be
-	 * recalculated.
-	 */
-	if (p == newp) {
-		p->p_stat = SONPROC;
-#if defined(MULTIPROCESSOR) || defined(LOCKDEBUG)
-		sched_unlock_idle();
-#endif
-	} else
-#endif /* __HAVE_CHOOSEPROC */
-	{
-		/*
-		 * If we are using h/w performance counters, save context.
-		 */
 #if PERFCTRS
-		if (PMC_ENABLED(p))
-			pmc_save_context(p);
+	if (PMC_ENABLED(p))
+		pmc_save_context(p);
 #endif
-		/*
-		 * Switch to the new current process. When we
-		 * run again, we'll return back here.
-		 */
-		uvmexp.swtch++;
-		cpu_switch(p, newp);
+	/*
+	 * Switch to the new current process. When we
+	 * run again, we'll return back here.
+	 */
+	uvmexp.swtch++;
+	cpu_switch(p, NULL);
-		/*
-		 * If we are using h/w performance counters, restore context.
-		 */
+	/*
+	 * If we are using h/w performance counters, restore context.
+	 */
 #if PERFCTRS
-		if (PMC_ENABLED(p))
-			pmc_restore_context(p);
+	if (PMC_ENABLED(p))
+		pmc_restore_context(p);
 #endif
-	}
+	/*
+	 * Make sure that MD code released the scheduler lock before
+	 * resuming us.
+	 */
+	SCHED_ASSERT_UNLOCKED();
 	/*
 	 * We're running again; record our new start time. We might
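With the __HAVE_CHOOSEPROC branches gone, the switch path above reduces to: save any h/w performance-counter state, call cpu_switch(p, NULL) and let the MD code pick the next process, restore the counter state when this process runs again, then assert the scheduler lock was released. A rough user-space sketch of that save/switch/restore bracket, assuming invented stubs (pmc_save_stub(), pmc_restore_stub(), cpu_switch_stub()) rather than the real PERFCTRS interface:

```c
/*
 * Sketch of the save/switch/restore bracket around cpu_switch() in the
 * simplified mi_switch() path.  Everything here is an invented stand-in.
 */
#include <stdbool.h>
#include <stdio.h>

struct pcb { unsigned long counters; };	/* per-process saved counter state */

static bool pmc_enabled = true;
static unsigned long hw_counter = 1000;	/* pretend hardware event counter */

static void
pmc_save_stub(struct pcb *p)
{
	p->counters = hw_counter;	/* snapshot counters of the outgoing process */
}

static void
pmc_restore_stub(const struct pcb *p)
{
	hw_counter = p->counters;	/* reload counters when the process resumes */
}

static void
cpu_switch_stub(void)
{
	hw_counter += 500;		/* some other process ran in the meantime */
}

int
main(void)
{
	struct pcb me = { 0 };

	if (pmc_enabled)
		pmc_save_stub(&me);	/* #if PERFCTRS ... pmc_save_context(p) */
	cpu_switch_stub();		/* cpu_switch(p, NULL): MD code picks the next process */
	if (pmc_enabled)
		pmc_restore_stub(&me);	/* pmc_restore_context(p) once we run again */

	printf("counter seen on return: %lu\n", hw_counter);	/* 1000 again */
	return 0;
}
```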
@@ -969,16 +890,6 @@ mi_switch(struct proc *p, struct proc *newp)
 	KDASSERT(p->p_cpu == curcpu());
 	microtime(&p->p_cpu->ci_schedstate.spc_runtime);
-#if defined(__HAVE_CHOOSEPROC)
- out:
-#endif
-	/*
-	 * Make sure that MD code released the scheduler lock before
-	 * resuming us.
-	 */
-	SCHED_ASSERT_UNLOCKED();
 #if defined(MULTIPROCESSOR)
 	/*
 	 * Reacquire the kernel_lock now. We do this after we've
@@ -1199,29 +1110,4 @@ remrunqueue(struct proc *p)
 		sched_whichqs &= ~(1<<whichq);
 }
-struct proc *
-nextrunqueue(void)
-{
-	struct prochd *rq;
-	struct proc *next, *p;
-	int whichq;
-	if (sched_whichqs == 0)
-		return (NULL);
-	whichq = ffs(sched_whichqs)-1;
-	rq = &sched_qs[whichq];
-	p = rq->ph_link;
-#ifdef DIAGNOSTIC
-	if (p == (struct proc *)rq)
-		panic("nextrunqueue");
-#endif
-	next = p->p_forw;
-	rq->ph_link = next;
-	next->p_back = (struct proc *)rq;
-	if (next == (struct proc *)rq)
-		sched_whichqs &= ~(1<<whichq);
-	p->p_back = NULL;
-	return (p);
-}
 #endif
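The nextrunqueue() deleted above is the usual 4.4BSD run-queue scan: sched_whichqs keeps one bit per priority queue, ffs() picks the lowest set bit (the highest-priority non-empty queue, since a lower queue index means a better priority), and the head of that queue is unlinked, clearing the bit when the queue drains. A self-contained sketch of the same bitmap technique, using invented names (struct job, runq, whichqs) and singly-linked queues instead of the kernel's doubly-linked prochd lists:

```c
/*
 * Sketch of the run-queue bitmap technique behind the removed
 * nextrunqueue().  All names are illustrative, not kernel API.
 */
#include <stddef.h>
#include <stdio.h>
#include <strings.h>		/* ffs() */

#define NQS 32			/* one bit per queue, as with sched_whichqs */

struct job {
	int prio;		/* 0 is the best priority */
	struct job *next;
};

static struct job *runq[NQS];	/* per-priority FIFO heads */
static unsigned int whichqs;	/* bit i set => runq[i] is non-empty */

static void
setrunqueue_sketch(struct job *j)
{
	struct job **qp = &runq[j->prio];

	while (*qp != NULL)		/* append to the tail of its queue */
		qp = &(*qp)->next;
	j->next = NULL;
	*qp = j;
	whichqs |= 1U << j->prio;
}

static struct job *
nextrunqueue_sketch(void)
{
	struct job *j;
	int whichq;

	if (whichqs == 0)
		return NULL;			/* nothing runnable */
	whichq = ffs(whichqs) - 1;		/* lowest set bit = best non-empty queue */
	j = runq[whichq];
	runq[whichq] = j->next;
	if (runq[whichq] == NULL)
		whichqs &= ~(1U << whichq);	/* queue drained: clear its bit */
	j->next = NULL;
	return j;
}

int
main(void)
{
	struct job a = { 5, NULL }, b = { 2, NULL }, c = { 2, NULL };
	struct job *j;

	setrunqueue_sketch(&a);
	setrunqueue_sketch(&b);
	setrunqueue_sketch(&c);
	while ((j = nextrunqueue_sketch()) != NULL)
		printf("running job at priority %d\n", j->prio);	/* 2, 2, then 5 */
	return 0;
}
```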