Do not disable interrupts at machine-level in the MI idle-loop entry.
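
The MI idle loop now calls acpicpu_md_cstate_enter() with interrupts
enabled, so the FFH (monitor/mwait) path no longer needs to re-enable
them, and the HALT path disables them locally around its final check.
A minimal sketch of that check-then-"sti; hlt" pattern (the wrapper
function is hypothetical and only for illustration; the identifiers
are the ones used in the diff below):

	/* Hypothetical illustration, not code from this commit. */
	static void
	idle_halt_sketch(struct cpu_info *ci)
	{

		x86_disable_intr();	/* cli: a wakeup can no longer race the check */

		if (__predict_false(ci->ci_want_resched != 0)) {
			x86_enable_intr();	/* work is already pending; skip the halt */
			return;
		}

		/*
		 * x86_stihlt() issues "sti; hlt". Because sti takes
		 * effect only after the following instruction, an
		 * interrupt posted after the check above still wakes
		 * the CPU from the hlt; there is no lost-wakeup window.
		 */
		x86_stihlt();
	}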

jruoho 2011-07-13 07:34:55 +00:00
parent ded622654f
commit 57c7846186
2 changed files with 18 additions and 24 deletions

acpi_cpu_md.c

@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_md.c,v 1.63 2011/06/23 08:10:35 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_md.c,v 1.64 2011/07/13 07:34:55 jruoho Exp $ */
 
 /*-
  * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.63 2011/06/23 08:10:35 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.64 2011/07/13 07:34:55 jruoho Exp $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -397,19 +397,19 @@ acpicpu_md_cstate_stop(void)
 }
 
 /*
- * Called with interrupts disabled.
- * Caller should enable interrupts after return.
+ * Called with interrupts enabled.
  */
 void
 acpicpu_md_cstate_enter(int method, int state)
 {
 	struct cpu_info *ci = curcpu();
 
+	KASSERT(ci->ci_ilevel == IPL_NONE);
+
 	switch (method) {
 
 	case ACPICPU_C_STATE_FFH:
 
-		x86_enable_intr();
 		x86_monitor(&ci->ci_want_resched, 0, 0);
 
 		if (__predict_false(ci->ci_want_resched != 0))
@@ -420,8 +420,12 @@ acpicpu_md_cstate_enter(int method, int state)
 
 	case ACPICPU_C_STATE_HALT:
 
-		if (__predict_false(ci->ci_want_resched != 0))
+		x86_disable_intr();
+
+		if (__predict_false(ci->ci_want_resched != 0)) {
+			x86_enable_intr();
 			return;
+		}
 
 		x86_stihlt();
 		break;

acpi_cpu_cstate.c

@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_cstate.c,v 1.53 2011/06/22 08:49:54 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_cstate.c,v 1.54 2011/07/13 07:34:55 jruoho Exp $ */
 
 /*-
  * Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.53 2011/06/22 08:49:54 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.54 2011/07/13 07:34:55 jruoho Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -629,6 +629,8 @@ acpicpu_cstate_latency(struct acpicpu_softc *sc)
 	struct acpicpu_cstate *cs;
 	int i;
 
+	KASSERT(mutex_owned(&sc->sc_mtx) != 0);
+
 	for (i = cs_state_max; i > 0; i--) {
 
 		cs = &sc->sc_cstate[i];
@@ -658,30 +660,25 @@ acpicpu_cstate_idle(void)
 	struct acpicpu_softc *sc;
 	int state;
 
-	acpi_md_OsDisableInterrupt();
-
-	if (__predict_false(ci->ci_want_resched != 0))
-		goto out;
-
 	KASSERT(acpicpu_sc != NULL);
 	KASSERT(ci->ci_acpiid < maxcpus);
 
 	sc = acpicpu_sc[ci->ci_acpiid];
 
 	if (__predict_false(sc == NULL))
-		goto out;
+		return;
 
 	KASSERT(ci->ci_ilevel == IPL_NONE);
 	KASSERT((sc->sc_flags & ACPICPU_FLAG_C) != 0);
 
 	if (__predict_false(sc->sc_cold != false))
-		goto out;
+		return;
 
 	if (__predict_false(mutex_tryenter(&sc->sc_mtx) == 0))
-		goto out;
+		return;
 
-	mutex_exit(&sc->sc_mtx);
 	state = acpicpu_cstate_latency(sc);
+	mutex_exit(&sc->sc_mtx);
 
 	/*
 	 * Apply AMD C1E quirk.
@@ -743,11 +740,6 @@ acpicpu_cstate_idle(void)
 		if ((sc->sc_flags & ACPICPU_FLAG_C_ARB) != 0)
 			(void)AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0);
 	}
-
-	return;
-
-out:
-	acpi_md_OsEnableInterrupt();
 }
 
 static void
@@ -770,8 +762,6 @@ acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
 		break;
 	}
 
-	acpi_md_OsEnableInterrupt();
-
 	cs->cs_evcnt.ev_count++;
 
 	end = acpitimer_read_fast(NULL);
 	sc->sc_cstate_sleep = hztoms(acpitimer_delta(end, start)) * 1000;