Make sure we use MWAIT with MONITOR.

Also clarify when we have interrupts disabled.
jruoho 2010-07-23 13:54:21 +00:00
parent 00ad9caa92
commit 4127064077
2 changed files with 31 additions and 22 deletions
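In the first file, the idle entry is reworked so that MWAIT is always paired with MONITOR: the monitor is armed on the per-CPU reschedule flag, the flag is re-checked, and only then does the CPU wait. The re-check closes the window in which a cross-CPU wakeup write lands before the monitor is armed, which would otherwise leave the CPU sleeping past the wakeup. A minimal sketch of that protocol, assuming NetBSD's x86_enable_intr()/x86_monitor()/x86_mwait() wrappers and the ci_want_resched field of struct cpu_info (an illustration only, not the committed code, which follows below):

#include <sys/cpu.h>		/* curcpu(), struct cpu_info */
#include <machine/cpufunc.h>	/* x86_enable_intr(), x86_monitor(), x86_mwait() */

/* Illustrative only: the committed version is acpicpu_md_idle_enter(). */
static void
mwait_idle_sketch(int state)
{
	struct cpu_info *ci = curcpu();

	/* Entered with interrupts disabled; an interrupt also wakes MWAIT. */
	x86_enable_intr();

	/* Arm the monitor on the word that cross-CPU wakeups write to. */
	x86_monitor(&ci->ci_want_resched, 0, 0);

	/* Re-check: a wakeup may have arrived before the monitor was armed. */
	if (ci->ci_want_resched != 0)
		return;

	/* MWAIT hint in EAX: bits 7:4 hold the target C-state minus one. */
	x86_mwait((state - 1) << 4, 0);
}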

acpi_cpu_md.c

@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_md.c,v 1.2 2010/07/18 09:39:45 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_md.c,v 1.3 2010/07/23 13:54:21 jruoho Exp $ */
 
 /*-
  * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.2 2010/07/18 09:39:45 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.3 2010/07/23 13:54:21 jruoho Exp $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -164,24 +164,35 @@ acpicpu_md_idle_stop(void)
 	return 0;
 }
 
+/*
+ * The MD idle loop. Called with interrupts disabled.
+ */
 void
 acpicpu_md_idle_enter(int method, int state)
 {
-
-	KASSERT(native_idle != NULL);
+	struct cpu_info *ci = curcpu();
 
 	switch (method) {
 
 	case ACPICPU_C_STATE_FFH:
 		x86_enable_intr();
+		x86_monitor(&ci->ci_want_resched, 0, 0);
+
+		if (__predict_false(ci->ci_want_resched) != 0)
+			return;
+
 		x86_mwait((state - 1) << 4, 0);
 		break;
 
 	case ACPICPU_C_STATE_HALT:
-		x86_stihlt();
-		break;
-
-	default:
-		(*native_idle)();
+
+		if (__predict_false(ci->ci_want_resched) != 0) {
+			x86_enable_intr();
+			return;
+		}
+
+		x86_stihlt();
 		break;
 	}
 }
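The HALT case closes the same race without MONITOR. A note on the idiom (background assumption, not part of the commit): x86_stihlt() is NetBSD's wrapper for the STI; HLT pair, and STI enables interrupts only after the following instruction, so with interrupts disabled nothing can be delivered between the ci_want_resched check and the halt; a pending wakeup interrupt instead terminates the HLT. An open-coded equivalent:

static inline void
stihlt_sketch(void)
{
	/* The STI interrupt shadow defers delivery until HLT is reached. */
	__asm volatile("sti; hlt");
}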

acpi_cpu_cstate.c

@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_cstate.c,v 1.7 2010/07/23 08:11:49 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_cstate.c,v 1.8 2010/07/23 13:54:21 jruoho Exp $ */
 
 /*-
  * Copyright (c) 2010 Jukka Ruohonen <jruohonen@iki.fi>
@@ -27,7 +27,7 @@
  * SUCH DAMAGE.
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.7 2010/07/23 08:11:49 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_cstate.c,v 1.8 2010/07/23 13:54:21 jruoho Exp $");
 
 #include <sys/param.h>
 #include <sys/cpu.h>
@@ -720,30 +720,28 @@ acpicpu_cstate_idle(void)
 	struct acpicpu_softc *sc;
 	int state;
 
 	if (__predict_false(ci->ci_want_resched) != 0)
 		return;
 
-	acpi_md_OsDisableInterrupt();
-
 	KASSERT(acpicpu_sc != NULL);
 	KASSERT(ci->ci_cpuid < maxcpus);
 	KASSERT(ci->ci_ilevel == IPL_NONE);
 
-	if (__predict_false(acpi_suspended != 0)) {
-		acpicpu_md_idle_enter(0, 0);
-		return;
-	}
-
 	sc = acpicpu_sc[ci->ci_cpuid];
 
 	/*
-	 * If all CPUs do not have an ACPI counterpart,
-	 * the softc may be NULL. In this case use C1.
+	 * If all CPUs do not have their ACPI counterparts, the softc
+	 * may be NULL. In this case fall back to normal C1 with HALT.
 	 */
 	if (__predict_false(sc == NULL)) {
-		acpicpu_md_idle_enter(0, 0);
+		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
 		return;
 	}
 
+	if (__predict_false(acpi_suspended != 0)) {
+		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
+		return;
+	}
+
+	acpi_md_OsDisableInterrupt();
+
 	state = acpicpu_cstate_latency(sc);
 
 	/*
@@ -822,7 +820,7 @@ acpicpu_cstate_idle_enter(struct acpicpu_softc *sc, int state)
 		break;
 
 	default:
-		acpicpu_md_idle_enter(0, 0);
+		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
 		break;
 	}
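Taken together, the second file now reads top to bottom as: bail out to plain C1 with HALT first, and disable interrupts only immediately before the real C-state dispatch, which is what the commit message means by clarifying when interrupts are disabled. A condensed sketch of that flow (cstate_dispatch() is a hypothetical stand-in for the remainder of acpicpu_cstate_idle(); not the committed code):

static void
cstate_idle_flow_sketch(struct acpicpu_softc *sc)
{
	/* Fallbacks: no ACPI softc for this CPU, or the system is suspending. */
	if (sc == NULL || acpi_suspended != 0) {
		acpicpu_md_idle_enter(ACPICPU_C_STATE_HALT, ACPI_STATE_C1);
		return;
	}

	acpi_md_OsDisableInterrupt();	/* interrupts are off past this point */
	cstate_dispatch(sc, acpicpu_cstate_latency(sc));	/* hypothetical */
}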