Add a __cpu_simple_lock_t to the softc. Use it together with x86_read_psl()
and x86_disable_intr() to disable interrupts locally and protect access to
the APERF and MPERF counters. Also rationalize the MD initialization sequence.
jruoho 2011-03-05 09:47:19 +00:00
parent 64c2268f08
commit 6315c7d63e
4 changed files with 128 additions and 109 deletions
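In outline, the locking pattern introduced here saves the processor status longword (PSL), disables interrupts, and takes a per-softc CPU simple lock before the counters are touched; unlocking releases the lock and restores the PSL. A minimal sketch of how a caller brackets the MSR reads (the names match the diff below; surrounding driver logic is elided):

	u_long flags;

	flags = acpicpu_md_lock(sc);	/* PSL saved, interrupts off, lock held */
	sc->sc_pstate_aperf = rdmsr(MSR_APERF);
	sc->sc_pstate_mperf = rdmsr(MSR_MPERF);
	acpicpu_md_unlock(sc, flags);	/* lock released, PSL restored */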

acpi_cpu_md.c

@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_md.c,v 1.54 2011/03/05 06:39:55 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_md.c,v 1.55 2011/03/05 09:47:19 jruoho Exp $ */
/*-
* Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.54 2011/03/05 06:39:55 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.55 2011/03/05 09:47:19 jruoho Exp $");
#include <sys/param.h>
#include <sys/bus.h>
@@ -51,6 +51,12 @@ __KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.54 2011/03/05 06:39:55 jruoho Exp
#include <machine/acpi_machdep.h>
 /*
+ * Intel IA32_MISC_ENABLE.
+ */
+#define MSR_MISC_ENABLE_EST	__BIT(16)
+#define MSR_MISC_ENABLE_TURBO	__BIT(38)
+
+/*
  * AMD C1E.
  */
@@ -99,8 +105,10 @@ __KERNEL_RCSID(0, "$NetBSD: acpi_cpu_md.c,v 1.54 2011/03/05 06:39:55 jruoho Exp
static char native_idle_text[16];
void (*native_idle)(void) = NULL;
+static u_long acpicpu_md_lock(struct acpicpu_softc *);
+static void acpicpu_md_unlock(struct acpicpu_softc *, u_long);
static int acpicpu_md_quirk_piix4(struct pci_attach_args *);
-static void acpicpu_md_pstate_percent_reset(struct acpicpu_softc *);
+static void acpicpu_md_pstate_percent_reset(struct cpu_info *);
static int acpicpu_md_pstate_fidvid_get(struct acpicpu_softc *,
uint32_t *);
static int acpicpu_md_pstate_fidvid_set(struct acpicpu_pstate *);
@@ -134,6 +142,24 @@ acpicpu_md_attach(device_t parent, device_t self, void *aux)
return cfaa->ci;
}
+static u_long
+acpicpu_md_lock(struct acpicpu_softc *sc)
+{
+	const u_long flags = x86_read_psl();
+
+	x86_disable_intr();
+	__cpu_simple_lock(&sc->sc_lock);
+
+	return flags;
+}
+
+static void
+acpicpu_md_unlock(struct acpicpu_softc *sc, u_long flags)
+{
+	__cpu_simple_unlock(&sc->sc_lock);
+	x86_write_psl(flags);
+}
uint32_t
acpicpu_md_cap(void)
{
@@ -469,11 +495,27 @@ acpicpu_md_cstate_enter(int method, int state)
int
acpicpu_md_pstate_start(struct acpicpu_softc *sc)
{
-	const uint64_t est = __BIT(16);
return acpicpu_md_pstate_sysctl_init();
}
int
acpicpu_md_pstate_stop(void)
{
if (acpicpu_log != NULL)
sysctl_teardown(&acpicpu_log);
return 0;
}
+int
+acpicpu_md_pstate_init(struct acpicpu_softc *sc)
+{
+	struct acpicpu_pstate *ps, msr;
+	struct cpu_info *ci = curcpu();
+	uint32_t family, i = 0;
+	uint64_t val;
+
+	if ((sc->sc_flags & ACPICPU_FLAG_P) == 0)
+		return ENODEV;
+
+	(void)memset(&msr, 0, sizeof(struct acpicpu_pstate));
switch (cpu_vendor) {
@@ -483,52 +525,20 @@ acpicpu_md_pstate_start(struct acpicpu_softc *sc)
 	/*
 	 * Make sure EST is enabled.
 	 */
 	val = rdmsr(MSR_MISC_ENABLE);

+	if ((sc->sc_flags & ACPICPU_FLAG_P_FFH) != 0) {
+
-	if ((val & est) == 0) {
-
-		val |= est;
-
-		wrmsr(MSR_MISC_ENABLE, val);
-		val = rdmsr(MSR_MISC_ENABLE);
-
-		if ((val & est) == 0)
-			return ENOTTY;
-	}
+		if ((val & MSR_MISC_ENABLE_EST) == 0) {
+
+			val |= MSR_MISC_ENABLE_EST;
+			wrmsr(MSR_MISC_ENABLE, val);
+			val = rdmsr(MSR_MISC_ENABLE);
+
+			if ((val & MSR_MISC_ENABLE_EST) == 0)
+				return ENOTTY;
+		}
+	}
-	/*
-	 * Reset the APERF and MPERF counters.
-	 */
-	if ((sc->sc_flags & ACPICPU_FLAG_P_HWF) != 0)
-		acpicpu_md_pstate_percent_reset(sc);
return acpicpu_md_pstate_sysctl_init();
}
-int
-acpicpu_md_pstate_pss(struct acpicpu_softc *sc)
-{
-	struct acpicpu_pstate *ps, msr;
-	struct cpu_info *ci = curcpu();
-	uint32_t family, i = 0;
-
-	(void)memset(&msr, 0, sizeof(struct acpicpu_pstate));
switch (cpu_vendor) {
case CPUVENDOR_IDT:
case CPUVENDOR_INTEL:
/*
* If the so-called Turbo Boost is present,
@@ -541,7 +551,7 @@ acpicpu_md_pstate_pss(struct acpicpu_softc *sc)
* in Intel Core(tm) Microarchitectures (Nehalem)
* Based Processors. White Paper, November 2008.
*/
-		if (sc->sc_pstate_count > 2 &&
+		if (sc->sc_pstate_count >= 2 &&
(sc->sc_flags & ACPICPU_FLAG_P_TURBO) != 0) {
ps = &sc->sc_pstate[0];
@@ -586,6 +596,9 @@ acpicpu_md_pstate_pss(struct acpicpu_softc *sc)
default:
+		/*
+		 * If we have an unknown AMD CPU, rely on XPSS.
+		 */
if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
return EOPNOTSUPP;
}
@@ -624,87 +637,94 @@ acpicpu_md_pstate_pss(struct acpicpu_softc *sc)
i++;
}
+	/*
+	 * Reset the APERF and MPERF counters.
+	 *
+	 * XXX: Should be with xc_unicast(9).
+	 */
+	if ((sc->sc_flags & ACPICPU_FLAG_P_HWF) != 0)
+		acpicpu_md_pstate_percent_reset(sc->sc_ci);
return 0;
}
+/*
+ * Read the IA32_APERF and IA32_MPERF counters. IA32_MPERF
+ * increments at the rate of the fixed maximum frequency
+ * configured during boot, whereas IA32_APERF counts at the
+ * rate of the actual frequency. Note that the MSRs must be
+ * read without delay, and that only the ratio between
+ * IA32_APERF and IA32_MPERF is architecturally defined.
+ *
+ * The function thus returns the percentage of the actual
+ * frequency in terms of the maximum frequency of the calling
+ * CPU since the last call. A value zero implies an error.
+ *
+ * For further details, refer to:
+ *
+ *	Intel Corporation: Intel 64 and IA-32 Architectures
+ *	Software Developer's Manual. Section 13.2, Volume 3A:
+ *	System Programming Guide, Part 1. July, 2008.
+ *
+ *	Advanced Micro Devices: BIOS and Kernel Developer's
+ *	Guide (BKDG) for AMD Family 10h Processors. Section
+ *	2.4.5, Revision 3.48, April 2010.
+ */
uint8_t
-acpicpu_md_pstate_percent(struct acpicpu_softc *sc)
+acpicpu_md_pstate_percent(struct cpu_info *ci)
{
+	struct acpicpu_softc *sc;
 	uint64_t aperf, mperf;
-	uint64_t rv = 0;
+	uint8_t rv = 0;
+	u_long flags;
-	/*
-	 * Read the IA32_APERF and IA32_MPERF counters. IA32_MPERF
-	 * increments at the rate of the fixed maximum frequency
-	 * configured during boot, whereas IA32_APERF counts at the
-	 * rate of the actual frequency. Note that the MSRs must be
-	 * read without delay, and that only the ratio between
-	 * IA32_APERF and IA32_MPERF is architecturally defined.
-	 *
-	 * The function thus returns the percentage of the actual
-	 * frequency in terms of the maximum frequency of the calling
-	 * CPU since the last call. A value zero implies an error.
-	 *
-	 * For further details, refer to:
-	 *
-	 *	Intel Corporation: Intel 64 and IA-32 Architectures
-	 *	Software Developer's Manual. Section 13.2, Volume 3A:
-	 *	System Programming Guide, Part 1. July, 2008.
-	 *
-	 *	Advanced Micro Devices: BIOS and Kernel Developer's
-	 *	Guide (BKDG) for AMD Family 10h Processors. Section
-	 *	2.4.5, Revision 3.48, April 2010.
-	 */
-	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P) == 0))
+	sc = acpicpu_sc[ci->ci_acpiid];
+
+	if (__predict_false(sc == NULL))
 		return 0;

 	if (__predict_false((sc->sc_flags & ACPICPU_FLAG_P_HWF) == 0))
 		return 0;

+	flags = acpicpu_md_lock(sc);
+
 	aperf = sc->sc_pstate_aperf;
 	mperf = sc->sc_pstate_mperf;

-	x86_disable_intr();
-
 	sc->sc_pstate_aperf = rdmsr(MSR_APERF);
 	sc->sc_pstate_mperf = rdmsr(MSR_MPERF);

-	x86_enable_intr();
-
 	aperf = sc->sc_pstate_aperf - aperf;
 	mperf = sc->sc_pstate_mperf - mperf;

 	if (__predict_true(mperf != 0))
 		rv = (aperf * 100) / mperf;

+	acpicpu_md_unlock(sc, flags);
return rv;
}
 static void
-acpicpu_md_pstate_percent_reset(struct acpicpu_softc *sc)
+acpicpu_md_pstate_percent_reset(struct cpu_info *ci)
 {
-	struct msr_rw_info msr;
-	uint64_t xc;
-
-	KASSERT((sc->sc_flags & ACPICPU_FLAG_P) != 0);
-	KASSERT((sc->sc_flags & ACPICPU_FLAG_P_HWF) != 0);
-
-	msr.msr_value = 0;
-	msr.msr_read = false;
-	msr.msr_type = MSR_APERF;
-
-	xc = xc_broadcast(0, (xcfunc_t)x86_msr_xcall, &msr, NULL);
-	xc_wait(xc);
-
-	msr.msr_value = 0;
-	msr.msr_read = false;
-	msr.msr_type = MSR_MPERF;
-
-	xc = xc_broadcast(0, (xcfunc_t)x86_msr_xcall, &msr, NULL);
-	xc_wait(xc);
+	struct acpicpu_softc *sc;
+	u_long flags;
+
+	sc = acpicpu_sc[ci->ci_acpiid];
+
+	if (__predict_false(sc == NULL))
+		return;
+
+	flags = acpicpu_md_lock(sc);
+
+	wrmsr(MSR_APERF, 0);
+	wrmsr(MSR_MPERF, 0);
+
+	sc->sc_pstate_aperf = 0;
+	sc->sc_pstate_mperf = 0;
+
+	acpicpu_md_unlock(sc, flags);
 }
int

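To make the APERF/MPERF ratio concrete: suppose that since the previous call IA32_APERF has advanced by 1.5e9 and IA32_MPERF by 2.0e9 (assumed figures, for illustration only). The computation in acpicpu_md_pstate_percent() then reduces to:

	aperf = 1500000000;		/* assumed delta of IA32_APERF */
	mperf = 2000000000;		/* assumed delta of IA32_MPERF */
	rv = (aperf * 100) / mperf;	/* 75: the CPU averaged 75 % of its maximum frequency */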
acpi_cpu.c

@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu.c,v 1.36 2011/03/03 19:24:43 jruoho Exp $ */
+/* $NetBSD: acpi_cpu.c,v 1.37 2011/03/05 09:47:19 jruoho Exp $ */
/*-
* Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.36 2011/03/03 19:24:43 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu.c,v 1.37 2011/03/05 09:47:19 jruoho Exp $");
#include <sys/param.h>
#include <sys/cpu.h>
@@ -187,6 +187,8 @@ acpicpu_attach(device_t parent, device_t self, void *aux)
KASSERT(sc->sc_node->ad_device == NULL);
sc->sc_node->ad_device = self;
+	__cpu_simple_lock_init(&sc->sc_lock);
mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);
acpicpu_cstate_attach(self);

acpi_cpu.h

@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu.h,v 1.36 2011/03/04 12:10:50 jruoho Exp $ */
+/* $NetBSD: acpi_cpu.h,v 1.37 2011/03/05 09:47:19 jruoho Exp $ */
/*-
* Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
@@ -223,6 +223,7 @@ struct acpicpu_softc {
uint32_t sc_tstate_max;
uint32_t sc_tstate_min;
+	__cpu_simple_lock_t	sc_lock;
kmutex_t sc_mtx;
uint32_t sc_cap;
uint32_t sc_ncpus;
@@ -267,8 +268,8 @@ int acpicpu_md_cstate_stop(void);
void acpicpu_md_cstate_enter(int, int);
int acpicpu_md_pstate_start(struct acpicpu_softc *);
int acpicpu_md_pstate_stop(void);
-int acpicpu_md_pstate_pss(struct acpicpu_softc *);
-uint8_t acpicpu_md_pstate_percent(struct acpicpu_softc *);
+int acpicpu_md_pstate_init(struct acpicpu_softc *);
+uint8_t acpicpu_md_pstate_percent(struct cpu_info *);
int acpicpu_md_pstate_get(struct acpicpu_softc *, uint32_t *);
int acpicpu_md_pstate_set(struct acpicpu_pstate *);
int acpicpu_md_tstate_get(struct acpicpu_softc *, uint32_t *);

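With the rename and the changed argument type, a machine-independent call site would be updated along these lines (a hypothetical sketch; the percent variable is illustrative, and the MD side now resolves the softc itself via acpicpu_sc[ci->ci_acpiid]):

	/* Before: both entry points took the softc. */
	rv = acpicpu_md_pstate_pss(sc);
	percent = acpicpu_md_pstate_percent(sc);

	/* After: initialization keeps the softc; the
	   percent query takes the CPU instead. */
	rv = acpicpu_md_pstate_init(sc);
	percent = acpicpu_md_pstate_percent(sc->sc_ci);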
acpi_cpu_pstate.c

@@ -1,4 +1,4 @@
-/* $NetBSD: acpi_cpu_pstate.c,v 1.44 2011/03/01 05:57:04 jruoho Exp $ */
+/* $NetBSD: acpi_cpu_pstate.c,v 1.45 2011/03/05 09:47:19 jruoho Exp $ */
/*-
* Copyright (c) 2010, 2011 Jukka Ruohonen <jruohonen@iki.fi>
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.44 2011/03/01 05:57:04 jruoho Exp $");
+__KERNEL_RCSID(0, "$NetBSD: acpi_cpu_pstate.c,v 1.45 2011/03/05 09:47:19 jruoho Exp $");
#include <sys/param.h>
#include <sys/kmem.h>
@@ -114,10 +114,9 @@ acpicpu_pstate_attach(device_t self)
aprint_debug_dev(self, "_PPC missing\n");
/*
- * Employ the XPSS structure by filling
- * it with MD information required for FFH.
+ * Carry out MD initialization.
*/
-	rv = acpicpu_md_pstate_pss(sc);
+	rv = acpicpu_md_pstate_init(sc);
if (rv != 0) {
rv = AE_SUPPORT;
@@ -209,7 +208,6 @@ acpicpu_pstate_start(device_t self)
fail:
sc->sc_flags &= ~ACPICPU_FLAG_P;
aprint_error_dev(self, "failed to start P-states (err %d)\n", rv);
}
@@ -641,10 +639,8 @@ acpicpu_pstate_pct(struct acpicpu_softc *sc)
goto out;
/*
- * In XPSS the control address can not be zero,
- * but the status address may be. In this case,
- * comparable to T-states, we can ignore the status
- * check during the P-state (FFH) transition.
+ * At the very least, mandate that
+ * XPSS supplies the control address.
*/
if (sc->sc_pstate_control.reg_addr == 0) {
rv = AE_AML_BAD_RESOURCE_LENGTH;