Refactored the MTRR code a bit: there is now a generic base that is used by
all CPU-specific implementations as much as possible.
AMD and Intel are now separated again, even though they are currently
equivalent apart from the CPU vendor detection.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15567 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2005-12-16 17:16:22 +00:00
parent d0d37bdfd1
commit 2058e6c1d4
6 changed files with 223 additions and 295 deletions
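
For context: a variable-range MTRR is a pair of MSRs. The PHYSBASE register
holds the page-aligned start address of the range plus the memory type in its
low byte; the PHYSMASK register holds a mask selecting which physical address
bits must match the base, plus a valid bit (bit 11). Range lengths must
therefore be powers of two. The following standalone sketch (not part of the
commit; the frame buffer address and 36-bit width are made-up illustration
values) shows the register contents the new generic_set_mtrr() would compute:

	// Illustration only: encode a 64 MB write-combined frame buffer range,
	// assuming 36 physical address bits and a 4 KB page size.
	#include <cstdint>
	#include <cstdio>

	int main()
	{
		const uint64_t pageMask = ~(uint64_t)(4096 - 1);
		const uint64_t physicalMask = ((1ULL << 36) - 1) & pageMask;
		const uint64_t validRange = 1ULL << 11;	// IA32_MTRR_VALID_RANGE

		uint64_t base = 0xe8000000;			// hypothetical frame buffer
		uint64_t length = 64 * 1024 * 1024;	// must be a power of two
		uint8_t type = 1;					// 1 = write-combining

		// Same math as generic_set_mtrr(): every address bit that must
		// match the base is set in the mask.
		uint64_t mask = ~(length - 1) & physicalMask;

		printf("PHYSBASE = %#llx\n", (unsigned long long)((base & pageMask) | type));
		printf("PHYSMASK = %#llx\n", (unsigned long long)(mask | validRange));
		return 0;
	}

Running this prints PHYSBASE = 0xe8000001 and PHYSMASK = 0xffc000800.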

File: Jamfile

@@ -4,7 +4,7 @@ UsePrivateHeaders kernel ;
 KernelAddon generic_x86 : kernel cpu :
 	generic_x86.cpp
+	amd.cpp
 	intel.cpp
-	# amd.cpp
 	via.cpp
 	;

File: amd.cpp

@@ -8,26 +8,15 @@
 #include "amd.h"
+#include "generic_x86.h"
 #include <OS.h>
 
-static uint32
-amd_count_mtrrs(void)
+static void
+amd_init_mtrrs(void)
 {
-	return 0;
-}
-
-static status_t
-amd_set_mtrr(uint32 index, addr_t base, addr_t length, uint32 type)
-{
-	return B_OK;
-}
-
-static status_t
-amd_unset_mtrr(uint32 index)
-{
-	return B_OK;
+	generic_init_mtrrs(generic_count_mtrrs());
 }
@@ -41,13 +30,9 @@ amd_init(void)
 	if ((info.cpu_type & B_CPU_x86_VENDOR_MASK) != B_CPU_AMD_x86)
 		return B_ERROR;
 
-	return B_OK;
-}
+	generic_mtrr_compute_physical_mask();
+	generic_dump_mtrrs(generic_count_mtrrs());
 
-static status_t
-amd_uninit(void)
-{
 	return B_OK;
 }
@@ -59,7 +44,7 @@ amd_stdops(int32 op, ...)
 		case B_MODULE_INIT:
 			return amd_init();
 		case B_MODULE_UNINIT:
-			return amd_uninit();
+			return B_OK;
 	}
 
 	return B_ERROR;
@@ -73,7 +58,9 @@ x86_cpu_module_info gAMDModule = {
 		amd_stdops,
 	},
 
-	amd_count_mtrrs,
-	amd_set_mtrr,
-	amd_unset_mtrr,
+	generic_count_mtrrs,
+	amd_init_mtrrs,
+	generic_set_mtrr,
+	generic_get_mtrr,
 };

File: generic_x86.cpp

@@ -7,14 +7,170 @@
 */
+#include "generic_x86.h"
 #include "intel.h"
 #include "amd.h"
 #include "via.h"
+
+//#define TRACE_MTRR
+#ifdef TRACE_MTRR
+#	define TRACE(x) dprintf x
+#else
+#	define TRACE(x) ;
+#endif
+
+#define IA32_MTRR_ENABLE		(1UL << 11)
+#define IA32_MTRR_ENABLE_FIXED	(1UL << 10)
+#define IA32_MTRR_VALID_RANGE	(1UL << 11)
+
+struct mtrr_capabilities {
+	mtrr_capabilities(uint64 value) { *(uint64 *)this = value; }
+
+	uint64 variable_ranges : 8;
+	uint64 supports_fixed : 1;
+	uint64 _reserved0 : 1;
+	uint64 supports_write_combined : 1;
+	uint64 _reserved1 : 53;
+};
+
+uint64 gPhysicalMask = 0;
+
+
+uint32
+generic_count_mtrrs(void)
+{
+	cpuid_info cpuInfo;
+	if (get_cpuid(&cpuInfo, 1, 0) != B_OK
+		|| (cpuInfo.eax_1.features & IA32_FEATURE_MTRR) == 0)
+		return 0;
+
+	mtrr_capabilities capabilities(x86_read_msr(IA32_MSR_MTRR_CAPABILITIES));
+	TRACE(("cpu has %ld variable range MTRRs.\n", capabilities.variable_ranges));
+	return capabilities.variable_ranges;
+}
+
+
+void
+generic_init_mtrrs(uint32 count)
+{
+	// disable and clear all MTRRs
+	// (we leave the fixed MTRRs as is)
+	// TODO: check if the fixed MTRRs are set on all CPUs identically?
+	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
+		x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & ~IA32_MTRR_ENABLE);
+
+	for (uint32 i = count; i-- > 0;) {
+		if (x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + i * 2) & IA32_MTRR_VALID_RANGE)
+			x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + i * 2, 0);
+	}
+
+	// but turn on variable MTRR functionality
+	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
+		x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) | IA32_MTRR_ENABLE);
+}
+
+
+void
+generic_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
+{
+	index *= 2;
+		// there are two registers per slot
+
+	uint64 mask = length - 1;
+	mask = ~mask & gPhysicalMask;
+
+	TRACE(("MTRR %ld: new mask %Lx\n", index, mask));
+	TRACE(("  mask test base: %Lx\n", mask & base));
+	TRACE(("  mask test middle: %Lx\n", mask & (base + length / 2)));
+	TRACE(("  mask test end: %Lx\n", mask & (base + length)));
+
+	// First, disable MTRR
+	x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index, 0);
+
+	if (base != 0 || mask != 0 || type != 0) {
+		// then fill in the new values, and enable it again
+		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index,
+			(base & ~(B_PAGE_SIZE - 1)) | type);
+		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index,
+			mask | IA32_MTRR_VALID_RANGE);
+	} else {
+		// reset base as well
+		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index, 0);
+	}
+}
+
+
+status_t
+generic_get_mtrr(uint32 index, uint64 *_base, uint64 *_length, uint8 *_type)
+{
+	uint64 mask = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index * 2);
+	if ((mask & IA32_MTRR_VALID_RANGE) == 0)
+		return B_ERROR;
+
+	uint64 base = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index * 2);
+
+	*_base = base & ~(B_PAGE_SIZE - 1);
+	*_length = (~mask & gPhysicalMask) + B_PAGE_SIZE;
+		// gPhysicalMask is page aligned, so the low bits of the
+		// range length have to be added back in
+	*_type = base & 0xff;
+
+	return B_OK;
+}
+
+
+status_t
+generic_mtrr_compute_physical_mask(void)
+{
+	uint32 bits = 36;
+
+	cpuid_info cpuInfo;
+	if (get_cpuid(&cpuInfo, 0x80000000, 0) == B_OK
+		&& (cpuInfo.eax_0.max_eax & 0xff) >= 8) {
+		get_cpuid(&cpuInfo, 0x80000008, 0);
+		bits = cpuInfo.regs.eax & 0xff;
+	}
+
+	gPhysicalMask = ((1ULL << bits) - 1) & ~(B_PAGE_SIZE - 1);
+
+	TRACE(("CPU has %ld physical address bits, physical mask is %016Lx\n",
+		bits, gPhysicalMask));
+	return B_OK;
+}
+
+
+void
+generic_dump_mtrrs(uint32 count)
+{
+	if (x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & IA32_MTRR_ENABLE) {
+		TRACE(("MTRR enabled\n"));
+	} else {
+		TRACE(("MTRR disabled\n"));
+	}
+
+	for (uint32 i = 0; i < count; i++) {
+		uint64 base;
+		uint64 length;
+		uint8 type;
+		if (generic_get_mtrr(i, &base, &length, &type) == B_OK) {
+			TRACE(("  %ld: 0x%Lx, 0x%Lx, %u\n", i, base, length, type));
+		} else {
+			TRACE(("  %ld: empty\n", i));
+		}
+	}
+}
+
+
 module_info *modules[] = {
 	(module_info *)&gIntelModule,
-//	(module_info *)&gAMDModule,
+	(module_info *)&gAMDModule,
 	(module_info *)&gVIAModule,
 	NULL
 };
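
The physical mask above is what makes the MSR math portable across CPUs with
different address widths. A rough user-space equivalent of
generic_mtrr_compute_physical_mask(), substituting the GCC/Clang <cpuid.h>
intrinsic for Haiku's get_cpuid() (an assumption made for illustration):

	#include <cpuid.h>
	#include <cstdint>
	#include <cstdio>

	static uint64_t compute_physical_mask()
	{
		uint32_t bits = 36;	// default for CPUs without leaf 0x80000008
		unsigned int eax, ebx, ecx, edx;

		// Leaf 0x80000000 reports the highest extended leaf; if leaf
		// 0x80000008 exists, its EAX[7:0] is the physical address width.
		if (__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx)
			&& (eax & 0xff) >= 8) {
			__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
			bits = eax & 0xff;
		}

		// Keep only the bits a page-aligned physical address can have.
		return ((1ULL << bits) - 1) & ~(uint64_t)(4096 - 1);
	}

	int main()
	{
		printf("physical mask: %#llx\n",
			(unsigned long long)compute_physical_mask());
		return 0;
	}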

File: generic_x86.h (new file)

@@ -0,0 +1,34 @@
+/*
+ * Copyright 2005, Haiku, Inc.
+ * Distributed under the terms of the MIT License.
+ *
+ * Authors:
+ *		Axel Dörfler, axeld@pinc-software.de
+ */
+#ifndef CPU_GENERIC_x86_H
+#define CPU_GENERIC_x86_H
+
+#include <SupportDefs.h>
+
+
+extern uint64 gPhysicalMask;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern uint32 generic_count_mtrrs(void);
+extern void generic_init_mtrrs(uint32 count);
+
+extern void generic_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type);
+extern status_t generic_get_mtrr(uint32 index, uint64 *_base, uint64 *_length,
+	uint8 *_type);
+
+extern status_t generic_mtrr_compute_physical_mask(void);
+extern void generic_dump_mtrrs(uint32 count);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	// CPU_GENERIC_x86_H
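
The header above is the entire contract between the generic base and the
vendor modules: a vendor module only adds its own detection plus whatever it
cannot delegate. A minimal sketch of such a module (the "acme" vendor is
hypothetical; the generic_* functions are the ones declared above, and this
mirrors what amd.cpp now does):

	#include "generic_x86.h"

	static void
	acme_init_mtrrs(void)
	{
		// reset all variable-range MTRRs, then re-enable them
		generic_init_mtrrs(generic_count_mtrrs());
	}

	static status_t
	acme_init(void)
	{
		// vendor detection via get_system_info() omitted here;
		// see amd.cpp and intel.cpp for the real thing
		generic_mtrr_compute_physical_mask();
		generic_dump_mtrrs(generic_count_mtrrs());
		return B_OK;
	}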

File: intel.cpp

@@ -8,121 +8,15 @@
 #include "intel.h"
+#include "generic_x86.h"
 
 #include <KernelExport.h>
 #include <OS.h>
 #include <arch_cpu.h>
 
-//#define TRACE_MTRR
-#ifdef TRACE_MTRR
-#	define TRACE(x) dprintf x
-#else
-#	define TRACE(x) ;
-#endif
-
-#define IA32_MTRR_ENABLE		(1UL << 11)
-#define IA32_MTRR_ENABLE_FIXED	(1UL << 10)
-#define IA32_MTRR_VALID_RANGE	(1UL << 11)
-
-struct mtrr_capabilities {
-	mtrr_capabilities(uint64 value) { *(uint64 *)this = value; }
-
-	uint64 variable_ranges : 8;
-	uint64 supports_fixed : 1;
-	uint64 _reserved0 : 1;
-	uint64 supports_write_combined : 1;
-	uint64 _reserved1 : 53;
-};
-
-static uint64 sPhysicalMask = 0;
-
-static uint32
-intel_count_mtrrs(void)
-{
-	cpuid_info cpuInfo;
-	if (get_cpuid(&cpuInfo, 1, 0) != B_OK
-		|| (cpuInfo.eax_1.features & IA32_FEATURE_MTRR) == 0)
-		return 0;
-
-	mtrr_capabilities capabilities(x86_read_msr(IA32_MSR_MTRR_CAPABILITIES));
-	TRACE(("cpu has %ld variable range MTRs.\n", capabilities.variable_ranges));
-	return capabilities.variable_ranges;
-}
-
 static void
 intel_init_mtrrs(void)
 {
-	// disable and clear all MTRRs
-	// (we leave the fixed MTRRs as is)
-	// TODO: check if the fixed MTRRs are set on all CPUs identically?
-	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
-		x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & ~IA32_MTRR_ENABLE);
-
-	for (uint32 i = intel_count_mtrrs(); i-- > 0;) {
-		if (x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + i * 2) & IA32_MTRR_VALID_RANGE)
-			x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + i * 2, 0);
-	}
-
-	// but turn on variable MTRR functionality
-	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
-		x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) | IA32_MTRR_ENABLE);
-}
-
-static void
-intel_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
-{
-	index *= 2;
-		// there are two registers per slot
-
-	uint64 mask = length - 1;
-	mask = ~mask & sPhysicalMask;
-
-	TRACE(("MTRR %ld: new mask %Lx)\n", index, mask));
-	TRACE(("  mask test base: %Lx)\n", mask & base));
-	TRACE(("  mask test middle: %Lx)\n", mask & (base + length / 2)));
-	TRACE(("  mask test end: %Lx)\n", mask & (base + length)));
-
-	// First, disable MTRR
-	x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index, 0);
-
-	if (base != 0 || mask != 0 || type != 0) {
-		// then fill in the new values, and enable it again
-		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index,
-			(base & ~(B_PAGE_SIZE - 1)) | type);
-		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index,
-			mask | IA32_MTRR_VALID_RANGE);
-	} else {
-		// reset base as well
-		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index, 0);
-	}
-}
-
-static status_t
-intel_get_mtrr(uint32 index, uint64 *_base, uint64 *_length, uint8 *_type)
-{
-	uint64 mask = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index * 2);
-	if ((mask & IA32_MTRR_VALID_RANGE) == 0)
-		return B_ERROR;
-
-	uint64 base = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index * 2);
-
-	*_base = base & ~(B_PAGE_SIZE - 1);
-	*_length = (~mask & sPhysicalMask) + 1;
-	*_type = base & 0xff;
-
-	return B_OK;
+	generic_init_mtrrs(generic_count_mtrrs());
 }
@@ -133,52 +27,12 @@ intel_init(void)
 	if (get_system_info(&info) != B_OK)
 		return B_ERROR;
 
-	if ((info.cpu_type & B_CPU_x86_VENDOR_MASK) != B_CPU_INTEL_x86
-		&& (info.cpu_type & B_CPU_x86_VENDOR_MASK) != B_CPU_AMD_x86)
+	if ((info.cpu_type & B_CPU_x86_VENDOR_MASK) != B_CPU_INTEL_x86)
 		return B_ERROR;
 
-	if (x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & IA32_MTRR_ENABLE) {
-		TRACE(("MTRR enabled\n"));
-	} else {
-		TRACE(("MTRR disabled\n"));
-	}
+	generic_mtrr_compute_physical_mask();
+	generic_dump_mtrrs(generic_count_mtrrs());
 
-	for (uint32 i = 0; i < intel_count_mtrrs(); i++) {
-		uint64 base;
-		uint64 length;
-		uint8 type;
-		if (intel_get_mtrr(i, &base, &length, &type) == B_OK) {
-			TRACE(("  %ld: 0x%Lx, 0x%Lx, %u\n", i, base, length, type));
-		} else {
-			TRACE(("  %ld: empty\n", i));
-		}
-	}
-
-	// TODO: dump fixed ranges as well
-
-	// get number of physical address bits
-	uint32 bits = 36;
-	cpuid_info cpuInfo;
-	if (get_cpuid(&cpuInfo, 0x80000000, 0) == B_OK
-		&& cpuInfo.eax_0.max_eax & 0xff >= 8) {
-		get_cpuid(&cpuInfo, 0x80000008, 0);
-		bits = cpuInfo.regs.eax & 0xff;
-	}
-
-	sPhysicalMask = ((1ULL << bits) - 1) & ~(B_PAGE_SIZE - 1);
-
-	TRACE(("CPU has %ld physical address bits, physical mask is %016Lx\n",
-		bits, sPhysicalMask));
-
-	return B_OK;
-}
-
-static status_t
-intel_uninit(void)
-{
 	return B_OK;
 }
@@ -190,7 +44,7 @@ intel_stdops(int32 op, ...)
 		case B_MODULE_INIT:
 			return intel_init();
 		case B_MODULE_UNINIT:
-			return intel_uninit();
+			return B_OK;
 	}
 
 	return B_ERROR;
@@ -204,9 +58,9 @@ x86_cpu_module_info gIntelModule = {
 		intel_stdops,
 	},
 
-	intel_count_mtrrs,
+	generic_count_mtrrs,
 	intel_init_mtrrs,
-	intel_set_mtrr,
-	intel_get_mtrr,
+	generic_set_mtrr,
+	generic_get_mtrr,
 };

File: via.cpp

@@ -8,26 +8,9 @@
 #include "via.h"
+#include "generic_x86.h"
 
 #include <KernelExport.h>
 #include <OS.h>
 #include <arch_cpu.h>
 
-#define TRACE_MTRR
-#ifdef TRACE_MTRR
-#	define TRACE(x) dprintf x
-#else
-#	define TRACE(x) ;
-#endif
-
-#define IA32_MTRR_ENABLE		(1UL << 11)
-#define IA32_MTRR_ENABLE_FIXED	(1UL << 10)
-#define IA32_MTRR_VALID_RANGE	(1UL << 11)
-
-static uint64 kPhysicalMask = ((1ULL << 36) - 1) & ~(B_PAGE_SIZE - 1);;
 
 static uint32
@@ -46,71 +29,7 @@ via_count_mtrrs(void)
 static void
 via_init_mtrrs(void)
 {
-	// disable and clear all MTRRs
-	// (we leave the fixed MTRRs as is)
-	// TODO: check if the fixed MTRRs are set on all CPUs identically?
-	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
-		x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & ~IA32_MTRR_ENABLE);
-
-	for (uint32 i = via_count_mtrrs(); i-- > 0;) {
-		if (x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + i * 2) & IA32_MTRR_VALID_RANGE)
-			x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + i * 2, 0);
-	}
-
-	// but turn on variable MTRR functionality
-	x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
-		x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) | IA32_MTRR_ENABLE);
-}
-
-static void
-via_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
-{
-	index *= 2;
-		// there are two registers per slot
-
-	uint64 mask = length - 1;
-	mask = ~mask & kPhysicalMask;
-
-	TRACE(("MTRR %ld: new mask %Lx)\n", index, mask));
-	TRACE(("  mask test base: %Lx)\n", mask & base));
-	TRACE(("  mask test middle: %Lx)\n", mask & (base + length / 2)));
-	TRACE(("  mask test end: %Lx)\n", mask & (base + length)));
-
-	// First, disable MTRR
-	x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index, 0);
-
-	if (base != 0 || mask != 0 || type != 0) {
-		// then fill in the new values, and enable it again
-		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index,
-			(base & ~(B_PAGE_SIZE - 1)) | type);
-		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index,
-			mask | IA32_MTRR_VALID_RANGE);
-	} else {
-		// reset base as well
-		x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index, 0);
-	}
-}
-
-static status_t
-via_get_mtrr(uint32 index, uint64 *_base, uint64 *_length, uint8 *_type)
-{
-	uint64 mask = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index * 2);
-	if ((mask & IA32_MTRR_VALID_RANGE) == 0)
-		return B_ERROR;
-
-	uint64 base = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index * 2);
-
-	*_base = base & ~(B_PAGE_SIZE - 1);
-	*_length = (~mask & kPhysicalMask) + 1;
-	*_type = base & 0xff;
-
-	return B_OK;
+	generic_init_mtrrs(via_count_mtrrs());
 }
@@ -124,32 +43,10 @@ via_init(void)
 	if ((info.cpu_type & B_CPU_x86_VENDOR_MASK) != B_CPU_VIA_IDT_x86)
 		return B_ERROR;
 
-	if (x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & IA32_MTRR_ENABLE) {
-		TRACE(("MTRR enabled\n"));
-	} else {
-		TRACE(("MTRR disabled\n"));
-	}
+	// current VIA CPUs always have 36 bit (or less?)
+	gPhysicalMask = ((1ULL << 36) - 1) & ~(B_PAGE_SIZE - 1);
 
-	for (uint32 i = 0; i < via_count_mtrrs(); i++) {
-		uint64 base;
-		uint64 length;
-		uint8 type;
-		if (via_get_mtrr(i, &base, &length, &type) == B_OK) {
-			TRACE(("  %ld: 0x%Lx, 0x%Lx, %u\n", i, base, length, type));
-		} else {
-			TRACE(("  %ld: empty\n", i));
-		}
-	}
-
-	// TODO: dump fixed ranges as well
-
-	return B_OK;
-}
-
-static status_t
-via_uninit(void)
-{
+	generic_dump_mtrrs(generic_count_mtrrs());
 	return B_OK;
 }
@@ -161,7 +58,7 @@ via_stdops(int32 op, ...)
 		case B_MODULE_INIT:
 			return via_init();
 		case B_MODULE_UNINIT:
-			return via_uninit();
+			return B_OK;
 	}
 
 	return B_ERROR;
@@ -178,6 +75,6 @@ x86_cpu_module_info gVIAModule = {
 	via_count_mtrrs,
 	via_init_mtrrs,
-	via_set_mtrr,
-	via_get_mtrr,
+	generic_set_mtrr,
+	generic_get_mtrr,
 };
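
Decoding works the same way in reverse: generic_get_mtrr() reads the two MSRs
back, rejects slots without the valid bit, and recovers base, length, and
type. A standalone sketch of that inverse step, reusing the 64 MB example
values from the note at the top (again an illustration, not commit code):

	#include <cstdint>
	#include <cstdio>

	int main()
	{
		const uint64_t pageMask = ~(uint64_t)(4096 - 1);
		const uint64_t physicalMask = ((1ULL << 36) - 1) & pageMask;

		uint64_t physBase = 0xe8000001;		// base | type
		uint64_t physMask = 0xffc000800;	// mask | valid bit

		if ((physMask & (1ULL << 11)) == 0)	// IA32_MTRR_VALID_RANGE
			return 1;						// slot is empty

		uint64_t base = physBase & pageMask;
		uint64_t length = (~physMask & physicalMask) + 4096;
			// physicalMask is page aligned, so add the page size back in
		uint8_t type = physBase & 0xff;

		printf("base %#llx, length %#llx, type %u\n",
			(unsigned long long)base, (unsigned long long)length, type);
		return 0;
	}

This prints base 0xe8000000, length 0x4000000 (64 MB), type 1.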