The short story: we now have MTRR support on Intel and AMD CPUs (the latter
has not yet been tested, though - I'll do this after this commit):
* Removed the arch_memory_type stuff from vm_area; since there are only 8 memory
  ranges on x86, it's simply overkill. The MTRR code now remembers the area ID
  and finds the MTRR that way (it could also iterate over the existing MTRRs).
* Introduced some post_modules() init functions.
* If the other x86 CPUs out there don't differ a lot, MTRR functionality might
  be put back into the kernel.
* x86_write_msr() was broken: it wrote the 64 bit number with the 32 bit words
  switched - it took me some time (and lots of #GPs) to figure that one out
  (see the wrmsr sketch below).
* Removed the macro read_ebp() and introduced a function x86_read_ebp()
  (it's not really a time-critical call).
* Followed the Intel docs on how to change MTRRs (symmetrically on all CPUs
  with caches turned off) - the sequence is sketched below.
* Asking for memory types will automatically round the requested length up to
  a power of two (see the rounding sketch below) - note that BeOS seems to
  behave the same way, although that's not really very clean.
* Fixed MTRRs are ignored for now - we should at least make sure that they
  are identical on all CPUs (or turn them off entirely), even though I'd
  prefer the BIOS range to be uncacheable, which we don't enforce yet.
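
For the record, a minimal sketch of the wrmsr register convention that caused
those #GPs - the CPU expects the low 32 bits of the value in EAX and the high
32 bits in EDX. The function name and the use of GCC inline assembly here are
illustrative only; the kernel's actual implementation lives in arch_x86.S:

static inline void
write_msr_sketch(uint32 registerNumber, uint64 value)
{
	asm volatile("wrmsr" : : "c" (registerNumber),
		"a" ((uint32)value),			// low 32 bits go into EAX
		"d" ((uint32)(value >> 32)));	// high 32 bits go into EDX
}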
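
The Intel-documented MTRR change sequence boils down to the following steps,
executed in lock-step on every CPU. This is a rough sketch, not the complete
implementation; it mirrors disable_caches()/enable_caches() in the diff below,
where the CR0_* constants and the helper functions are defined:

// enter no-fill cache mode (CD = 1, NW = 0) and flush the caches
x86_write_cr0((x86_read_cr0() | CR0_CACHE_DISABLE) & ~CR0_NOT_WRITE_THROUGH);
wbinvd();
arch_cpu_global_TLB_invalidate();

// ... change the MTRRs here ...

// flush once more, then re-enable the caches
wbinvd();
arch_cpu_global_TLB_invalidate();
x86_write_cr0(x86_read_cr0() & ~(CR0_CACHE_DISABLE | CR0_NOT_WRITE_THROUGH));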
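
And the length rounding: MTRRs can only describe power-of-two sized,
size-aligned ranges, so the requested length is widened to the next power of
two (one page minimum), and the base has to be aligned to the resulting
length. A sketch of what nearest_power()/set_memory_type() in the diff below
implement (types simplified to uint64 here):

static uint64
nearest_power(uint64 value)
{
	uint64 power = 1ULL << 12;
		// 12 bits is the smallest supported alignment/length

	while (value > power)
		power <<= 1;

	return power;
}

// usage: length = nearest_power(length);
// an unaligned base, i.e. (base & (length - 1)) != 0, is rejected
// with B_BAD_VALUE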



git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15528 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2005-12-13 16:34:29 +00:00
parent 57eab4408b
commit 51a3c450be
18 changed files with 392 additions and 255 deletions


@ -1,5 +1,5 @@
/*
* Copyright 2002-2004, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -22,6 +22,7 @@ extern "C" {
status_t arch_cpu_preboot_init(kernel_args *args);
status_t arch_cpu_init(kernel_args *args);
status_t arch_cpu_init_post_vm(kernel_args *args);
status_t arch_cpu_init_post_modules(kernel_args *args);
status_t arch_cpu_shutdown(bool reboot);
void arch_cpu_invalidate_TLB_range(addr_t start, addr_t end);


@ -5,12 +5,4 @@
#ifndef _KERNEL_ARCH_PPC_VM_TYPES_H
#define _KERNEL_ARCH_PPC_VM_TYPES_H
#include <SupportDefs.h>
struct arch_vm_memory_type {
uint32 dummy;
};
#endif /* _KERNEL_ARCH_PPC_6VM_TYPES_H */
#endif /* _KERNEL_ARCH_PPC_VM_TYPES_H */


@ -20,9 +20,9 @@ extern "C" {
status_t arch_vm_init(struct kernel_args *args);
status_t arch_vm_init_post_area(struct kernel_args *args);
status_t arch_vm_init_end(struct kernel_args *args);
status_t arch_vm_init_post_modules(kernel_args *args);
void arch_vm_aspace_swap(vm_address_space *aspace);
bool arch_vm_supports_protection(uint32 protection);
void arch_vm_init_area(vm_area *area);
status_t arch_vm_set_memory_type(vm_area *area, addr_t physicalBase, uint32 type);
void arch_vm_unset_memory_type(vm_area *area);


@ -21,16 +21,21 @@
#define IA32_MSR_MTRR_PHYSICAL_BASE_0 0x200
#define IA32_MSR_MTRR_PHYSICAL_MASK_0 0x201
// Memory type ranges
#define IA32_MTR_UNCACHED 0
#define IA32_MTR_WRITE_COMBINED 1
#define IA32_MTR_WRITE_THROUGH 4
#define IA32_MTR_WRITE_PROTECTED 5
#define IA32_MTR_WRITE_BACK 6
typedef struct x86_cpu_module_info {
module_info info;
uint32 (*count_mtrrs)(void);
status_t (*enable_mtrrs)(void);
void (*disable_mtrrs)(void);
void (*init_mtrrs)(void);
void (*set_mtrr)(uint32 index, addr_t base, addr_t length, uint32 type);
status_t (*get_mtrr)(uint32 index, addr_t *_base, addr_t *_length,
uint32 *_type);
void (*set_mtrr)(uint32 index, uint64 base, uint64 length, uint8 type);
status_t (*get_mtrr)(uint32 index, uint64 *_base, uint64 *_length,
uint8 *_type);
} x86_cpu_module_info;
@ -96,19 +101,17 @@ void i386_frstor(const void *fpu_state);
void i386_fxrstor(const void *fpu_state);
void i386_fsave_swap(void *old_fpu_state, const void *new_fpu_state);
void i386_fxsave_swap(void *old_fpu_state, const void *new_fpu_state);
uint32 x86_read_ebp();
uint32 x86_read_cr0();
void x86_write_cr0(uint32 value);
uint64 x86_read_msr(uint32 registerNumber);
void x86_write_msr(uint32 registerNumber, uint64 value);
void x86_set_task_gate(int32 n, int32 segment);
uint32 x86_count_mtrrs(void);
status_t x86_enable_mtrrs(void);
status_t x86_disable_mtrrs(void);
status_t x86_set_mtrr(uint32 index, addr_t base, addr_t length, uint32 type);
status_t x86_get_mtrr(uint32 index, addr_t *_base, addr_t *_length, uint32 *_type);
void x86_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type);
status_t x86_get_mtrr(uint32 index, uint64 *_base, uint64 *_length, uint8 *_type);
struct tss *x86_get_main_tss(void);
#define read_ebp(value) \
__asm__("movl %%ebp,%0" : "=r" (value))
#define read_cr3(value) \
__asm__("movl %%cr3,%0" : "=r" (value))


@ -5,13 +5,4 @@
#ifndef _KERNEL_ARCH_x86_VM_TYPES_H
#define _KERNEL_ARCH_x86_VM_TYPES_H
#include <SupportDefs.h>
struct arch_vm_memory_type {
uint16 type;
uint16 index;
};
#endif /* _KERNEL_ARCH_x86_VM_TYPES_H */


@ -37,6 +37,7 @@ extern "C" {
status_t cpu_preboot_init(struct kernel_args *args);
status_t cpu_init(struct kernel_args *args);
status_t cpu_init_post_vm(struct kernel_args *args);
status_t cpu_init_post_modules(struct kernel_args *args);
cpu_ent *get_cpu_struct(void);
extern inline cpu_ent *get_cpu_struct(void) { return &cpu[smp_get_current_cpu()]; }


@ -24,6 +24,7 @@ extern "C" {
status_t vm_init(kernel_args *args);
status_t vm_init_post_sem(struct kernel_args *args);
status_t vm_init_post_thread(struct kernel_args *args);
status_t vm_init_post_modules(struct kernel_args *args);
status_t vm_aspace_init(void);
status_t vm_aspace_init_post_sem(void);
void vm_free_kernel_args(kernel_args *args);


@ -83,9 +83,9 @@ typedef struct vm_area {
addr_t base;
addr_t size;
uint32 protection;
uint32 wiring;
uint16 wiring;
uint16 memory_type;
int32 ref_count;
struct arch_vm_memory_type memory_type;
struct vm_cache_ref *cache_ref;
off_t cache_offset;
@ -168,6 +168,8 @@ enum {
PHYSICAL_PAGE_CAN_WAIT,
};
#define MEMORY_TYPE_SHIFT 28
// additional protection flags
// Note: the VM probably won't support all combinations - it will try
// its best, but create_area() will fail if it has to.


@ -14,11 +14,21 @@
#include <arch_cpu.h>
//#define TRACE_MTRR
#ifdef TRACE_MTRR
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
#define IA32_MTRR_FEATURE (1UL << 12)
#define IA32_MTRR_ENABLE (1UL << 11)
#define IA32_MTRR_ENABLE_FIXED (1UL << 10)
#define IA32_MTRR_VALID_RANGE (1UL << 11)
#define MTRR_MASK (0xffffffff & ~(B_PAGE_SIZE - 1))
struct mtrr_capabilities {
mtrr_capabilities(uint64 value) { *(uint64 *)this = value; }
@ -30,6 +40,9 @@ struct mtrr_capabilities {
};
static uint64 sPhysicalMask = 0;
static uint32
intel_count_mtrrs(void)
{
@ -39,63 +52,76 @@ intel_count_mtrrs(void)
return 0;
mtrr_capabilities capabilities(x86_read_msr(IA32_MSR_MTRR_CAPABILITIES));
TRACE(("cpu has %ld variable range MTRs.\n", capabilities.variable_ranges));
return capabilities.variable_ranges;
}
static status_t
intel_enable_mtrrs(void)
static void
intel_init_mtrrs(void)
{
cpuid_info cpuInfo;
if (get_cpuid(&cpuInfo, 1, 0) != B_OK
|| (cpuInfo.eax_1.features & IA32_MTRR_FEATURE) == 0)
return B_NOT_SUPPORTED;
// disable and clear all MTRRs
// (we leave the fixed MTRRs as is)
// TODO: check if the fixed MTRRs are set on all CPUs identically?
x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & ~IA32_MTRR_ENABLE);
for (uint32 i = intel_count_mtrrs(); i-- > 0;) {
if (x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + i * 2) & IA32_MTRR_VALID_RANGE)
x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + i * 2, 0);
}
// but turn on variable MTRR functionality
x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) | IA32_MTRR_ENABLE);
return B_OK;
}
static void
intel_disable_mtrrs(void)
intel_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
{
x86_write_msr(IA32_MSR_MTRR_DEFAULT_TYPE,
x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & ~IA32_MTRR_ENABLE);
}
index *= 2;
// there are two registers per slot
uint64 mask = length - 1;
mask = ~mask & sPhysicalMask;
static void
intel_set_mtrr(uint32 index, addr_t base, addr_t length, uint32 type)
{
if (base != 0 && length != 0) {
// enable MTRR
TRACE(("MTRR %ld: new mask %Lx)\n", index, mask));
TRACE((" mask test base: %Lx)\n", mask & base));
TRACE((" mask test middle: %Lx)\n", mask & (base + length / 2)));
TRACE((" mask test end: %Lx)\n", mask & (base + length)));
// First, disable MTRR
x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index, 0);
if (base != 0 || mask != 0 || type != 0) {
// then fill in the new values, and enable it again
x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index,
(base & ~(B_PAGE_SIZE - 1)) | type);
x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index,
(length & ~(B_PAGE_SIZE - 1)) | IA32_MTRR_VALID_RANGE);
mask | IA32_MTRR_VALID_RANGE);
} else {
// disable MTRR
x86_write_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index, 0);
// reset base as well
x86_write_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index, 0);
}
}
static status_t
intel_get_mtrr(uint32 index, addr_t *_base, addr_t *_length, uint32 *_type)
intel_get_mtrr(uint32 index, uint64 *_base, uint64 *_length, uint8 *_type)
{
uint64 base = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index);
uint64 mask = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index);
uint64 mask = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_MASK_0 + index * 2);
if ((mask & IA32_MTRR_VALID_RANGE) == 0)
return B_ERROR;
uint64 base = x86_read_msr(IA32_MSR_MTRR_PHYSICAL_BASE_0 + index * 2);
*_base = base & ~(B_PAGE_SIZE - 1);
*_length = mask & ~(B_PAGE_SIZE - 1);
*_length = (~mask & sPhysicalMask) + 1;
*_type = base & 0xff;
return B_OK;
@ -110,27 +136,43 @@ intel_init(void)
return B_ERROR;
if ((info.cpu_type & B_CPU_x86_VENDOR_MASK) != B_CPU_INTEL_x86
|| (info.cpu_type & B_CPU_x86_VENDOR_MASK) != B_CPU_AMD_x86)
&& (info.cpu_type & B_CPU_x86_VENDOR_MASK) != B_CPU_AMD_x86)
return B_ERROR;
if (x86_read_msr(IA32_MSR_MTRR_DEFAULT_TYPE) & IA32_MTRR_ENABLE)
dprintf("MTRR enabled\n");
TRACE(("MTRR enabled\n"));
else
dprintf("MTRR disabled\n");
TRACE(("MTRR disabled\n"));
for (uint32 i = 0; i < intel_count_mtrrs(); i++) {
addr_t base;
addr_t length;
uint32 type;
uint64 base;
uint64 length;
uint8 type;
if (intel_get_mtrr(i, &base, &length, &type) == B_OK)
dprintf(" %ld: %p, %p, %ld\n", i, (void *)base, (void *)length, type);
TRACE((" %ld: 0x%Lx, 0x%Lx, %u\n", i, base, length, type));
else
dprintf(" %ld: empty\n", i);
TRACE((" %ld: empty\n", i));
}
// don't open it just now
return B_ERROR;
// return B_OK;
// TODO: dump fixed ranges as well
// get number of physical address bits
uint32 bits = 36;
cpuid_info cpuInfo;
if (get_cpuid(&cpuInfo, 0x80000000, 0) == B_OK
&& cpuInfo.eax_0.max_eax & 0xff >= 8) {
get_cpuid(&cpuInfo, 0x80000008, 0);
bits = cpuInfo.regs.eax & 0xff;
}
sPhysicalMask = ((1ULL << bits) - 1) & ~(B_PAGE_SIZE - 1);
TRACE(("CPU has %ld physical address bits, physical mask is %016Lx\n",
bits, sPhysicalMask));
return B_OK;
}
@ -163,8 +205,7 @@ x86_cpu_module_info gIntelModule = {
},
intel_count_mtrrs,
intel_enable_mtrrs,
intel_disable_mtrrs,
intel_init_mtrrs,
intel_set_mtrr,
intel_get_mtrr,


@ -100,6 +100,13 @@ arch_vm_init_end(kernel_args *args)
}
status_t
arch_vm_init_post_modules(kernel_args *args)
{
return B_OK;
}
void
arch_vm_aspace_swap(vm_address_space *aspace)
{
@ -113,12 +120,6 @@ arch_vm_supports_protection(uint32 protection)
}
void
arch_vm_init_area(vm_area *area)
{
}
void
arch_vm_unset_memory_type(vm_area *area)
{


@ -22,6 +22,18 @@
#include <stdlib.h>
#include <stdio.h>
#define CR0_CACHE_DISABLE (1UL << 30)
#define CR0_NOT_WRITE_THROUGH (1UL << 29)
struct set_mtrr_parameter {
int32 index;
uint64 base;
uint64 length;
uint8 type;
};
extern void reboot(void);
// from arch_x86.S
@ -29,6 +41,7 @@ static struct tss **sTSS;
//static struct tss **sDoubleFaultTSS;
struct tss **sDoubleFaultTSS;
static int *sIsTSSLoaded;
static int32 sWaitAllCPUs;
segment_descriptor *gGDT = NULL;
@ -48,98 +61,103 @@ x86_get_main_tss(void)
}
static x86_cpu_module_info *
load_cpu_module(void)
/** Disable CPU caches, and invalidate them. */
static void
disable_caches()
{
if (sCpuModule != NULL)
return sCpuModule;
x86_write_cr0((x86_read_cr0() | CR0_CACHE_DISABLE) & ~CR0_NOT_WRITE_THROUGH);
wbinvd();
arch_cpu_global_TLB_invalidate();
}
// find model specific CPU module
if (gBootDevice > 0) {
void *cookie = open_module_list("cpu");
/** Invalidate CPU caches, and enable them. */
while (true) {
char name[B_FILE_NAME_LENGTH];
size_t nameLength = sizeof(name);
static void
enable_caches()
{
wbinvd();
arch_cpu_global_TLB_invalidate();
x86_write_cr0(x86_read_cr0() & ~(CR0_CACHE_DISABLE | CR0_NOT_WRITE_THROUGH));
}
if (read_next_module_name(cookie, name, &nameLength) != B_OK
|| get_module(name, (module_info **)&sCpuModule) == B_OK)
break;
}
close_module_list(cookie);
} else {
// we're in early boot mode, let's use get_loaded_module
static void
set_mtrr(void *_parameter, int cpu)
{
struct set_mtrr_parameter *parameter = (struct set_mtrr_parameter *)_parameter;
uint32 cookie = 0;
// wait until all CPUs have arrived here
atomic_add(&sWaitAllCPUs, 1);
while (sWaitAllCPUs != smp_get_num_cpus())
;
while (true) {
char name[B_FILE_NAME_LENGTH];
size_t nameLength = sizeof(name);
disable_caches();
if (get_next_loaded_module_name(&cookie, name, &nameLength) != B_OK)
break;
if (strncmp(name, "cpu", 3))
continue;
sCpuModule->set_mtrr(parameter->index, parameter->base, parameter->length,
parameter->type);
if (get_module(name, (module_info **)&sCpuModule) == B_OK)
break;
}
}
enable_caches();
return sCpuModule;
// wait until all CPUs have arrived here
atomic_add(&sWaitAllCPUs, -1);
while (sWaitAllCPUs != 0)
;
}
static void
init_mtrrs(void *_unused, int cpu)
{
// wait until all CPUs have arrived here
atomic_add(&sWaitAllCPUs, 1);
while (sWaitAllCPUs != smp_get_num_cpus())
;
disable_caches();
sCpuModule->init_mtrrs();
enable_caches();
// wait until all CPUs have arrived here
atomic_add(&sWaitAllCPUs, -1);
while (sWaitAllCPUs != 0)
;
}
uint32
x86_count_mtrrs(void)
{
if (load_cpu_module() == NULL)
if (sCpuModule == NULL)
return 0;
return sCpuModule->count_mtrrs();
}
status_t
x86_enable_mtrrs(void)
void
x86_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
{
if (load_cpu_module() == NULL)
return B_NOT_SUPPORTED;
cpu_status state;
return sCpuModule->enable_mtrrs();
struct set_mtrr_parameter parameter;
parameter.index = index;
parameter.base = base;
parameter.length = length;
parameter.type = type;
call_all_cpus(&set_mtrr, &parameter);
}
status_t
x86_disable_mtrrs(void)
x86_get_mtrr(uint32 index, uint64 *_base, uint64 *_length, uint8 *_type)
{
if (load_cpu_module() == NULL)
return B_NOT_SUPPORTED;
sCpuModule->disable_mtrrs();
return B_OK;
}
status_t
x86_set_mtrr(uint32 index, addr_t base, addr_t length, uint32 type)
{
if (load_cpu_module() == NULL)
return B_NOT_SUPPORTED;
sCpuModule->set_mtrr(index, base, length, type);
return B_OK;
}
status_t
x86_get_mtrr(uint32 index, addr_t *_base, addr_t *_length, uint32 *_type)
{
if (load_cpu_module() == NULL)
return B_NOT_SUPPORTED;
// the MTRRs are identical on all CPUs, so it doesn't matter
// on which CPU this runs
return sCpuModule->get_mtrr(index, _base, _length, _type);
}
@ -285,6 +303,34 @@ arch_cpu_init_post_vm(kernel_args *args)
}
status_t
arch_cpu_init_post_modules(kernel_args *args)
{
// initialize CPU module
void *cookie = open_module_list("cpu");
while (true) {
char name[B_FILE_NAME_LENGTH];
size_t nameLength = sizeof(name);
if (read_next_module_name(cookie, name, &nameLength) != B_OK
|| get_module(name, (module_info **)&sCpuModule) == B_OK)
break;
}
close_module_list(cookie);
if (sCpuModule == NULL)
return B_OK;
// initialize MTRRs
call_all_cpus(&init_mtrrs, NULL);
return B_OK;
}
void
i386_set_tss_and_kstack(addr_t kstack)
{


@ -116,7 +116,7 @@ stack_trace(int argc, char **argv)
if (argc < 2) {
thread = thread_get_current_thread();
read_ebp(ebp);
ebp = x86_read_ebp();
} else {
thread_id id = strtoul(argv[1], NULL, 0);
thread = thread_get_thread_struct_locked(id);
@ -234,8 +234,7 @@ arch_debug_get_caller(void)
// It looks like you would get the wrong stack frame here, but
// since read_ebp() is an assembler inline macro, GCC seems to
// be smart enough to save its original value.
struct stack_frame *frame;
read_ebp(frame);
struct stack_frame *frame = (struct stack_frame *)x86_read_ebp();
return (void *)frame->return_address;
}


@ -8,6 +8,7 @@
#include <KernelExport.h>
#include <smp.h>
#include <vm.h>
#include <vm_page.h>
#include <vm_priv.h>
@ -30,29 +31,12 @@
#endif
struct set_mtrr_parameter {
int32 index;
addr_t base;
addr_t length;
uint8 type;
};
#define kMaxMemoryTypeRegisters 32
static uint32 sMTRRBitmap;
static int32 sMTRRCount;
static spinlock sMTRRLock;
static status_t
init_mtrr(void)
{
sMTRRCount = x86_count_mtrrs();
if (sMTRRCount == 0)
return B_NOT_SUPPORTED;
sMTRRBitmap = 0;
return B_OK;
}
static uint32 sMemoryTypeBitmap;
static int32 sMemoryTypeIDs[kMaxMemoryTypeRegisters];
static int32 sMemoryTypeRegisterCount;
static spinlock sMemoryTypeLock;
static int32
@ -61,22 +45,22 @@ allocate_mtrr(void)
int32 index;
cpu_status state = disable_interrupts();
acquire_spinlock(&sMTRRLock);
acquire_spinlock(&sMemoryTypeLock);
// find free bit
for (index = 0; index < 32; index++) {
if (sMTRRBitmap & (1UL << index))
for (index = 0; index < sMemoryTypeRegisterCount; index++) {
if (sMemoryTypeBitmap & (1UL << index))
continue;
sMTRRBitmap |= 1UL << index;
sMemoryTypeBitmap |= 1UL << index;
release_spinlock(&sMTRRLock);
release_spinlock(&sMemoryTypeLock);
restore_interrupts(state);
return index;
}
release_spinlock(&sMTRRLock);
release_spinlock(&sMemoryTypeLock);
restore_interrupts(state);
return -1;
@ -87,31 +71,82 @@ static void
free_mtrr(int32 index)
{
cpu_status state = disable_interrupts();
acquire_spinlock(&sMTRRLock);
acquire_spinlock(&sMemoryTypeLock);
sMTRRBitmap &= ~(1UL << index);
sMemoryTypeBitmap &= ~(1UL << index);
release_spinlock(&sMTRRLock);
release_spinlock(&sMemoryTypeLock);
restore_interrupts(state);
}
static void
unset_mtrr(void *_index, int cpu)
static uint64
nearest_power(addr_t value)
{
int32 index = (int32)_index;
uint64 power = 1UL << 12;
// 12 bits is the smallest supported alignment/length
x86_set_mtrr(index, 0, 0, 0);
while (value > power)
power <<= 1;
return power;
}
static void
set_mtrr(void *_parameter, int cpu)
static status_t
set_memory_type(int32 id, uint64 base, uint64 length, uint32 type)
{
struct set_mtrr_parameter *parameter = (struct set_mtrr_parameter *)_parameter;
int32 index;
x86_set_mtrr(parameter->index, parameter->base, parameter->length,
parameter->type);
if (type == 0)
return B_OK;
switch (type) {
case B_MTR_UC:
type = IA32_MTR_UNCACHED;
break;
case B_MTR_WC:
type = IA32_MTR_WRITE_COMBINED;
break;
case B_MTR_WT:
type = IA32_MTR_WRITE_THROUGH;
break;
case B_MTR_WP:
type = IA32_MTR_WRITE_PROTECTED;
break;
case B_MTR_WB:
type = IA32_MTR_WRITE_BACK;
break;
default:
return B_BAD_VALUE;
}
if (sMemoryTypeRegisterCount == 0)
return B_NOT_SUPPORTED;
// length must be a power of 2; just round it up to the next value
length = nearest_power(length);
if (length + base <= base) {
// 4GB overflow
return B_BAD_VALUE;
}
// base must be aligned to the length
if (base & (length - 1))
return B_BAD_VALUE;
index = allocate_mtrr();
if (index < 0)
return B_ERROR;
TRACE(("allocate MTRR slot %ld, base = %Lx, length = %Lx\n", index,
base, length));
sMemoryTypeIDs[index] = id;
x86_set_mtrr(index, base, length, type);
return B_OK;
}
@ -161,6 +196,39 @@ arch_vm_init_end(kernel_args *args)
}
status_t
arch_vm_init_post_modules(kernel_args *args)
{
void *cookie;
int32 i;
// the x86 CPU modules are now accessible
sMemoryTypeRegisterCount = x86_count_mtrrs();
if (sMemoryTypeRegisterCount == 0)
return B_OK;
// not very likely, but play safe here
if (sMemoryTypeRegisterCount > kMaxMemoryTypeRegisters)
sMemoryTypeRegisterCount = kMaxMemoryTypeRegisters;
// init memory type ID table
for (i = 0; i < sMemoryTypeRegisterCount; i++) {
sMemoryTypeIDs[i] = -1;
}
// set the physical memory ranges to write-back mode
for (i = 0; i < args->num_physical_memory_ranges; i++) {
set_memory_type(-1, args->physical_memory_range[i].start,
args->physical_memory_range[i].size, B_MTR_WB);
}
return B_OK;
}
void
arch_vm_aspace_swap(vm_address_space *aspace)
{
@ -184,70 +252,30 @@ arch_vm_supports_protection(uint32 protection)
void
arch_vm_init_area(vm_area *area)
arch_vm_unset_memory_type(struct vm_area *area)
{
area->memory_type.type = 0;
}
int32 index;
void
arch_vm_unset_memory_type(vm_area *area)
{
if (area->memory_type.type == 0)
if (area->memory_type == 0)
return;
call_all_cpus(&unset_mtrr, (void *)(int32)area->memory_type.index);
free_mtrr(area->memory_type.index);
// find index for area ID
for (index = 0; index < sMemoryTypeRegisterCount; index++) {
if (sMemoryTypeIDs[index] == area->id) {
x86_set_mtrr(index, 0, 0, 0);
sMemoryTypeIDs[index] = -1;
free_mtrr(index);
break;
}
}
}
status_t
arch_vm_set_memory_type(vm_area *area, addr_t physicalBase, uint32 type)
arch_vm_set_memory_type(struct vm_area *area, addr_t physicalBase, uint32 type)
{
struct set_mtrr_parameter parameter;
if (type == 0)
return B_OK;
switch (type) {
case B_MTR_UC: // uncacheable
parameter.type = 0;
break;
case B_MTR_WC: // write combining
parameter.type = 1;
break;
case B_MTR_WT: // write through
parameter.type = 4;
break;
case B_MTR_WP: // write protected
parameter.type = 5;
break;
case B_MTR_WB: // write back
parameter.type = 6;
break;
default:
return B_BAD_VALUE;
}
if (sMTRRCount == 0) {
status_t status = init_mtrr();
if (status < B_OK)
return status;
}
parameter.index = allocate_mtrr();
if (parameter.index < 0)
return B_ERROR;
parameter.base = physicalBase;
parameter.length = area->size;
call_all_cpus(&set_mtrr, &parameter);
area->memory_type.type = parameter.type;
area->memory_type.index = (uint16)parameter.index;
dprintf("memory type: %u, index: %ld\n", area->memory_type.type, parameter.index);
return B_OK;
area->memory_type = type >> MEMORY_TYPE_SHIFT;
return set_memory_type(area->id, physicalBase, area->size, type);
}


@ -61,6 +61,22 @@ FUNCTION(i386_fxsave_swap):
fxrstor (%eax)
ret
/* uint32 x86_read_ebp(); */
FUNCTION(x86_read_ebp):
movl %ebp, %eax
ret
/* uint32 x86_read_cr0(); */
FUNCTION(x86_read_cr0):
movl %cr0, %eax
ret
/* void x86_write_cr0(uint32 value); */
FUNCTION(x86_write_cr0):
movl 4(%esp), %eax
movl %eax, %cr0
ret
/* uint64 x86_read_msr(uint32 register); */
FUNCTION(x86_read_msr):
movl 4(%esp), %ecx
@ -70,8 +86,8 @@ FUNCTION(x86_read_msr):
/* void x86_write_msr(uint32 register, uint64 value); */
FUNCTION(x86_write_msr):
movl 4(%esp), %ecx
movl 8(%esp), %edx
movl 12(%esp), %eax
movl 8(%esp), %eax
movl 12(%esp), %edx
wrmsr
ret


@ -42,6 +42,13 @@ cpu_init_post_vm(kernel_args *args)
}
status_t
cpu_init_post_modules(kernel_args *args)
{
return arch_cpu_init_post_modules(args);
}
status_t
cpu_preboot_init(kernel_args *args)
{


@ -430,13 +430,14 @@ frame_buffer_console_init(kernel_args *args)
status_t
frame_buffer_console_init_post_modules(kernel_args *args)
{
if (sConsole.frame_buffer == NULL)
// TODO: enable MTRR in VESA mode!
// if (sConsole.frame_buffer == NULL)
return B_OK;
// try to set frame buffer memory to write combined
return vm_set_area_memory_type(sConsole.area,
args->frame_buffer.physical_buffer.start, B_MTR_WC);
// return vm_set_area_memory_type(sConsole.area,
// args->frame_buffer.physical_buffer.start, B_MTR_WC);
}


@ -215,6 +215,9 @@ main2(void *unused)
TRACE(("Mount boot file system\n"));
vfs_mount_boot_file_system(&sKernelArgs);
// CPU specific modules may now be available
cpu_init_post_modules(&sKernelArgs);
vm_init_post_modules(&sKernelArgs);
debug_init_post_modules(&sKernelArgs);
// start the init process


@ -136,7 +136,8 @@ _vm_create_reserved_region_struct(vm_virtual_map *map, uint32 flags)
static vm_area *
_vm_create_area_struct(vm_address_space *aspace, const char *name, uint32 wiring, uint32 protection)
_vm_create_area_struct(vm_address_space *aspace, const char *name,
uint32 wiring, uint32 protection)
{
vm_area *area = NULL;
@ -161,6 +162,7 @@ _vm_create_area_struct(vm_address_space *aspace, const char *name, uint32 wiring
area->size = 0;
area->protection = protection;
area->wiring = wiring;
area->memory_type = 0;
area->ref_count = 1;
area->cache_ref = NULL;
@ -172,7 +174,6 @@ _vm_create_area_struct(vm_address_space *aspace, const char *name, uint32 wiring
area->cache_next = area->cache_prev = NULL;
area->hash_next = NULL;
arch_vm_init_area(area);
return area;
}
@ -1846,7 +1847,8 @@ _dump_area(vm_area *area)
kprintf("base: 0x%lx\n", area->base);
kprintf("size: 0x%lx\n", area->size);
kprintf("protection: 0x%lx\n", area->protection);
kprintf("wiring: 0x%lx\n", area->wiring);
kprintf("wiring: 0x%x\n", area->wiring);
kprintf("memory_type: 0x%x\n", area->memory_type);
kprintf("ref_count: %ld\n", area->ref_count);
kprintf("cache_ref: %p\n", area->cache_ref);
kprintf("cache_offset: 0x%Lx\n", area->cache_offset);
@ -1899,7 +1901,7 @@ dump_area_list(int argc, char **argv)
hash_open(sAreaHash, &iter);
while ((area = (vm_area *)hash_next(sAreaHash, &iter)) != NULL) {
kprintf("%p %5lx %p\t%p\t%ld\t%ld\t%s\n", area, area->id, (void *)area->base,
kprintf("%p %5lx %p\t%p\t%ld\t%d\t%s\n", area, area->id, (void *)area->base,
(void *)area->size, area->protection, area->wiring, area->name);
}
hash_close(sAreaHash, &iter, false);
@ -2291,6 +2293,13 @@ vm_init_post_thread(kernel_args *args)
}
status_t
vm_init_post_modules(kernel_args *args)
{
return arch_vm_init_post_modules(args);
}
void
permit_page_faults(void)
{
@ -2763,12 +2772,7 @@ vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type)
return B_BAD_VALUE;
status_t status = arch_vm_set_memory_type(area, physicalBase, type);
if (status < B_OK)
goto out;
arch_cpu_invalidate_TLB_range(area->base, area->size);
out:
vm_put_area(area);
return status;
}