Preparation for MTRR support; note that the code is completely untested so far.

The CPU specific MTRR code will be in modules.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15520 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2005-12-12 17:04:36 +00:00
parent 381d6b57f6
commit 7c0a93573b
9 changed files with 294 additions and 17 deletions

View File

@ -22,6 +22,10 @@ status_t arch_vm_init_post_area(struct kernel_args *args);
status_t arch_vm_init_end(struct kernel_args *args);
void arch_vm_aspace_swap(vm_address_space *aspace);
bool arch_vm_supports_protection(uint32 protection);
void arch_vm_init_area(vm_area *area);
status_t arch_vm_set_memory_type(vm_area *area, uint32 type);
void arch_vm_unset_memory_type(vm_area *area);
#ifdef __cplusplus
}

View File

@ -0,0 +1,10 @@
/*
* Copyright 2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_VM_TYPES_H
#define KERNEL_ARCH_VM_TYPES_H
#include <arch_vm_types.h>
#endif /* KERNEL_ARCH_VM_TYPES_H */

View File

@ -9,13 +9,22 @@
#define _KERNEL_ARCH_x86_CPU_H
#include <SupportDefs.h>
#include <module.h>
#include <arch/x86/descriptors.h>
// MSR registers (possibly Intel specific)
#define IA32_MSR_APIC_BASE 0x1b
typedef struct x86_cpu_module_info {
module_info info;
uint32 (*count_mtrrs)(void);
status_t (*set_mtrr)(uint32 index, addr_t base, addr_t length, uint32 type);
status_t (*unset_mtrr)(uint32 index);
} x86_cpu_module_info;
struct tss {
uint16 prev_task;
uint16 unused0;
@ -78,9 +87,12 @@ void i386_frstor(const void *fpu_state);
void i386_fxrstor(const void *fpu_state);
void i386_fsave_swap(void *old_fpu_state, const void *new_fpu_state);
void i386_fxsave_swap(void *old_fpu_state, const void *new_fpu_state);
uint64 x86_read_msr(uint32 register);
void x86_write_msr(uint32 register, uint64 value);
uint64 x86_read_msr(uint32 registerNumber);
void x86_write_msr(uint32 registerNumber, uint64 value);
void x86_set_task_gate(int32 n, int32 segment);
uint32 x86_count_mtrrs(void);
status_t x86_set_mtrr(uint32 index, addr_t base, addr_t length, uint32 type);
status_t x86_unset_mtrr(uint32 index);
struct tss *x86_get_main_tss(void);
#define read_ebp(value) \

View File

@ -0,0 +1,17 @@
/*
* Copyright 2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _KERNEL_ARCH_x86_VM_TYPES_H
#define _KERNEL_ARCH_x86_VM_TYPES_H
#include <SupportDefs.h>
// Per-area memory type state for the x86 MTRR support.
// 'type' is the MTRR memory type code; 0 means "no special type set"
// (see arch_vm_init_area()/arch_vm_unset_memory_type() in arch_vm.c).
// 'index' is the MTRR slot this area occupies while type != 0.
struct arch_vm_memory_type {
	uint16 type;
	uint16 index;
};
#endif /* _KERNEL_ARCH_x86_VM_TYPES_H */

View File

@ -19,18 +19,14 @@
typedef union cpu_ent {
struct {
int cpu_num;
// thread.c: used to force a reschedule at quantum expiration time
int preempted;
timer quantum_timer;
} info;
// ToDo: align manually on CPU cache lines if possible
uint32 align[16];
} cpu_ent;
} cpu_ent __attribute__((aligned(64)));
/**
* Defined in core/cpu.c
*/
extern cpu_ent cpu[MAX_BOOT_CPUS];

View File

@ -1,5 +1,5 @@
/*
* Copyright 2002-2004, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2002-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -11,6 +11,7 @@
#include <kernel.h>
#include <sys/uio.h>
#include <arch/vm_types.h>
#include <arch/vm_translation_map.h>
@ -84,6 +85,7 @@ typedef struct vm_area {
uint32 protection;
uint32 wiring;
int32 ref_count;
struct arch_vm_memory_type memory_type;
struct vm_cache_ref *cache_ref;
off_t cache_offset;

View File

@ -7,11 +7,13 @@
*/
#include <arch/cpu.h>
#include <vm.h>
#include <boot_device.h>
#include <smp.h>
#include <arch/x86/selector.h>
#include <tls.h>
#include <vm.h>
#include <arch/cpu.h>
#include <arch/x86/selector.h>
#include <boot/kernel_args.h>
#include "interrupts.h"
@ -34,6 +36,8 @@ segment_descriptor *gGDT = NULL;
//static struct tss sDoubleFaultTSS;
static uint32 sDoubleFaultStack[10240];
static x86_cpu_module_info *sCpuModule;
struct tss *
x86_get_main_tss(void)
@ -44,6 +48,80 @@ x86_get_main_tss(void)
}
/**	Lazily locates and loads the model specific CPU module ("cpu/..."),
 *	caching the result in sCpuModule. Returns NULL if no module could
 *	be found. Called on demand by the x86_*_mtrr() wrappers below.
 */
static x86_cpu_module_info *
load_cpu_module(void)
{
	// already loaded on a previous call?
	if (sCpuModule != NULL)
		return sCpuModule;
	// find model specific CPU module
	if (gBootDevice > 0) {
		// the boot device is available, so we can iterate over the
		// on-disk module list
		// NOTE(review): open_module_list() could return NULL — verify
		// read_next_module_name() tolerates a NULL cookie
		void *cookie = open_module_list("cpu");
		while (true) {
			char name[B_FILE_NAME_LENGTH];
			size_t nameLength = sizeof(name);
			// stop when the list is exhausted (!= B_OK) or when a
			// module was successfully loaded (== B_OK)
			if (read_next_module_name(cookie, name, &nameLength) != B_OK
				|| get_module(name, (module_info **)&sCpuModule) == B_OK)
				break;
		}
		close_module_list(cookie);
	} else {
		// we're in early boot mode, let's use get_loaded_module
		uint32 cookie = 0;
		while (true) {
			char name[B_FILE_NAME_LENGTH];
			size_t nameLength = sizeof(name);
			if (get_next_loaded_module_name(&cookie, name, &nameLength) != B_OK)
				break;
			// only consider modules whose name starts with "cpu"
			if (strncmp(name, "cpu", 3))
				continue;
			if (get_module(name, (module_info **)&sCpuModule) == B_OK)
				break;
		}
	}
	// still NULL if nothing was found
	return sCpuModule;
}
uint32
x86_count_mtrrs(void)
{
if (load_cpu_module() == NULL)
return 0;
return sCpuModule->count_mtrrs();
}
/**	Programs MTRR slot \a index to cover [base, base + length)
 *	with the given memory \a type via the CPU module.
 *	Returns B_UNSUPPORTED when no CPU module could be loaded.
 */
status_t
x86_set_mtrr(uint32 index, addr_t base, addr_t length, uint32 type)
{
	x86_cpu_module_info *module = load_cpu_module();
	if (module == NULL)
		return B_UNSUPPORTED;

	return module->set_mtrr(index, base, length, type);
}
/**	Disables MTRR slot \a index via the CPU module.
 *	Returns B_UNSUPPORTED when no CPU module could be loaded.
 */
status_t
x86_unset_mtrr(uint32 index)
{
	x86_cpu_module_info *module = load_cpu_module();
	if (module == NULL)
		return B_UNSUPPORTED;

	return module->unset_mtrr(index);
}
static void
load_tss(void *data, int cpu)
{

View File

@ -18,6 +18,9 @@
#include <arch/x86/bios.h>
#include <stdlib.h>
#include <string.h>
//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
@ -27,6 +30,80 @@
#endif
static uint32 *sMTRRBitmap;
static int32 sMTRRCount;
static spinlock sMTRRLock;


/**	Lazily allocates the bitmap that tracks which MTRR slots are in use.
 *	The bitmap is sized in whole 32-bit words: the previous
 *	sMTRRCount / 8 byte sizing truncated, so a CPU with the typical
 *	8 variable MTRRs got a 1-byte allocation that allocate_mtrr()
 *	then read as a uint32 word (out of bounds) — and its word count
 *	of sMTRRCount / 32 == 0 meant no slot could ever be allocated.
 */
static status_t
init_mtrr_bitmap(void)
{
	int32 bytes;

	if (sMTRRBitmap != NULL)
		return B_OK;

	sMTRRCount = x86_count_mtrrs();
	if (sMTRRCount == 0)
		return B_UNSUPPORTED;

	// round up to whole uint32 words, since allocate_mtrr() scans word-wise
	bytes = ((sMTRRCount + 31) / 32) * 4;

	sMTRRBitmap = malloc(bytes);
	if (sMTRRBitmap == NULL)
		return B_NO_MEMORY;

	memset(sMTRRBitmap, 0, bytes);
	return B_OK;
}


/**	Reserves a free MTRR slot and returns its index,
 *	or -1 if all slots are in use.
 *	Interrupt-safe: protected by sMTRRLock.
 */
static int32
allocate_mtrr(void)
{
	int32 count = (sMTRRCount + 31) / 32;
		// round up — the last word may be only partially valid
	int32 i, j;

	cpu_status state = disable_interrupts();
	acquire_spinlock(&sMTRRLock);

	for (i = 0; i < count; i++) {
		if (sMTRRBitmap[i] == 0xffffffff)
			continue;

		// find a free bit, but never hand out an index >= sMTRRCount
		for (j = 0; j < 32 && i * 32 + j < sMTRRCount; j++) {
			if (sMTRRBitmap[i] & (1UL << j))
				continue;

			sMTRRBitmap[i] |= 1UL << j;

			release_spinlock(&sMTRRLock);
			restore_interrupts(state);
			return i * 32 + j;
		}
	}

	release_spinlock(&sMTRRLock);
	restore_interrupts(state);
	return -1;
}


/**	Marks the MTRR slot \a index as free again.
 *	Interrupt-safe: protected by sMTRRLock.
 */
static void
free_mtrr(int32 index)
{
	int32 i = index / 32;
	int32 j = index % 32;

	cpu_status state = disable_interrupts();
	acquire_spinlock(&sMTRRLock);

	sMTRRBitmap[i] &= ~(1UL << j);

	release_spinlock(&sMTRRLock);
	restore_interrupts(state);
}
status_t
arch_vm_init(kernel_args *args)
{
@ -91,3 +168,72 @@ arch_vm_supports_protection(uint32 protection)
return true;
}
/**	Initializes the architecture specific part of a freshly created
 *	area: no special memory type, and a defined (zero) MTRR index so
 *	no field of the struct is ever left with stale stack garbage.
 */
void
arch_vm_init_area(vm_area *area)
{
	area->memory_type.type = 0;
	area->memory_type.index = 0;
}
/**	Releases the MTRR slot an area holds, if any.
 *	Resets the area's memory type afterwards so that calling this
 *	twice (or freeing the area later) cannot release the same MTRR
 *	slot a second time.
 */
void
arch_vm_unset_memory_type(vm_area *area)
{
	if (area->memory_type.type == 0)
		return;

	x86_unset_mtrr(area->memory_type.index);
	free_mtrr(area->memory_type.index);

	// back to "no special memory type" so this is idempotent
	area->memory_type.type = 0;
	area->memory_type.index = 0;
}
/**	Sets the memory type of \a area by programming a free MTRR slot.
 *	\a type is one of the B_MTR_* constants (0 means "nothing to do").
 *	Returns B_BAD_VALUE for an unknown type, B_ERROR when no MTRR
 *	slot is free, or the error from the CPU module.
 */
status_t
arch_vm_set_memory_type(vm_area *area, uint32 type)
{
	status_t status;
	int32 index;

	if (type == 0)
		return B_OK;

	// translate the B_MTR_* constants into x86 MTRR type codes
	switch (type) {
		case B_MTR_UC:	// uncacheable
			type = 0;
			break;
		case B_MTR_WC:	// write combining
			type = 1;
			break;
		case B_MTR_WT:	// write through
			type = 4;
			break;
		case B_MTR_WP:	// write protected
			type = 5;
			break;
		case B_MTR_WB:	// write back
			type = 6;
			break;
		default:
			return B_BAD_VALUE;
	}

	if (sMTRRBitmap == NULL) {
		status = init_mtrr_bitmap();
		if (status < B_OK)
			return status;
	}

	index = allocate_mtrr();
	if (index < 0)
		return B_ERROR;

	status = x86_set_mtrr(index, area->base, area->size, type);
	if (status == B_OK) {
		// success test was inverted before: the type/index must be
		// recorded on success, and the slot freed only on failure —
		// otherwise arch_vm_unset_memory_type() can never release it
		area->memory_type.type = (uint16)type;
		area->memory_type.index = (uint16)index;
	} else
		free_mtrr(index);

	dprintf("memory type: %u, index: %ld\n", area->memory_type.type, index);
	return status;
}

View File

@ -172,6 +172,7 @@ _vm_create_area_struct(vm_address_space *aspace, const char *name, uint32 wiring
area->cache_next = area->cache_prev = NULL;
area->hash_next = NULL;
arch_vm_init_area(area);
return area;
}
@ -484,10 +485,11 @@ insert_area(vm_address_space *addressSpace, void **_address,
status = find_and_insert_area_slot(&addressSpace->virtual_map, searchBase, size,
searchEnd, addressSpec, area);
if (status == B_OK)
if (status == B_OK) {
// ToDo: do we have to do anything about B_ANY_KERNEL_ADDRESS
// vs. B_ANY_KERNEL_BLOCK_ADDRESS here?
*_address = (void *)area->base;
}
return status;
}
@ -804,7 +806,8 @@ vm_create_anonymous_area(aspace_id aid, const char *name, void **address,
}
vm_cache_acquire_ref(cache_ref, true);
err = map_backing_store(aspace, store, address, 0, size, addressSpec, wiring, protection, REGION_NO_PRIVATE_MAP, &area, name);
err = map_backing_store(aspace, store, address, 0, size, addressSpec, wiring,
protection, REGION_NO_PRIVATE_MAP, &area, name);
vm_cache_release_ref(cache_ref);
if (err < 0) {
vm_put_aspace(aspace);
@ -975,9 +978,17 @@ vm_map_physical_memory(aspace_id aid, const char *name, void **_address,
cache->scan_skip = 1;
vm_cache_acquire_ref(cache_ref, true);
status = map_backing_store(aspace, store, _address, 0, size, addressSpec, 0, protection, REGION_NO_PRIVATE_MAP, &area, name);
status = map_backing_store(aspace, store, _address, 0, size,
addressSpec & ~B_MTR_MASK, 0, protection, REGION_NO_PRIVATE_MAP, &area, name);
vm_cache_release_ref(cache_ref);
if (status >= B_OK && (addressSpec & B_MTR_MASK) != 0) {
// set requested memory type
status = arch_vm_set_memory_type(area, addressSpec & B_MTR_MASK);
if (status < B_OK)
vm_put_area(area);
}
if (status >= B_OK) {
// make sure our area is mapped in completely
// (even if that makes the fault routine pretty much useless)
@ -1321,6 +1332,7 @@ _vm_put_area(vm_area *area, bool aspaceLocked)
aspace = area->aspace;
arch_vm_unset_memory_type(area);
remove_area_from_virtual_map(aspace, area, aspaceLocked);
vm_cache_remove_area(area->cache_ref, area);