Added arch_cpu.cpp to the x86_64 build.

* Some things are currently ifndef'd out completely for x86_64 because
  they aren't implemented. There are a few other ifdefs to handle x86_64
  differences, but most of the code works unchanged.
* Renamed some i386_* functions to x86_*.
* Added a temporary method for setting the current thread on x86_64
  (a global variable, which is not SMP-safe). This will later be done
  via the GS segment, but that is not implemented yet.
This commit is contained in:
Alex Smith 2012-07-04 14:06:46 +01:00
parent 4e8fbfb2d1
commit 4304bb9894
17 changed files with 291 additions and 211 deletions

View File

@ -645,7 +645,7 @@ typedef enum cpu_types {
#define B_CPU_x86_VENDOR_MASK 0xff00
#ifdef __INTEL__
#if defined(__INTEL__) || defined(__x86_64)
typedef union {
struct {
uint32 max_eax;

View File

@ -26,10 +26,11 @@
// (and is in arch_interrupts.S)
#define DOUBLE_FAULT_TSS_BASE_SEGMENT 9
#define TSS_BASE_SEGMENT (DOUBLE_FAULT_TSS_BASE_SEGMENT + smp_get_num_cpus())
#define TLS_BASE_SEGMENT (TSS_BASE_SEGMENT + smp_get_num_cpus())
#define APM_BASE_SEGMENT (TLS_BASE_SEGMENT + smp_get_num_cpus())
#define TSS_BASE_SEGMENT (DOUBLE_FAULT_TSS_BASE_SEGMENT + smp_get_num_cpus())
#define TLS_BASE_SEGMENT (TSS_BASE_SEGMENT + smp_get_num_cpus())
#define APM_BASE_SEGMENT (TLS_BASE_SEGMENT + smp_get_num_cpus())
#define TSS_SEGMENT(cpu) (TSS_BASE_SEGMENT + cpu)
// defines entries in the GDT/LDT

View File

@ -18,7 +18,7 @@
#define TSS_BASE_SEGMENT 5
#define TLS_BASE_SEGMENT (TSS_BASE_SEGMENT + smp_get_num_cpus())
#define TSS_SEGMENT(cpu) (TSS_BASE_SEGMENT + cpu * 2)
// Structure of a segment descriptor.
@ -89,7 +89,7 @@ struct tss {
uint64 ist7;
uint64 _reserved3;
uint16 _reserved4;
uint16 io_bitmap;
uint16 io_map_base;
} _PACKED;

View File

@ -365,12 +365,9 @@ typedef struct arch_cpu_info {
extern "C" {
#endif
// temporary
#ifndef __x86_64__
struct arch_thread;
void __x86_setup_system_time(uint32 conversionFactor,
uint32 conversionFactorNsecs, bool conversionFactorNsecsShift);
void x86_context_switch(struct arch_thread* oldState,
@ -378,18 +375,12 @@ void x86_context_switch(struct arch_thread* oldState,
void x86_userspace_thread_exit(void);
void x86_end_userspace_thread_exit(void);
void x86_swap_pgdir(uint32 newPageDir);
void i386_set_tss_and_kstack(addr_t kstack);
void i386_fnsave(void* fpuState);
void i386_fxsave(void* fpuState);
void i386_frstor(const void* fpuState);
void i386_fxrstor(const void* fpuState);
void i386_noop_swap(void* oldFpuState, const void* newFpuState);
void i386_fnsave_swap(void* oldFpuState, const void* newFpuState);
void i386_fxsave_swap(void* oldFpuState, const void* newFpuState);
uint32 x86_read_ebp();
void x86_fxsave(void* fpuState);
void x86_fxrstor(const void* fpuState);
void x86_fxsave_swap(void* oldFpuState, const void* newFpuState);
addr_t x86_read_ebp();
uint64 x86_read_msr(uint32 registerNumber);
void x86_write_msr(uint32 registerNumber, uint64 value);
void x86_set_task_gate(int32 cpu, int32 n, int32 segment);
void* x86_get_idt(int32 cpu);
uint32 x86_count_mtrrs(void);
void x86_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type);
@ -400,16 +391,23 @@ void x86_set_mtrrs(uint8 defaultType, const x86_mtrr_info* infos,
void x86_init_fpu();
bool x86_check_feature(uint32 feature, enum x86_feature_type type);
void* x86_get_double_fault_stack(int32 cpu, size_t* _size);
int32 x86_double_fault_get_cpu(void);
void x86_double_fault_exception(struct iframe* frame);
void x86_page_fault_exception_double_fault(struct iframe* frame);
#ifndef __x86_64__
void i386_set_tss_and_kstack(addr_t kstack);
void x86_fnsave(void* fpuState);
void x86_frstor(const void* fpuState);
void x86_noop_swap(void* oldFpuState, const void* newFpuState);
void x86_fnsave_swap(void* oldFpuState, const void* newFpuState);
void x86_set_task_gate(int32 cpu, int32 n, int32 segment);
int32 x86_double_fault_get_cpu(void);
#endif
extern segment_descriptor* gGDT;
#ifdef __cplusplus
} // extern "C" {
#endif

View File

@ -12,13 +12,11 @@
extern "C" {
#endif
#ifndef __x86_64__
status_t get_current_cpuid(cpuid_info *info, uint32 eax);
status_t get_current_cpuid(cpuid_info* info, uint32 eax);
uint32 get_eflags(void);
void set_eflags(uint32 value);
status_t _user_get_cpuid(cpuid_info *info, uint32 eax, uint32 cpu);
#endif
status_t _user_get_cpuid(cpuid_info* info, uint32 eax, uint32 cpu);
#ifdef __cplusplus
}

View File

@ -30,19 +30,21 @@ void x86_set_tls_context(Thread* thread);
#ifdef __x86_64__
// TODO
extern Thread* gCurrentThread;
static inline Thread*
arch_thread_get_current_thread(void)
{
return NULL;
return gCurrentThread;
}
// Records \a t as the current thread in a single global variable.
// Temporary x86_64 implementation: a plain global is not SMP-safe (all
// CPUs share it); to be replaced by per-CPU storage via the GS segment.
static inline void
arch_thread_set_current_thread(Thread* t)
{
gCurrentThread = t;
}

View File

@ -4,6 +4,9 @@ DEFINES += _BOOT_MODE ;
local kernelArchSources =
arch_elf.cpp
;
local kernelArch32Sources =
cpuid.S
;
@ -17,6 +20,7 @@ local librootOsArchSources =
BootMergeObject boot_arch_$(TARGET_KERNEL_ARCH).o :
$(kernelArchSources)
$(kernelArch32Sources)
$(kernelLibArchSources)
$(librootOsArchSources)
: # additional flags
@ -24,6 +28,8 @@ BootMergeObject boot_arch_$(TARGET_KERNEL_ARCH).o :
SEARCH on [ FGristFiles $(kernelArchSources) ]
= [ FDirName $(HAIKU_TOP) src system kernel arch x86 ] ;
SEARCH on [ FGristFiles $(kernelArch32Sources) ]
= [ FDirName $(HAIKU_TOP) src system kernel arch x86 32 ] ;
SEARCH on [ FGristFiles $(kernelLibArchSources) ]
= [ FDirName $(HAIKU_TOP) src system kernel lib arch x86 ] ;
SEARCH on [ FGristFiles $(librootOsArchSources) ]

View File

@ -19,66 +19,57 @@
.text
/*! \fn void arch_cpu_user_TLB_invalidate()
Invalidates the TLB. Must be called with interrupts disabled.
*/
FUNCTION(arch_cpu_user_TLB_invalidate):
/* Writing CR3 back with its current value flushes the TLB (non-global
   entries) without changing the active page directory. */
movl %cr3, %eax
movl %eax, %cr3
ret
FUNCTION_END(arch_cpu_user_TLB_invalidate)
/* void i386_fnsave(void *fpu_state); */
FUNCTION(i386_fnsave):
/* void x86_fnsave(void *fpu_state); */
FUNCTION(x86_fnsave):
movl 4(%esp), %eax
fnsave (%eax)
ret
FUNCTION_END(i386_fnsave)
FUNCTION_END(x86_fnsave)
/* void i386_fxsave(void *fpu_state); */
FUNCTION(i386_fxsave):
/* void x86_fxsave(void *fpu_state); */
FUNCTION(x86_fxsave):
movl 4(%esp), %eax
fxsave (%eax)
ret
FUNCTION_END(i386_fxsave)
FUNCTION_END(x86_fxsave)
/* void i386_frstor(const void *fpu_state); */
FUNCTION(i386_frstor):
/* void x86_frstor(const void *fpu_state); */
FUNCTION(x86_frstor):
movl 4(%esp), %eax
frstor (%eax)
ret
FUNCTION_END(i386_frstor)
FUNCTION_END(x86_frstor)
/* void i386_fxrstor(const void *fpu_state); */
FUNCTION(i386_fxrstor):
/* void x86_fxrstor(const void *fpu_state); */
FUNCTION(x86_fxrstor):
movl 4(%esp), %eax
fxrstor (%eax)
ret
FUNCTION_END(i386_fxrstor)
FUNCTION_END(x86_fxrstor)
/* void i386_noop_swap(void *old_fpu_state, const void *new_fpu_state); */
FUNCTION(i386_noop_swap):
/* void x86_noop_swap(void *old_fpu_state, const void *new_fpu_state); */
FUNCTION(x86_noop_swap):
nop
ret
FUNCTION_END(i386_noop_swap)
FUNCTION_END(x86_noop_swap)
/* void i386_fsave_swap(void *old_fpu_state, const void *new_fpu_state); */
FUNCTION(i386_fnsave_swap):
/* void x86_fnsave_swap(void *old_fpu_state, const void *new_fpu_state); */
FUNCTION(x86_fnsave_swap):
movl 4(%esp),%eax
fnsave (%eax)
movl 8(%esp),%eax
frstor (%eax)
ret
FUNCTION_END(i386_fnsave_swap)
FUNCTION_END(x86_fnsave_swap)
/* void i386_fxsave_swap(void *old_fpu_state, const void *new_fpu_state); */
FUNCTION(i386_fxsave_swap):
/* void x86_fxsave_swap(void *old_fpu_state, const void *new_fpu_state); */
FUNCTION(x86_fxsave_swap):
movl 4(%esp),%eax
fxsave (%eax)
movl 8(%esp),%eax
fxrstor (%eax)
ret
FUNCTION_END(i386_fxsave_swap)
FUNCTION_END(x86_fxsave_swap)
/* uint32 x86_read_ebp(); */
FUNCTION(x86_read_ebp):
@ -142,12 +133,12 @@ null_idt_descr:
.word 0
.word 0,0
FUNCTION(reboot):
FUNCTION(x86_reboot):
lidt null_idt_descr
int $0
done:
jmp done
FUNCTION_END(reboot)
FUNCTION_END(x86_reboot)
/* status_t arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *faultHandler) */

View File

@ -0,0 +1,66 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#include <asm_defs.h>
.text
/* void x86_fxsave(void* fpuState); */
/* Saves the x87/MMX/SSE state into the buffer passed in %rdi (first
   argument, System V AMD64 ABI).
   NOTE(review): fxsave requires a 512-byte, 16-byte-aligned buffer --
   callers must guarantee this. */
FUNCTION(x86_fxsave):
fxsave (%rdi)
ret
FUNCTION_END(x86_fxsave)
/* void x86_fxrstor(const void* fpuState); */
/* Restores the x87/MMX/SSE state from the buffer passed in %rdi.
   NOTE(review): the buffer must be a valid 16-byte-aligned fxsave image. */
FUNCTION(x86_fxrstor):
fxrstor (%rdi)
ret
FUNCTION_END(x86_fxrstor)
/* void x86_fxsave_swap(void* oldFpuState, const void* newFpuState); */
/* Context-switch helper: save the current FPU state to oldFpuState
   (%rdi), then load the new state from newFpuState (%rsi). */
FUNCTION(x86_fxsave_swap):
fxsave (%rdi)
fxrstor (%rsi)
ret
FUNCTION_END(x86_fxsave_swap)
/* uint64 x86_read_msr(uint32 register); */
/* Reads the MSR selected by the first argument and returns its 64-bit
   value. RDMSR takes the MSR index in ECX and returns the value split
   across EDX (high 32 bits) and EAX (low 32 bits). */
FUNCTION(x86_read_msr):
mov %edi, %ecx
rdmsr
/* Recombine EDX:EAX into a single 64-bit result in RAX. */
shl $32, %rdx
/* Zero-extend EAX into RAX. NOTE(review): redundant -- in 64-bit mode
   RDMSR already clears the upper halves of RAX/RDX -- but harmless. */
mov %eax, %eax
or %rdx, %rax
ret
FUNCTION_END(x86_read_msr)
/* void x86_write_msr(uint32 register, uint64 value); */
/* Writes the 64-bit value (%rsi) to the MSR selected by the first
   argument (%edi). WRMSR takes the index in ECX and the value in
   EDX:EAX, so the 64-bit value is split into high/low halves first. */
FUNCTION(x86_write_msr):
mov %rsi, %rdx
mov %esi, %eax
mov %edi, %ecx
shr $32, %rdx
wrmsr
ret
FUNCTION_END(x86_write_msr)
/* An all-zero (empty) IDT descriptor: limit 0, base 0. */
null_idt_descr:
.word 0
.quad 0
/* Reboots the machine by loading an empty IDT and raising an interrupt:
   with no valid handlers this escalates to a triple fault, which resets
   the CPU. Never returns; spins in `done` if the reset does not take
   effect immediately. */
FUNCTION(x86_reboot):
lidt null_idt_descr
int $0
done:
jmp done
FUNCTION_END(x86_reboot)

View File

@ -0,0 +1,45 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#include <asm_defs.h>
.text
/* status_t get_current_cpuid(cpuid_info* info, uint32 eaxRegister) */
/* Executes CPUID with EAX = eaxRegister (%esi) and stores the four
   result registers into the cpuid_info struct pointed to by %rdi.
   Always returns 0 (B_OK). */
FUNCTION(get_current_cpuid):
/* RBX is callee-saved in the AMD64 ABI but clobbered by CPUID. */
push %rbx
movl %esi, %eax
cpuid
/* Store EAX, EBX, EDX, ECX at consecutive 4-byte offsets of *info. */
movl %eax, 0(%rdi)
movl %ebx, 4(%rdi)
movl %edx, 8(%rdi)
movl %ecx, 12(%rdi)
xorl %eax, %eax
// B_OK
pop %rbx
ret
FUNCTION_END(get_current_cpuid)
/* uint32 get_eflags(void) */
/* Returns the low 32 bits of RFLAGS by pushing the flags register and
   popping it into RAX. */
FUNCTION(get_eflags):
// The top 32 bits of RFLAGS are reserved, we can ignore them.
pushf
pop %rax
/* Zero-extend EAX into RAX, discarding the reserved upper half. */
mov %eax, %eax
ret
FUNCTION_END(get_eflags)
/* void set_eflags(uint32 val) */
/* Loads the flags register from the 32-bit argument: zero-extend EDI
   into RDI (a 32-bit mov clears the upper half), then push it and pop
   into RFLAGS. */
FUNCTION(set_eflags):
mov %edi, %edi
push %rdi
popf
ret
FUNCTION_END(set_eflags)

View File

@ -104,6 +104,15 @@ page_fault_exception(iframe* frame)
}
/*! Returns the virtual IDT address for CPU \a cpu. */
void*
x86_get_idt(int32 cpu)
{
// We use a single IDT for all CPUs on x86_64, so \a cpu is ignored.
return sIDT;
}
// #pragma mark -

View File

@ -37,6 +37,10 @@
#include <arch/elf.h>
// temporary
Thread* gCurrentThread = NULL;
status_t
arch_commpage_init(void)
{
@ -51,69 +55,6 @@ arch_commpage_init_post_cpus(void)
}
// Stub: no per-CPU pre-boot initialization is implemented for x86_64 yet.
status_t
arch_cpu_preboot_init_percpu(kernel_args* args, int cpu)
{
return B_OK;
}
// Stub: no per-CPU initialization is implemented for x86_64 yet.
status_t
arch_cpu_init_percpu(kernel_args* args, int cpu)
{
return B_OK;
}
// Stub: global CPU initialization is not implemented for x86_64 yet.
status_t
arch_cpu_init(kernel_args* args)
{
return B_OK;
}
// Stub: post-VM CPU initialization is not implemented for x86_64 yet.
status_t
arch_cpu_init_post_vm(kernel_args* args)
{
return B_OK;
}
// Stub: post-module CPU initialization is not implemented for x86_64 yet.
status_t
arch_cpu_init_post_modules(kernel_args* args)
{
return B_OK;
}
// Stub: TLB invalidation is not implemented for x86_64 yet (no-op).
void
arch_cpu_user_TLB_invalidate(void)
{
}
// Stub: global TLB invalidation is not implemented for x86_64 yet (no-op).
void
arch_cpu_global_TLB_invalidate(void)
{
}
// Stub: range TLB invalidation is not implemented for x86_64 yet;
// \a start and \a end are ignored.
void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
}
// Stub: per-page TLB invalidation is not implemented for x86_64 yet;
// the page list is ignored.
void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
}
ssize_t
arch_cpu_user_strlcpy(char* to, const char* from, size_t size,
addr_t* faultHandler)
@ -137,41 +78,6 @@ arch_cpu_user_memset(void* s, char c, size_t count, addr_t* faultHandler)
}
// Stub: shutdown/reboot is not implemented for x86_64 yet; always fails
// so callers know the request was not carried out.
status_t
arch_cpu_shutdown(bool rebootSystem)
{
return B_ERROR;
}
// Idles the CPU: HLT halts execution until the next interrupt arrives.
void
arch_cpu_idle(void)
{
asm("hlt");
}
// No-op on x86: \a address and \a length are ignored.
void
arch_cpu_sync_icache(void* address, size_t length)
{
// Instruction cache is always consistent on x86.
}
// Load barrier: LFENCE orders loads; the "memory" clobber also stops
// the compiler from reordering memory accesses across it.
void
arch_cpu_memory_read_barrier(void)
{
asm volatile("lfence" : : : "memory");
}
// Store barrier: SFENCE orders stores; the "memory" clobber also stops
// the compiler from reordering memory accesses across it.
void
arch_cpu_memory_write_barrier(void)
{
asm volatile("sfence" : : : "memory");
}
void
arch_debug_save_registers(struct arch_debug_registers* registers)
{

View File

@ -20,6 +20,8 @@ if $(TARGET_ARCH) = x86_64 {
SEARCH_SOURCE += [ FDirName $(SUBDIR) 64 ] ;
archSpecificSources =
arch.S
cpuid.S
int.cpp
interrupts.S
stubs.cpp
@ -29,11 +31,11 @@ if $(TARGET_ARCH) = x86_64 {
archSpecificSources =
arch.S
cpuid.S
int.cpp
interrupts.S
arch_commpage.cpp
arch_cpu.cpp
arch_debug.cpp
arch_elf.cpp
arch_platform.cpp
@ -48,7 +50,6 @@ if $(TARGET_ARCH) = x86_64 {
apic.cpp
apm.cpp
bios.cpp
cpuid.S
ioapic.cpp
irq_routing_table.cpp
msi.cpp
@ -83,6 +84,7 @@ if $(TARGET_ARCH) = x86_64 {
}
local archGenericSources =
arch_cpu.cpp
arch_debug_console.cpp
arch_int.cpp
pic.cpp

View File

@ -1,5 +1,6 @@
/*
* Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@ -30,10 +31,13 @@
#include <arch/x86/apic.h>
#include <boot/kernel_args.h>
#include "interrupts.h"
#include "paging/X86PagingStructures.h"
#include "paging/X86VMTranslationMap.h"
#ifndef __x86_64__
#include "32/interrupts.h"
#endif
#define DUMP_FEATURE_STRING 1
@ -83,25 +87,27 @@ struct set_mtrrs_parameter {
};
extern "C" void reboot(void);
// from arch_x86.S
extern "C" void x86_reboot(void);
// from arch.S
void (*gX86SwapFPUFunc)(void *oldState, const void *newState);
void (*gCpuIdleFunc)(void);
#ifndef __x86_64__
void (*gX86SwapFPUFunc)(void* oldState, const void* newState) = x86_noop_swap;
bool gHasSSE = false;
#endif
static uint32 sCpuRendezvous;
static uint32 sCpuRendezvous2;
static uint32 sCpuRendezvous3;
static vint32 sTSCSyncRendezvous;
segment_descriptor *gGDT = NULL;
segment_descriptor* gGDT = NULL;
/* Some specials for the double fault handler */
static uint8* sDoubleFaultStacks;
static const size_t kDoubleFaultStackSize = 4096; // size per CPU
static x86_cpu_module_info *sCpuModule;
static x86_cpu_module_info* sCpuModule;
extern "C" void memcpy_generic(void* dest, const void* source, size_t count);
@ -109,12 +115,15 @@ extern int memcpy_generic_end;
extern "C" void memset_generic(void* dest, int value, size_t count);
extern int memset_generic_end;
// TODO x86_64
#ifndef __x86_64__
x86_optimized_functions gOptimizedFunctions = {
memcpy_generic,
&memcpy_generic_end,
memset_generic,
&memset_generic_end
};
#endif
static status_t
@ -176,10 +185,10 @@ enable_caches()
static void
set_mtrr(void *_parameter, int cpu)
set_mtrr(void* _parameter, int cpu)
{
struct set_mtrr_parameter *parameter
= (struct set_mtrr_parameter *)_parameter;
struct set_mtrr_parameter* parameter
= (struct set_mtrr_parameter*)_parameter;
// wait until all CPUs have arrived here
smp_cpu_rendezvous(&sCpuRendezvous, cpu);
@ -233,7 +242,7 @@ set_mtrrs(void* _parameter, int cpu)
static void
init_mtrrs(void *_unused, int cpu)
init_mtrrs(void* _unused, int cpu)
{
// wait until all CPUs have arrived here
smp_cpu_rendezvous(&sCpuRendezvous, cpu);
@ -282,7 +291,7 @@ x86_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type)
status_t
x86_get_mtrr(uint32 index, uint64 *_base, uint64 *_length, uint8 *_type)
x86_get_mtrr(uint32 index, uint64* _base, uint64* _length, uint8* _type)
{
// the MTRRs are identical on all CPUs, so it doesn't matter
// on which CPU this runs
@ -309,10 +318,12 @@ x86_set_mtrrs(uint8 defaultType, const x86_mtrr_info* infos, uint32 count)
void
x86_init_fpu(void)
{
// All x86_64 CPUs support SSE, don't need to bother checking for it.
#ifndef __x86_64__
if (!x86_check_feature(IA32_FEATURE_FPU, FEATURE_COMMON)) {
// No FPU... time to install one in your 386?
dprintf("%s: Warning: CPU has no reported FPU.\n", __func__);
gX86SwapFPUFunc = i386_noop_swap;
gX86SwapFPUFunc = x86_noop_swap;
return;
}
@ -321,17 +332,21 @@ x86_init_fpu(void)
dprintf("%s: CPU has no SSE... just enabling FPU.\n", __func__);
// we don't have proper SSE support, just enable FPU
x86_write_cr0(x86_read_cr0() & ~(CR0_FPU_EMULATION | CR0_MONITOR_FPU));
gX86SwapFPUFunc = i386_fnsave_swap;
gX86SwapFPUFunc = x86_fnsave_swap;
return;
}
#endif
dprintf("%s: CPU has SSE... enabling FXSR and XMM.\n", __func__);
// enable OS support for SSE
x86_write_cr4(x86_read_cr4() | CR4_OS_FXSR | CR4_OS_XMM_EXCEPTION);
x86_write_cr0(x86_read_cr0() & ~(CR0_FPU_EMULATION | CR0_MONITOR_FPU));
gX86SwapFPUFunc = i386_fxsave_swap;
#ifndef __x86_64__
gX86SwapFPUFunc = x86_fxsave_swap;
gHasSSE = true;
#endif
}
@ -339,20 +354,27 @@ static void
load_tss(int cpu)
{
short seg = ((TSS_BASE_SEGMENT + cpu) << 3) | DPL_KERNEL;
asm("movw %0, %%ax;"
"ltr %%ax;" : : "r" (seg) : "eax");
asm("ltr %%ax" : : "a" (seg));
}
static void
init_double_fault(int cpuNum)
{
#ifdef __x86_64__
// x86_64 does not have task gates, so we use the IST mechanism to switch
// to the double fault stack upon a double fault (see 64/int.cpp).
struct tss* tss = &gCPU[cpuNum].arch.tss;
size_t stackSize;
tss->ist1 = (addr_t)x86_get_double_fault_stack(cpuNum, &stackSize);
tss->ist1 += stackSize;
#else
// set up the double fault TSS
struct tss *tss = &gCPU[cpuNum].arch.double_fault_tss;
struct tss* tss = &gCPU[cpuNum].arch.double_fault_tss;
memset(tss, 0, sizeof(struct tss));
size_t stackSize;
tss->sp0 = (uint32)x86_get_double_fault_stack(cpuNum, &stackSize);
tss->sp0 = (addr_t)x86_get_double_fault_stack(cpuNum, &stackSize);
tss->sp0 += stackSize;
tss->ss0 = KERNEL_DATA_SEG;
tss->cr3 = x86_read_cr3();
@ -374,12 +396,13 @@ init_double_fault(int cpuNum)
(addr_t)tss, sizeof(struct tss));
x86_set_task_gate(cpuNum, 8, tssSegmentDescriptorIndex << 3);
#endif
}
#if DUMP_FEATURE_STRING
static void
dump_feature_string(int currentCPU, cpu_ent *cpu)
dump_feature_string(int currentCPU, cpu_ent* cpu)
{
char features[256];
features[0] = 0;
@ -541,7 +564,7 @@ dump_feature_string(int currentCPU, cpu_ent *cpu)
static int
detect_cpu(int currentCPU)
{
cpu_ent *cpu = get_cpu_struct();
cpu_ent* cpu = get_cpu_struct();
char vendorString[17];
cpuid_info cpuid;
@ -656,7 +679,7 @@ detect_cpu(int currentCPU)
bool
x86_check_feature(uint32 feature, enum x86_feature_type type)
{
cpu_ent *cpu = get_cpu_struct();
cpu_ent* cpu = get_cpu_struct();
#if 0
int i;
@ -678,6 +701,7 @@ x86_get_double_fault_stack(int32 cpu, size_t* _size)
}
#ifndef __x86_64__
/*! Returns the index of the current CPU. Can only be called from the double
fault handler.
*/
@ -687,17 +711,15 @@ x86_double_fault_get_cpu(void)
uint32 stack = x86_read_ebp();
return (stack - (uint32)sDoubleFaultStacks) / kDoubleFaultStackSize;
}
#endif
// #pragma mark -
status_t
arch_cpu_preboot_init_percpu(kernel_args *args, int cpu)
arch_cpu_preboot_init_percpu(kernel_args* args, int cpu)
{
// A simple nop FPU call until x86_init_fpu
gX86SwapFPUFunc = i386_noop_swap;
// On SMP system we want to synchronize the CPUs' TSCs, so system_time()
// will return consistent values.
if (smp_get_num_cpus() > 1) {
@ -744,7 +766,7 @@ amdc1e_noarat_idle(void)
static bool
detect_amdc1e_noarat()
{
cpu_ent *cpu = get_cpu_struct();
cpu_ent* cpu = get_cpu_struct();
if (cpu->arch.vendor != VENDOR_AMD)
return false;
@ -759,7 +781,7 @@ detect_amdc1e_noarat()
status_t
arch_cpu_init_percpu(kernel_args *args, int cpu)
arch_cpu_init_percpu(kernel_args* args, int cpu)
{
detect_cpu(cpu);
@ -791,7 +813,7 @@ arch_cpu_init_percpu(kernel_args *args, int cpu)
status_t
arch_cpu_init(kernel_args *args)
arch_cpu_init(kernel_args* args)
{
// init the TSC -> system_time() conversion factors
@ -812,13 +834,13 @@ arch_cpu_init(kernel_args *args)
status_t
arch_cpu_init_post_vm(kernel_args *args)
arch_cpu_init_post_vm(kernel_args* args)
{
uint32 i;
// account for the segment descriptors
gGDT = (segment_descriptor *)(addr_t)args->arch_args.vir_gdt;
create_area("gdt", (void **)&gGDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
gGDT = (segment_descriptor*)(addr_t)args->arch_args.vir_gdt;
create_area("gdt", (void**)&gGDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
// allocate an area for the double fault stacks
@ -831,41 +853,53 @@ arch_cpu_init_post_vm(kernel_args *args)
&virtualRestrictions, &physicalRestrictions,
(void**)&sDoubleFaultStacks);
// TODO x86_64
#ifndef __x86_64__
X86PagingStructures* kernelPagingStructures
= static_cast<X86VMTranslationMap*>(
VMAddressSpace::Kernel()->TranslationMap())->PagingStructures();
#endif
// setup task-state segments
for (i = 0; i < args->num_cpus; i++) {
// initialize the regular and double fault tss stored in the per-cpu
// structure
memset(&gCPU[i].arch.tss, 0, sizeof(struct tss));
#ifndef __x86_64__
gCPU[i].arch.tss.ss0 = KERNEL_DATA_SEG;
#endif
gCPU[i].arch.tss.io_map_base = sizeof(struct tss);
// add TSS descriptor for this new TSS
set_tss_descriptor(&gGDT[TSS_BASE_SEGMENT + i],
(addr_t)&gCPU[i].arch.tss, sizeof(struct tss));
set_tss_descriptor(&gGDT[TSS_SEGMENT(i)], (addr_t)&gCPU[i].arch.tss,
sizeof(struct tss));
// initialize the double fault tss
init_double_fault(i);
// TODO x86_64
#ifndef __x86_64__
// init active translation map
gCPU[i].arch.active_paging_structures = kernelPagingStructures;
kernelPagingStructures->AddReference();
#endif
}
// set the current hardware task on cpu 0
load_tss(0);
#ifndef __x86_64__
// setup TLS descriptors (one for every CPU)
for (i = 0; i < args->num_cpus; i++) {
set_segment_descriptor(&gGDT[TLS_BASE_SEGMENT + i], 0, TLS_SIZE,
DT_DATA_WRITEABLE, DPL_USER);
}
#endif
// TODO x86_64
#ifndef __x86_64
if (!apic_available())
#endif
x86_init_fpu();
// else fpu gets set up in smp code
@ -874,18 +908,18 @@ arch_cpu_init_post_vm(kernel_args *args)
status_t
arch_cpu_init_post_modules(kernel_args *args)
arch_cpu_init_post_modules(kernel_args* args)
{
// initialize CPU module
void *cookie = open_module_list("cpu");
void* cookie = open_module_list("cpu");
while (true) {
char name[B_FILE_NAME_LENGTH];
size_t nameLength = sizeof(name);
if (read_next_module_name(cookie, name, &nameLength) != B_OK
|| get_module(name, (module_info **)&sCpuModule) == B_OK)
|| get_module(name, (module_info**)&sCpuModule) == B_OK)
break;
}
@ -897,6 +931,8 @@ arch_cpu_init_post_modules(kernel_args *args)
call_all_cpus(&init_mtrrs, NULL);
}
// TODO x86_64
#ifndef __x86_64__
// get optimized functions from the CPU module
if (sCpuModule != NULL && sCpuModule->get_optimized_functions != NULL) {
x86_optimized_functions functions;
@ -933,16 +969,25 @@ arch_cpu_init_post_modules(kernel_args *args)
elf_add_memory_image_symbol(image, "commpage_memset",
((addr_t*)USER_COMMPAGE_ADDR)[COMMPAGE_ENTRY_X86_MEMSET], memsetLen,
B_SYMBOL_TYPE_TEXT);
#endif
return B_OK;
}
#ifndef __x86_64__
// Stores \a kstack into the current CPU's TSS sp0 field -- presumably so
// the CPU switches to this kernel stack on a user-to-kernel privilege
// transition (standard x86 TSS behavior; TODO confirm against TSS setup).
void
i386_set_tss_and_kstack(addr_t kstack)
{
get_cpu_struct()->arch.tss.sp0 = kstack;
}
#endif
// Flushes the TLB (non-global entries) by rewriting CR3 with its
// current value; the active page directory is unchanged.
void
arch_cpu_user_TLB_invalidate(void)
{
x86_write_cr3(x86_read_cr3());
}
void
@ -990,8 +1035,11 @@ arch_cpu_shutdown(bool rebootSystem)
if (acpi_shutdown(rebootSystem) == B_OK)
return B_OK;
// TODO x86_64
#ifndef __x86_64
if (!rebootSystem)
return apm_shutdown();
#endif
cpu_status state = disable_interrupts();
@ -1002,7 +1050,7 @@ arch_cpu_shutdown(bool rebootSystem)
snooze(500000);
// if that didn't help, try it this way
reboot();
x86_reboot();
restore_interrupts(state);
return B_ERROR;
@ -1017,7 +1065,7 @@ arch_cpu_idle(void)
void
arch_cpu_sync_icache(void *address, size_t length)
arch_cpu_sync_icache(void* address, size_t length)
{
// instruction cache is always consistent on x86
}
@ -1026,15 +1074,23 @@ arch_cpu_sync_icache(void *address, size_t length)
void
arch_cpu_memory_read_barrier(void)
{
#ifdef __x86_64__
asm volatile("lfence" : : : "memory");
#else
asm volatile ("lock;" : : : "memory");
asm volatile ("addl $0, 0(%%esp);" : : : "memory");
#endif
}
void
arch_cpu_memory_write_barrier(void)
{
#ifdef __x86_64__
asm volatile("sfence" : : : "memory");
#else
asm volatile ("lock;" : : : "memory");
asm volatile ("addl $0, 0(%%esp);" : : : "memory");
#endif
}

View File

@ -86,9 +86,9 @@ arch_thread_init(struct kernel_args *args)
asm volatile ("clts; fninit; fnclex;");
if (gHasSSE)
i386_fxsave(sInitialState.fpu_state);
x86_fxsave(sInitialState.fpu_state);
else
i386_fnsave(sInitialState.fpu_state);
x86_fnsave(sInitialState.fpu_state);
return B_OK;
}
@ -536,7 +536,7 @@ arch_setup_signal_frame(Thread* thread, struct sigaction* action,
signalFrameData->context.uc_mcontext.edi = frame->edi;
signalFrameData->context.uc_mcontext.esi = frame->esi;
signalFrameData->context.uc_mcontext.ebx = frame->ebx;
i386_fnsave((void *)(&signalFrameData->context.uc_mcontext.xregs));
x86_fnsave((void *)(&signalFrameData->context.uc_mcontext.xregs));
// fill in signalFrameData->context.uc_stack
signal_get_user_stack(frame->user_esp, &signalFrameData->context.uc_stack);
@ -605,7 +605,7 @@ arch_restore_signal_frame(struct signal_frame_data* signalFrameData)
frame->esi = signalFrameData->context.uc_mcontext.esi;
frame->ebx = signalFrameData->context.uc_mcontext.ebx;
i386_frstor((void*)(&signalFrameData->context.uc_mcontext.xregs));
x86_frstor((void*)(&signalFrameData->context.uc_mcontext.xregs));
TRACE(("### arch_restore_signal_frame: exit\n"));

View File

@ -596,10 +596,10 @@ arch_set_debug_cpu_state(const debug_cpu_state *cpuState)
InterruptsLocker locker;
memcpy(thread->arch_info.fpu_state, &cpuState->extended_registers,
sizeof(cpuState->extended_registers));
i386_fxrstor(thread->arch_info.fpu_state);
x86_fxrstor(thread->arch_info.fpu_state);
} else {
// TODO: Implement! We need to convert the format first.
// i386_frstor(&cpuState->extended_registers);
// x86_frstor(&cpuState->extended_registers);
}
// frame->gs = cpuState->gs;
@ -638,13 +638,13 @@ arch_get_debug_cpu_state(debug_cpu_state *cpuState)
// buffer. We need to disable interrupts to make use of it.
Thread* thread = thread_get_current_thread();
InterruptsLocker locker;
i386_fxsave(thread->arch_info.fpu_state);
x86_fxsave(thread->arch_info.fpu_state);
// unlike fnsave, fxsave doesn't reinit the FPU state
memcpy(&cpuState->extended_registers, thread->arch_info.fpu_state,
sizeof(cpuState->extended_registers));
} else {
i386_fnsave(&cpuState->extended_registers);
i386_frstor(&cpuState->extended_registers);
x86_fnsave(&cpuState->extended_registers);
x86_frstor(&cpuState->extended_registers);
// fnsave reinits the FPU state after saving, so we need to
// load it again
// TODO: Convert to fxsave format!