Cleaner separation of 32-/64-bit specific CPU/interrupt code.

Renamed {32,64}/int.cpp to {32,64}/descriptors.cpp, which now contain
functions for GDT and TSS setup that were previously in arch_cpu.cpp,
as well as the IDT setup code. These get called from the init functions
in arch_cpu.cpp, rather than having a bunch of ifdef'd chunks of code
for 32/64.
Alex Smith 2012-08-18 17:32:59 +01:00
parent dcd705cded
commit d2a1be1c4e
10 changed files with 241 additions and 209 deletions
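For orientation: the commit message describes replacing the #ifdef'd GDT/TSS/IDT setup in arch_cpu.cpp with calls into a shared x86_descriptors_* interface, implemented once per architecture in 32/descriptors.cpp and 64/descriptors.cpp. A condensed sketch of the resulting flow, assembled from the hunks below (the elisions and ordering are illustrative, not verbatim):

// arch_cpu.cpp after this commit -- condensed sketch, not verbatim
status_t
arch_cpu_init_percpu(kernel_args* args, int cpu)
{
	// Load descriptor tables for this CPU (IDT; TSS on non-boot CPUs).
	x86_descriptors_init_percpu(args, cpu);
	detect_cpu(cpu);
	// ... idle-function selection elided ...
	return B_OK;
}

status_t
arch_cpu_init(kernel_args* args)
{
	// ... TSC conversion factor setup elided ...
	// Initialize descriptor tables (boot CPU IDT, handler table).
	x86_descriptors_init(args);
	return B_OK;
}

status_t
arch_cpu_init_post_vm(kernel_args* args)
{
	// ... double fault stack allocation elided ...
	// More descriptor table setup (GDT/IDT areas, per-CPU TSSes).
	x86_descriptors_init_post_vm(args);
	// ... paging structures, FPU init elided ...
	return B_OK;
}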

View File

@@ -389,7 +389,6 @@ void x86_context_switch(struct arch_thread* oldState,
void x86_userspace_thread_exit(void);
void x86_end_userspace_thread_exit(void);
void x86_swap_pgdir(addr_t newPageDir);
void x86_set_tss_and_kstack(addr_t kstack);
void x86_fxsave(void* fpuState);
void x86_fxrstor(const void* fpuState);
void x86_noop_swap(void* oldFpuState, const void* newFpuState);
@@ -397,7 +396,6 @@ void x86_fxsave_swap(void* oldFpuState, const void* newFpuState);
addr_t x86_get_stack_frame();
uint64 x86_read_msr(uint32 registerNumber);
void x86_write_msr(uint32 registerNumber, uint64 value);
void* x86_get_idt(int32 cpu);
uint32 x86_count_mtrrs(void);
void x86_set_mtrr(uint32 index, uint64 base, uint64 length, uint8 type);
status_t x86_get_mtrr(uint32 index, uint64* _base, uint64* _length,
@@ -420,13 +418,9 @@ void x86_page_fault_exception(iframe* iframe);
void x86_fnsave(void* fpuState);
void x86_frstor(const void* fpuState);
void x86_fnsave_swap(void* oldFpuState, const void* newFpuState);
void x86_set_task_gate(int32 cpu, int32 n, int32 segment);
void x86_double_fault_exception(iframe* frame);
void x86_page_fault_exception_double_fault(iframe* frame);
#endif
extern segment_descriptor* gGDT;
#ifdef __cplusplus
} // extern "C" {

View File

@@ -12,6 +12,12 @@
#ifndef _ASSEMBLER
#include <SupportDefs.h>
struct kernel_args;
enum descriptor_privilege_levels {
DPL_KERNEL = 0,
DPL_USER = 3,
@@ -42,6 +48,11 @@ enum gate_types {
};
void x86_descriptors_init(kernel_args* args);
void x86_descriptors_init_percpu(kernel_args* args, int cpu);
status_t x86_descriptors_init_post_vm(kernel_args* args);
#endif // !_ASSEMBLER

View File

@@ -3,30 +3,26 @@
* Copyright 2010, Clemens Zeidler, haiku@clemens-zeidler.de.
* Copyright 2009-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <int.h>
#include <arch/x86/descriptors.h>
#include <stdio.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <smp.h>
#include <tls.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <arch/cpu.h>
#include <arch/int.h>
#include <arch/smp.h>
#include <arch/user_debugger.h>
#include <arch/vm.h>
#include <arch/x86/apic.h>
#include <arch/x86/descriptors.h>
#include <arch/x86/pic.h>
#include "interrupts.h"
@@ -36,9 +32,10 @@ static interrupt_descriptor* sIDTs[B_MAX_CPU_COUNT];
// table with functions handling respective interrupts
typedef void interrupt_handler_function(struct iframe* frame);
#define INTERRUPT_HANDLER_TABLE_SIZE 256
interrupt_handler_function* gInterruptHandlerTable[
INTERRUPT_HANDLER_TABLE_SIZE];
static const uint32 kInterruptHandlerTableSize = 256;
interrupt_handler_function* gInterruptHandlerTable[kInterruptHandlerTableSize];
segment_descriptor* gGDT;
/*! Initializes a descriptor in an IDT.
@@ -88,23 +85,23 @@ set_trap_gate(int32 cpu, int n, void (*addr)())
For CPUs other than the boot CPU it must not be called before
arch_int_init_post_vm() (arch_cpu_init_post_vm() is fine).
*/
void
x86_set_task_gate(int32 cpu, int32 n, int32 segment)
static void
set_task_gate(int32 cpu, int32 n, int32 segment)
{
sIDTs[cpu][n].a = (segment << 16);
sIDTs[cpu][n].b = 0x8000 | (0 << 13) | (0x5 << 8); // present, dpl 0, type 5
}
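As an aside (not part of the commit), the constant in set_task_gate() decodes per the IA-32 gate descriptor layout; a throwaway snippet to verify:

// Standalone illustration -- decodes 0x8000 | (0 << 13) | (0x5 << 8).
#include <cstdint>
#include <cstdio>

int main()
{
	uint32_t b = 0x8000 | (0 << 13) | (0x5 << 8);
	printf("present=%u dpl=%u type=%#x\n",
		(b >> 15) & 1,		// bit 15: segment present
		(b >> 13) & 3,		// bits 14..13: descriptor privilege level
		(b >> 8) & 0x1f);	// bits 12..8: gate type (0x5 = task gate)
	return 0;
}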
/*! Returns the virtual IDT address for CPU \a cpu. */
void*
x86_get_idt(int32 cpu)
static void
load_tss(int cpu)
{
return sIDTs[cpu];
short seg = (TSS_SEGMENT(cpu) << 3) | DPL_KERNEL;
asm("ltr %%ax" : : "a" (seg));
}
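The operand handed to ltr follows the standard x86 selector encoding: GDT index in bits 15..3, table indicator in bit 2 (0 = GDT), requested privilege level in bits 1..0. A standalone sketch (the GDT index is a made-up value, not Haiku's actual TSS_SEGMENT()):

#include <cstdint>
#include <cstdio>

int main()
{
	const uint16_t kTssGdtIndex = 5;	// hypothetical TSS_SEGMENT(cpu)
	const uint16_t kDplKernel = 0;		// ring 0, as in DPL_KERNEL
	uint16_t selector = (kTssGdtIndex << 3) | kDplKernel;
	printf("ltr operand: %#x\n", (unsigned)selector);	// 0x28 for index 5
	return 0;
}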
// #pragma mark -
// #pragma mark - Double fault handling
void
@@ -171,18 +168,52 @@ x86_page_fault_exception_double_fault(struct iframe* frame)
}
status_t
arch_int_init(struct kernel_args *args)
static void
init_double_fault(int cpuNum)
{
int i;
// set up the double fault TSS
struct tss* tss = &gCPU[cpuNum].arch.double_fault_tss;
memset(tss, 0, sizeof(struct tss));
size_t stackSize;
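// The stack grows downward, so sp0 must point past the end of the
// double fault stack allocation.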
tss->sp0 = (addr_t)x86_get_double_fault_stack(cpuNum, &stackSize);
tss->sp0 += stackSize;
tss->ss0 = KERNEL_DATA_SEG;
tss->cr3 = x86_read_cr3();
// copy the current cr3 to the double fault cr3
tss->eip = (uint32)&double_fault;
tss->es = KERNEL_DATA_SEG;
tss->cs = KERNEL_CODE_SEG;
tss->ss = KERNEL_DATA_SEG;
tss->esp = tss->sp0;
tss->ds = KERNEL_DATA_SEG;
tss->fs = KERNEL_DATA_SEG;
tss->gs = KERNEL_DATA_SEG;
tss->ldt_seg_selector = 0;
tss->io_map_base = sizeof(struct tss);
// add TSS descriptor for this new TSS
uint16 tssSegmentDescriptorIndex = DOUBLE_FAULT_TSS_BASE_SEGMENT + cpuNum;
set_tss_descriptor(&gGDT[tssSegmentDescriptorIndex],
(addr_t)tss, sizeof(struct tss));
set_task_gate(cpuNum, 8, tssSegmentDescriptorIndex << 3);
}
// #pragma mark -
void
x86_descriptors_init(kernel_args* args)
{
uint32 i;
interrupt_handler_function** table;
// set the global sIDT variable
// Get the GDT and boot CPU IDT set up by the boot loader.
gGDT = (segment_descriptor*)args->arch_args.vir_gdt;
sIDTs[0] = (interrupt_descriptor *)(addr_t)args->arch_args.vir_idt;
// setup the standard programmable interrupt controller
pic_init();
set_interrupt_gate(0, 0, &trap0);
set_interrupt_gate(0, 1, &trap1);
set_interrupt_gate(0, 2, &trap2);
@@ -191,7 +222,7 @@ arch_int_init(struct kernel_args *args)
set_interrupt_gate(0, 5, &trap5);
set_interrupt_gate(0, 6, &trap6);
set_interrupt_gate(0, 7, &trap7);
// trap8 (double fault) is set in arch_cpu.c
// trap8 (double fault) is set in init_double_fault().
set_interrupt_gate(0, 9, &trap9);
set_interrupt_gate(0, 10, &trap10);
set_interrupt_gate(0, 11, &trap11);
@@ -447,7 +478,7 @@ arch_int_init(struct kernel_args *args)
// defaults
for (i = 0; i < ARCH_INTERRUPT_BASE; i++)
table[i] = x86_invalid_exception;
for (i = ARCH_INTERRUPT_BASE; i < INTERRUPT_HANDLER_TABLE_SIZE; i++)
for (i = ARCH_INTERRUPT_BASE; i < kInterruptHandlerTableSize; i++)
table[i] = x86_hardware_interrupt;
table[0] = x86_unexpected_exception; // Divide Error Exception (#DE)
@@ -469,30 +500,51 @@ arch_int_init(struct kernel_args *args)
table[17] = x86_unexpected_exception; // Alignment Check Exception (#AC)
table[18] = x86_fatal_exception; // Machine-Check Exception (#MC)
table[19] = x86_unexpected_exception; // SIMD Floating-Point Exception (#XF)
}
return B_OK;
void
x86_descriptors_init_percpu(kernel_args* args, int cpu)
{
// load the TSS for this cpu
// note the main cpu gets initialized in x86_descriptors_init_post_vm()
if (cpu != 0) {
load_tss(cpu);
// set the IDT
struct {
uint16 limit;
void* address;
} _PACKED descriptor = {
256 * 8 - 1, // 256 descriptors, 8 bytes each (-1 for "limit")
sIDTs[cpu]
};
asm volatile("lidt %0" : : "m"(descriptor));
}
}
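A quick sanity check on the lidt limit above (a throwaway snippet; the 64-bit code below computes the same thing via sizeof(interrupt_descriptor), which is 16 bytes there):

#include <cstdio>

int main()
{
	const unsigned kEntryCount = 256;	// IDT slots
	const unsigned kDescriptorSize = 8;	// bytes per 32-bit IDT entry
	// The IDTR limit field holds the table size in bytes minus one.
	unsigned limit = kEntryCount * kDescriptorSize - 1;
	printf("IDT limit: %u (%#x)\n", limit, limit);	// 2047 (0x7ff)
	return 0;
}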
status_t
arch_int_init_post_vm(struct kernel_args *args)
x86_descriptors_init_post_vm(kernel_args* args)
{
// Always init the local apic as it can be used for timers even if we
// don't end up using the io apic
apic_init(args);
uint32 i;
// account for the segment descriptors
create_area("gdt", (void **)&gGDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
// create IDT area for the boot CPU
area_id area = create_area("idt", (void**)&sIDTs[0], B_EXACT_ADDRESS,
B_PAGE_SIZE, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < 0)
if (area < B_OK)
return area;
// create IDTs for the off-boot CPU
size_t idtSize = 256 * 8;
// 256 8 bytes-sized descriptors
int32 cpuCount = smp_get_num_cpus();
if (cpuCount > 0) {
size_t areaSize = ROUNDUP(cpuCount * idtSize, B_PAGE_SIZE);
if (args->num_cpus > 0) {
size_t areaSize = ROUNDUP(args->num_cpus * idtSize, B_PAGE_SIZE);
interrupt_descriptor* idt;
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
@@ -503,7 +555,7 @@ arch_int_init_post_vm(struct kernel_args *args)
if (area < 0)
return area;
for (int32 i = 1; i < cpuCount; i++) {
for (i = 1; i < args->num_cpus; i++) {
sIDTs[i] = idt;
memcpy(idt, sIDTs[0], idtSize);
idt += 256;
@@ -511,5 +563,31 @@ arch_int_init_post_vm(struct kernel_args *args)
}
}
return area >= B_OK ? B_OK : area;
// setup task-state segments
for (i = 0; i < args->num_cpus; i++) {
// initialize the regular and double fault tss stored in the per-cpu
// structure
memset(&gCPU[i].arch.tss, 0, sizeof(struct tss));
gCPU[i].arch.tss.ss0 = KERNEL_DATA_SEG;
gCPU[i].arch.tss.io_map_base = sizeof(struct tss);
// add TSS descriptor for this new TSS
set_tss_descriptor(&gGDT[TSS_SEGMENT(i)], (addr_t)&gCPU[i].arch.tss,
sizeof(struct tss));
// initialize the double fault tss
init_double_fault(i);
}
// set the current hardware task on cpu 0
load_tss(0);
// setup TLS descriptors (one for every CPU)
for (i = 0; i < args->num_cpus; i++) {
set_segment_descriptor(&gGDT[TLS_BASE_SEGMENT + i], 0, TLS_SIZE,
DT_DATA_WRITEABLE, DPL_USER);
}
return B_OK;
}

View File

@@ -13,6 +13,10 @@
extern "C" {
#endif
struct iframe;
void trap0();void trap1();void trap2();void trap3();void trap4();void trap5();
void trap6();void trap7();void trap9();void trap10();void trap11();
void trap12();void trap13();void trap14();void trap16();void trap17();
@@ -73,6 +77,10 @@ void trap250();
void trap251();void trap252();void trap253();void trap254();void trap255();
void x86_double_fault_exception(struct iframe* frame);
void x86_page_fault_exception_double_fault(struct iframe* frame);
#ifdef __cplusplus
}
#endif

View File

@@ -67,6 +67,7 @@ class RestartSyscall : public AbstractTraceEntry {
// from arch_cpu.cpp
extern bool gHasSSE;
extern segment_descriptor* gGDT;
static struct arch_thread sInitialState _ALIGNED(16);
// the fpu_state must be aligned on a 16 byte boundary, so that fxsave can use it

View File

@@ -4,32 +4,40 @@
*/
#include <int.h>
#include <stdio.h>
#include <arch/x86/descriptors.h>
#include <boot/kernel_args.h>
#include <cpu.h>
#include <debug.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <arch/x86/apic.h>
#include <arch/x86/descriptors.h>
#include <arch/x86/pic.h>
#include <arch/int.h>
#include <arch/user_debugger.h>
typedef void interrupt_handler_function(iframe* frame);
static const uint32 kInterruptHandlerTableSize = 256;
static segment_descriptor* sGDT;
static interrupt_descriptor* sIDT;
static const uint32 kInterruptHandlerTableSize = 256;
interrupt_handler_function* gInterruptHandlerTable[kInterruptHandlerTableSize];
extern uint8 isr_array[kInterruptHandlerTableSize][16];
static void
load_tss(int cpu)
{
uint16 seg = (TSS_SEGMENT(cpu) << 3) | DPL_KERNEL;
asm volatile("ltr %%ax" :: "a" (seg));
}
// #pragma mark - Exception handlers
static void
x86_64_general_protection_fault(iframe* frame)
{
@@ -49,22 +57,14 @@ x86_64_general_protection_fault(iframe* frame)
}
/*! Returns the virtual IDT address for CPU \a cpu. */
void*
x86_get_idt(int32 cpu)
{
// We use a single IDT for all CPUs on x86_64.
return sIDT;
}
// #pragma mark -
status_t
arch_int_init(kernel_args* args)
void
x86_descriptors_init(kernel_args* args)
{
// The loader allocates an empty IDT for us.
// The boot loader sets up a GDT and allocates an empty IDT for us.
sGDT = (segment_descriptor*)args->arch_args.vir_gdt;
sIDT = (interrupt_descriptor*)args->arch_args.vir_idt;
// Fill out the IDT, pointing each entry to the corresponding entry in the
@@ -76,7 +76,7 @@ arch_int_init(kernel_args* args)
// in the TSS. If the IST field of an interrupt descriptor is non-zero,
// the CPU will switch to the stack specified by that IST entry when
// handling that interrupt. So, we use IST entry 1 to store the double
// fault stack address (this is set up in arch_cpu.cpp).
// fault stack address (set up in x86_descriptors_init_post_vm()).
uint32 ist = (i == 8) ? 1 : 0;
// Breakpoint exception can be raised from userland.
@@ -86,9 +86,8 @@ arch_int_init(kernel_args* args)
GATE_INTERRUPT, KERNEL_CODE_SEG, dpl, ist);
}
interrupt_handler_function** table = gInterruptHandlerTable;
// Initialize the interrupt handler table.
interrupt_handler_function** table = gInterruptHandlerTable;
for (uint32 i = 0; i < ARCH_INTERRUPT_BASE; i++)
table[i] = x86_invalid_exception;
for (uint32 i = ARCH_INTERRUPT_BASE; i < kInterruptHandlerTableSize; i++)
@@ -113,33 +112,61 @@ arch_int_init(kernel_args* args)
table[17] = x86_unexpected_exception; // Alignment Check Exception (#AC)
table[18] = x86_fatal_exception; // Machine-Check Exception (#MC)
table[19] = x86_unexpected_exception; // SIMD Floating-Point Exception (#XF)
}
void
x86_descriptors_init_percpu(kernel_args* args, int cpu)
{
// Load the IDT.
gdt_idt_descr idtr = {
256 * sizeof(interrupt_descriptor) - 1,
(addr_t)sIDT
};
asm volatile("lidt %0" :: "m"(idtr));
asm volatile("lidt %0" :: "m" (idtr));
// Set up the legacy PIC.
pic_init();
return B_OK;
// Load the TSS for non-boot CPUs (boot CPU gets done below).
if (cpu != 0) {
load_tss(cpu);
}
}
status_t
arch_int_init_post_vm(kernel_args* args)
x86_descriptors_init_post_vm(kernel_args* args)
{
// Always init the local apic as it can be used for timers even if we
// don't end up using the io apic
apic_init(args);
area_id area;
// Create an area for the IDT.
area_id area = create_area("idt", (void**)&sIDT, B_EXACT_ADDRESS,
B_PAGE_SIZE, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < 0)
// Create an area for the GDT.
area = create_area("gdt", (void**)&sGDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return area;
// Same for the IDT.
area = create_area("idt", (void**)&sIDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return area;
for (uint32 i = 0; i < args->num_cpus; i++) {
// Set up the task state segment.
memset(&gCPU[i].arch.tss, 0, sizeof(struct tss));
gCPU[i].arch.tss.io_map_base = sizeof(struct tss);
// Set up the descriptor for this TSS.
set_tss_descriptor(&sGDT[TSS_SEGMENT(i)], (addr_t)&gCPU[i].arch.tss,
sizeof(struct tss));
// Set up the double fault IST entry (see x86_descriptors_init()).
struct tss* tss = &gCPU[i].arch.tss;
size_t stackSize;
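// The stack grows downward, so IST1 gets the top of the double fault
// stack allocation.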
tss->ist1 = (addr_t)x86_get_double_fault_stack(i, &stackSize);
tss->ist1 += stackSize;
}
// Load the TSS for the boot CPU.
load_tss(0);
return B_OK;
}

View File

@@ -22,7 +22,7 @@ if $(TARGET_ARCH) = x86_64 {
archSpecificSources =
arch.S
cpuid.S
int.cpp
descriptors.cpp
interrupts.S
signals.cpp
signals_asm.S
@@ -47,7 +47,7 @@ if $(TARGET_ARCH) = x86_64 {
arch.S
bios.cpp
cpuid.S
int.cpp
descriptors.cpp
interrupts.S
signals.cpp
signals_asm.S

View File

@@ -5,7 +5,6 @@
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*
*/
@@ -22,7 +21,6 @@
#include <debug.h>
#include <elf.h>
#include <smp.h>
#include <tls.h>
#include <vm/vm.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
@@ -34,10 +32,6 @@
#include "paging/X86PagingStructures.h"
#include "paging/X86VMTranslationMap.h"
#ifndef __x86_64__
#include "32/interrupts.h"
#endif
#define DUMP_FEATURE_STRING 1
@@ -99,8 +93,6 @@ static uint32 sCpuRendezvous2;
static uint32 sCpuRendezvous3;
static vint32 sTSCSyncRendezvous;
segment_descriptor* gGDT = NULL;
/* Some specials for the double fault handler */
static uint8* sDoubleFaultStacks;
static const size_t kDoubleFaultStackSize = 4096; // size per CPU
@@ -343,56 +335,6 @@ x86_init_fpu(void)
}
static void
load_tss(int cpu)
{
short seg = (TSS_SEGMENT(cpu) << 3) | DPL_KERNEL;
asm("ltr %%ax" : : "a" (seg));
}
static void
init_double_fault(int cpuNum)
{
#ifdef __x86_64__
// x86_64 does not have task gates, so we use the IST mechanism to switch
// to the double fault stack upon a double fault (see 64/int.cpp).
struct tss* tss = &gCPU[cpuNum].arch.tss;
size_t stackSize;
tss->ist1 = (addr_t)x86_get_double_fault_stack(cpuNum, &stackSize);
tss->ist1 += stackSize;
#else
// set up the double fault TSS
struct tss* tss = &gCPU[cpuNum].arch.double_fault_tss;
memset(tss, 0, sizeof(struct tss));
size_t stackSize;
tss->sp0 = (addr_t)x86_get_double_fault_stack(cpuNum, &stackSize);
tss->sp0 += stackSize;
tss->ss0 = KERNEL_DATA_SEG;
tss->cr3 = x86_read_cr3();
// copy the current cr3 to the double fault cr3
tss->eip = (uint32)&double_fault;
tss->es = KERNEL_DATA_SEG;
tss->cs = KERNEL_CODE_SEG;
tss->ss = KERNEL_DATA_SEG;
tss->esp = tss->sp0;
tss->ds = KERNEL_DATA_SEG;
tss->fs = KERNEL_DATA_SEG;
tss->gs = KERNEL_DATA_SEG;
tss->ldt_seg_selector = 0;
tss->io_map_base = sizeof(struct tss);
// add TSS descriptor for this new TSS
uint16 tssSegmentDescriptorIndex = DOUBLE_FAULT_TSS_BASE_SEGMENT + cpuNum;
set_tss_descriptor(&gGDT[tssSegmentDescriptorIndex],
(addr_t)tss, sizeof(struct tss));
x86_set_task_gate(cpuNum, 8, tssSegmentDescriptorIndex << 3);
#endif
}
#if DUMP_FEATURE_STRING
static void
dump_feature_string(int currentCPU, cpu_ent* cpu)
@@ -554,7 +496,7 @@ dump_feature_string(int currentCPU, cpu_ent* cpu)
#endif // DUMP_FEATURE_STRING
static int
static void
detect_cpu(int currentCPU)
{
cpu_ent* cpu = get_cpu_struct();
@@ -664,8 +606,6 @@ detect_cpu(int currentCPU)
#if DUMP_FEATURE_STRING
dump_feature_string(currentCPU, cpu);
#endif
return 0;
}
@@ -700,7 +640,7 @@ x86_get_double_fault_stack(int32 cpu, size_t* _size)
int32
x86_double_fault_get_cpu(void)
{
uint32 stack = x86_get_stack_frame();
addr_t stack = x86_get_stack_frame();
return (stack - (addr_t)sDoubleFaultStacks) / kDoubleFaultStackSize;
}
@@ -774,33 +714,19 @@ detect_amdc1e_noarat()
status_t
arch_cpu_init_percpu(kernel_args* args, int cpu)
{
// Load descriptor tables for this CPU.
x86_descriptors_init_percpu(args, cpu);
detect_cpu(cpu);
// load the TSS for this cpu
// note the main cpu gets initialized in arch_cpu_init_post_vm()
if (cpu != 0) {
load_tss(cpu);
// set the IDT
struct {
uint16 limit;
void* address;
} _PACKED descriptor = {
256 * sizeof(interrupt_descriptor) - 1,
// 256 descriptors (-1 for "limit")
x86_get_idt(cpu)
};
asm volatile("lidt %0" : : "m"(descriptor));
}
if (!gCpuIdleFunc) {
if (detect_amdc1e_noarat())
gCpuIdleFunc = amdc1e_noarat_idle;
else
gCpuIdleFunc = halt_idle;
}
return 0;
return B_OK;
}
@@ -829,6 +755,9 @@ arch_cpu_init(kernel_args* args)
}
#endif
// Initialize descriptor tables.
x86_descriptors_init(args);
return B_OK;
}
@@ -838,11 +767,6 @@ arch_cpu_init_post_vm(kernel_args* args)
{
uint32 i;
// account for the segment descriptors
gGDT = (segment_descriptor*)(addr_t)args->arch_args.vir_gdt;
create_area("gdt", (void**)&gGDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
// allocate an area for the double fault stacks
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
@@ -853,43 +777,19 @@ arch_cpu_init_post_vm(kernel_args* args)
&virtualRestrictions, &physicalRestrictions,
(void**)&sDoubleFaultStacks);
// More descriptor table setup.
x86_descriptors_init_post_vm(args);
X86PagingStructures* kernelPagingStructures
= static_cast<X86VMTranslationMap*>(
VMAddressSpace::Kernel()->TranslationMap())->PagingStructures();
// setup task-state segments
// Set active translation map on each CPU.
for (i = 0; i < args->num_cpus; i++) {
// initialize the regular and double fault tss stored in the per-cpu
// structure
memset(&gCPU[i].arch.tss, 0, sizeof(struct tss));
#ifndef __x86_64__
gCPU[i].arch.tss.ss0 = KERNEL_DATA_SEG;
#endif
gCPU[i].arch.tss.io_map_base = sizeof(struct tss);
// add TSS descriptor for this new TSS
set_tss_descriptor(&gGDT[TSS_SEGMENT(i)], (addr_t)&gCPU[i].arch.tss,
sizeof(struct tss));
// initialize the double fault tss
init_double_fault(i);
// init active translation map
gCPU[i].arch.active_paging_structures = kernelPagingStructures;
kernelPagingStructures->AddReference();
}
// set the current hardware task on cpu 0
load_tss(0);
#ifndef __x86_64__
// setup TLS descriptors (one for every CPU)
for (i = 0; i < args->num_cpus; i++) {
set_segment_descriptor(&gGDT[TLS_BASE_SEGMENT + i], 0, TLS_SIZE,
DT_DATA_WRITEABLE, DPL_USER);
}
#endif
if (!apic_available())
x86_init_fpu();
// else fpu gets set up in smp code
@@ -963,13 +863,6 @@ arch_cpu_init_post_modules(kernel_args* args)
}
void
x86_set_tss_and_kstack(addr_t kstack)
{
get_cpu_struct()->arch.tss.sp0 = kstack;
}
void
arch_cpu_user_TLB_invalidate(void)
{

View File

@@ -388,6 +388,25 @@ arch_int_are_interrupts_enabled(void)
}
status_t
arch_int_init(kernel_args* args)
{
// setup the standard programmable interrupt controller
pic_init();
return B_OK;
}
status_t
arch_int_init_post_vm(kernel_args* args)
{
// Always init the local apic as it can be used for timers even if we
// don't end up using the io apic
apic_init(args);
return B_OK;
}
status_t
arch_int_init_io(kernel_args* args)
{

View File

@@ -163,7 +163,7 @@ x86_initial_return_to_userland(Thread* thread, iframe* frame)
// disable interrupts and set up CPU specifics for this thread
disable_interrupts();
x86_set_tss_and_kstack(thread->kernel_stack_top);
get_cpu_struct()->arch.tss.sp0 = thread->kernel_stack_top;
x86_set_tls_context(thread);
x86_set_syscall_stack(thread->kernel_stack_top);
@@ -208,7 +208,9 @@ arch_thread_init_tls(Thread* thread)
void
arch_thread_context_switch(Thread* from, Thread* to)
{
x86_set_tss_and_kstack(to->kernel_stack_top);
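// sp0 is the stack pointer the CPU loads from the TSS when an interrupt
// or exception crosses from userland into the kernel, so it has to track
// the incoming thread's kernel stack.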
cpu_ent* cpuData = to->cpu;
cpuData->arch.tss.sp0 = to->kernel_stack_top;
x86_set_syscall_stack(to->kernel_stack_top);
// set TLS GDT entry to the current thread - since this action is
@@ -216,7 +218,6 @@ arch_thread_context_switch(Thread* from, Thread* to)
if (to->user_local_storage != 0)
x86_set_tls_context(to);
struct cpu_ent* cpuData = to->cpu;
X86PagingStructures* activePagingStructures
= cpuData->arch.active_paging_structures;
VMAddressSpace* toAddressSpace = to->team->address_space;