Implemented long mode setup/switch code; the bootloader can now start the 64-bit kernel!

The setup procedure is fairly simple: create a 64-bit GDT and 64-bit page
tables that include all kernel mappings from the 32-bit address space, but at
their correct 64-bit addresses, then go through kernel_args and change all
virtual addresses to 64-bit addresses, and finally switch to long mode and
jump to the kernel.
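
A minimal sketch of that address conversion (not code from this commit), assuming the conventional bases of 0x80000000 for KERNEL_BASE and 0xFFFFFFFF80000000 for KERNEL_BASE_64BIT:

	// Rebase a 32-bit kernel virtual address into the 64-bit kernel area,
	// keeping its offset from the kernel base.
	uint64 addr32 = 0x80100000;				// some 32-bit kernel mapping
	uint64 addr64 = addr32 - 0x80000000ULL	// assumed KERNEL_BASE
		+ 0xFFFFFFFF80000000ULL;			// assumed KERNEL_BASE_64BIT
	// addr64 == 0xFFFFFFFF80100000 -- same offset, new base; this is what
	// fix_address() in long.cpp below computes for every kernel_args pointer.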
Alex Smith 2012-06-26 11:01:23 +01:00
parent a820f12943
commit e5fc2bfcab
16 changed files with 614 additions and 15 deletions

View File

@ -26,15 +26,15 @@ typedef struct {
  uint32 system_time_cv_factor;
  uint64 cpu_clock_speed;
  uint32 phys_pgdir;
- uint32 vir_pgdir;
+ uint64 vir_pgdir;
  uint32 num_pgtables;
  uint32 pgtables[MAX_BOOT_PTABLES];
- uint32 virtual_end;
+ uint64 virtual_end;
  uint32 phys_idt;
- uint32 vir_idt;
+ uint64 vir_idt;
  uint32 phys_gdt;
- uint32 vir_gdt;
- uint32 page_hole;
+ uint64 vir_gdt;
+ uint64 page_hole;
  // smp stuff
  uint32 apic_time_cv_factor; // apic ticks per second
  uint32 apic_phys;

View File

@ -0,0 +1,149 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#ifndef _KERNEL_ARCH_X86_64_DESCRIPTORS_H
#define _KERNEL_ARCH_X86_64_DESCRIPTORS_H
// Segment definitions.
// Note that the ordering of these is important to SYSCALL/SYSRET.
#define KERNEL_CODE_SEG 0x08
#define KERNEL_DATA_SEG 0x10
#define USER_DATA_SEG 0x18
#define USER_CODE_SEG 0x20
#ifndef _ASSEMBLER
#define TSS_BASE_SEGMENT 5
#define TLS_BASE_SEGMENT (TSS_BASE_SEGMENT + smp_get_num_cpus())
// Structure of a segment descriptor.
struct segment_descriptor {
uint32 limit0 : 16;
uint32 base0 : 24;
uint32 type : 4;
uint32 desc_type : 1;
uint32 dpl : 2;
uint32 present : 1;
uint32 limit1 : 4;
uint32 available : 1;
uint32 long_mode : 1;
uint32 d_b : 1;
uint32 granularity : 1;
uint32 base1 : 8;
} _PACKED;
// Structure of a TSS segment descriptor.
struct tss_descriptor {
uint32 limit0 : 16;
uint32 base0 : 24;
uint32 type : 4;
uint32 desc_type : 1;
uint32 dpl : 2;
uint32 present : 1;
uint32 limit1 : 4;
uint32 available : 1;
uint32 unused1 : 2;
uint32 granularity : 1;
uint32 base1 : 8;
uint32 base2 : 32;
uint32 unused2 : 32;
} _PACKED;
// Structure of an interrupt descriptor.
struct interrupt_descriptor {
uint32 base0 : 16;
uint32 sel : 16;
uint32 ist : 3;
uint32 unused : 5;
uint32 flags : 8;
uint32 base1 : 16;
uint32 base2 : 32;
uint32 reserved : 32;
} _PACKED;
enum descriptor_privilege_levels {
DPL_KERNEL = 0,
DPL_USER = 3,
};
enum descriptor_types {
// Code/data descriptor types.
DT_CODE_EXECUTE_ONLY = 0x8,
DT_CODE_ACCESSED = 0x9,
DT_CODE_READABLE = 0xa,
DT_CODE_CONFORM = 0xc,
DT_DATA_READ_ONLY = 0x0,
DT_DATA_ACCESSED = 0x1,
DT_DATA_WRITEABLE = 0x2,
DT_DATA_EXPANSION_DOWN = 0x4,
// System descriptor types.
DT_TSS = 9,
// Descriptor types
DT_SYSTEM_SEGMENT = 0,
DT_CODE_DATA_SEGMENT = 1,
};
static inline void
clear_segment_descriptor(segment_descriptor* desc)
{
*(uint64*)desc = 0;
}
static inline void
set_segment_descriptor(segment_descriptor* desc, uint8 type, uint8 dpl)
{
clear_segment_descriptor(desc);
// In 64-bit mode the CPU ignores the base/limit of code/data segments,
// it always treats base as 0 and does no limit checks.
desc->base0 = 0;
desc->base1 = 0;
desc->limit0 = 0xFFFF;
desc->limit1 = 0xF;
desc->granularity = 1;
desc->type = type;
desc->desc_type = DT_CODE_DATA_SEGMENT;
desc->dpl = dpl;
desc->present = 1;
desc->long_mode = (type & DT_CODE_EXECUTE_ONLY) ? 1 : 0;
// Must be set to 1 for code segments only.
}
static inline void
set_tss_descriptor(segment_descriptor* _desc, uint64 base, uint32 limit)
{
clear_segment_descriptor(_desc);
clear_segment_descriptor(&_desc[1]);
// The TSS descriptor is a special format in 64-bit mode, it is 16 bytes
// instead of 8.
tss_descriptor* desc = (tss_descriptor*)_desc;
desc->base0 = base & 0xffffff;
desc->base1 = ((base) >> 24) & 0xff;
desc->base2 = ((base) >> 32);
desc->limit0 = limit & 0xffff;
desc->limit1 = (limit >> 16) & 0xf;
desc->present = 1;
desc->type = DT_TSS;
desc->desc_type = DT_SYSTEM_SEGMENT;
desc->dpl = DPL_KERNEL;
}
#endif /* _ASSEMBLER */
#endif /* _KERNEL_ARCH_X86_64_DESCRIPTORS_H */
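
As a sanity check on the bit-field layout above, the following hypothetical snippet (not part of the commit) builds the kernel code segment with set_segment_descriptor() and shows the raw 8-byte value the fields should pack to, assuming the struct packs exactly as declared:

	segment_descriptor desc;
	set_segment_descriptor(&desc, DT_CODE_EXECUTE_ONLY, DPL_KERNEL);
	// limit 0xFFFFF, base 0, type 0x8, S=1, DPL=0, P=1, L=1, D/B=0, G=1
	// packs to the classic 64-bit kernel code descriptor:
	uint64 raw = *(uint64*)&desc;	// raw == 0x00AF98000000FFFF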

View File

@ -40,6 +40,8 @@ BootMergeObject boot_platform_bios_ia32.o :
hpet.cpp
interrupts.cpp
interrupts_asm.S
long.cpp
long_asm.S
# VESA/DDC EDID
decode_edid.c

View File

@ -0,0 +1,290 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
// Stop this from being included, it conflicts with the x86_64 version.
#define _KERNEL_ARCH_x86_DESCRIPTORS_H
#include "long.h"
#include <KernelExport.h>
#include <arch/x86_64/descriptors.h>
#include <arch_system_info.h>
#include <boot/platform.h>
#include <boot/heap.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include <kernel.h>
#include "debug.h"
#include "mmu.h"
struct gdt_idt_descr {
uint16 limit;
addr_t base;
} _PACKED;
/*! Convert a 32-bit address to a 64-bit address. */
static inline uint64
fix_address(uint64 address)
{
return address - KERNEL_BASE + KERNEL_BASE_64BIT;
}
template<typename Type>
inline void
fix_address(FixedWidthPointer<Type>& p)
{
if(p != NULL)
p.SetTo(fix_address(p.Get()));
}
static void
long_gdt_init()
{
// Allocate memory for the GDT.
segment_descriptor* gdt = (segment_descriptor*)
mmu_allocate_page(&gKernelArgs.arch_args.phys_gdt);
gKernelArgs.arch_args.vir_gdt = fix_address((addr_t)gdt);
dprintf("GDT at phys 0x%lx, virt 0x%llx\n", gKernelArgs.arch_args.phys_gdt,
gKernelArgs.arch_args.vir_gdt);
clear_segment_descriptor(&gdt[0]);
// Set up code/data segments (TSS segments set up later in the kernel).
set_segment_descriptor(&gdt[KERNEL_CODE_SEG / 8], DT_CODE_EXECUTE_ONLY,
DPL_KERNEL);
set_segment_descriptor(&gdt[KERNEL_DATA_SEG / 8], DT_DATA_WRITEABLE,
DPL_KERNEL);
set_segment_descriptor(&gdt[USER_CODE_SEG / 8], DT_CODE_EXECUTE_ONLY,
DPL_USER);
set_segment_descriptor(&gdt[USER_DATA_SEG / 8], DT_DATA_WRITEABLE,
DPL_USER);
}
static void
long_idt_init()
{
interrupt_descriptor* idt = (interrupt_descriptor*)
mmu_allocate_page(&gKernelArgs.arch_args.phys_idt);
gKernelArgs.arch_args.vir_idt = fix_address((addr_t)idt);
dprintf("IDT at phys 0x%lx, virt 0x%llx\n", gKernelArgs.arch_args.phys_idt,
gKernelArgs.arch_args.vir_idt);
// The 32-bit kernel gets an IDT with the loader's exception handlers until
// it can set up its own. Can't do that here because they won't work after
// switching to long mode. Therefore, just clear the IDT and leave the
// kernel to set it up.
memset(idt, 0, B_PAGE_SIZE);
}
static void
long_mmu_init()
{
addr_t physicalAddress;
// Allocate the top level PML4.
uint64* pml4 = (uint64*)mmu_allocate_page(&gKernelArgs.arch_args.phys_pgdir);
memset(pml4, 0, B_PAGE_SIZE);
gKernelArgs.arch_args.vir_pgdir = (uint64)(addr_t)pml4;
// Identity map the first 1GB of memory, do so using large pages.
uint64* pdpt = (uint64*)mmu_allocate_page(&physicalAddress);
memset(pdpt, 0, B_PAGE_SIZE);
pml4[0] = physicalAddress | 0x3;
uint64* pageDir = (uint64*)mmu_allocate_page(&physicalAddress);
memset(pageDir, 0, B_PAGE_SIZE);
pdpt[0] = physicalAddress | 0x3;
for (uint32 i = 0; i < 512; i++) {
pageDir[i] = (i * 0x200000) | 0x83;
}
// Allocate tables for the kernel mappings.
pdpt = (uint64*)mmu_allocate_page(&physicalAddress);
memset(pdpt, 0, B_PAGE_SIZE);
pml4[511] = physicalAddress | 0x3;
pageDir = (uint64*)mmu_allocate_page(&physicalAddress);
memset(pageDir, 0, B_PAGE_SIZE);
pdpt[510] = physicalAddress | 0x3;
// Store the virtual memory usage information.
gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE_64BIT;
gKernelArgs.virtual_allocated_range[0].size = mmu_get_virtual_usage();
gKernelArgs.num_virtual_allocated_ranges = 1;
// We can now allocate page tables and duplicate the mappings across from
// the 32-bit address space to them.
uint64* pageTable = NULL;
for (uint32 i = 0; i < gKernelArgs.virtual_allocated_range[0].size
/ B_PAGE_SIZE; i++) {
if ((i % 512) == 0) {
pageTable = (uint64*)mmu_allocate_page(&physicalAddress);
memset(pageTable, 0, B_PAGE_SIZE);
pageDir[i / 512] = physicalAddress | 0x3;
// Just performed another virtual allocation, account for it.
gKernelArgs.virtual_allocated_range[0].size += B_PAGE_SIZE;
}
// Get the physical address to map.
if (!mmu_get_virtual_mapping(KERNEL_BASE + (i * B_PAGE_SIZE),
&physicalAddress))
continue;
pageTable[i % 512] = physicalAddress | 0x3;
}
gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_BASE_64BIT
+ gKernelArgs.virtual_allocated_range[0].size, 0x200000);
// Sort the address ranges.
sort_address_ranges(gKernelArgs.physical_memory_range,
gKernelArgs.num_physical_memory_ranges);
sort_address_ranges(gKernelArgs.physical_allocated_range,
gKernelArgs.num_physical_allocated_ranges);
sort_address_ranges(gKernelArgs.virtual_allocated_range,
gKernelArgs.num_virtual_allocated_ranges);
dprintf("phys memory ranges:\n");
for (uint32 i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
dprintf(" base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
gKernelArgs.physical_memory_range[i].start,
gKernelArgs.physical_memory_range[i].size);
}
dprintf("allocated phys memory ranges:\n");
for (uint32 i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
dprintf(" base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
gKernelArgs.physical_allocated_range[i].start,
gKernelArgs.physical_allocated_range[i].size);
}
dprintf("allocated virt memory ranges:\n");
for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
dprintf(" base %#018" B_PRIx64 ", length %#018" B_PRIx64 "\n",
gKernelArgs.virtual_allocated_range[i].start,
gKernelArgs.virtual_allocated_range[i].size);
}
}
static void
convert_preloaded_image(preloaded_elf64_image* image)
{
fix_address(image->next);
fix_address(image->name);
fix_address(image->debug_string_table);
fix_address(image->syms);
fix_address(image->rel);
fix_address(image->rela);
fix_address(image->pltrel);
fix_address(image->debug_symbols);
}
/*! Convert all addresses in kernel_args to 64-bit addresses. */
static void
convert_kernel_args()
{
fix_address(gKernelArgs.boot_volume);
fix_address(gKernelArgs.vesa_modes);
fix_address(gKernelArgs.edid_info);
fix_address(gKernelArgs.debug_output);
fix_address(gKernelArgs.boot_splash);
fix_address(gKernelArgs.arch_args.apic);
fix_address(gKernelArgs.arch_args.hpet);
convert_preloaded_image(static_cast<preloaded_elf64_image*>(
gKernelArgs.kernel_image.Pointer()));
fix_address(gKernelArgs.kernel_image);
// Iterate over the preloaded images. Must save the next address before
// converting, as the next pointer will be converted.
preloaded_image* image = gKernelArgs.preloaded_images;
fix_address(gKernelArgs.preloaded_images);
while (image) {
preloaded_image* next = image->next;
convert_preloaded_image(static_cast<preloaded_elf64_image*>(image));
image = next;
}
// Set correct kernel stack addresses.
for (uint32 i = 0; i < gKernelArgs.num_cpus; i++) {
gKernelArgs.cpu_kstack[i].start
= fix_address(gKernelArgs.cpu_kstack[i].start);
}
// Fix driver settings files.
driver_settings_file* file = gKernelArgs.driver_settings;
fix_address(gKernelArgs.driver_settings);
while (file) {
driver_settings_file* next = file->next;
fix_address(file->next);
fix_address(file->buffer);
file = next;
}
}
void
long_start_kernel()
{
// Check whether long mode is supported.
cpuid_info info;
get_current_cpuid(&info, 0x80000001);
if ((info.regs.edx & (1<<29)) == 0)
panic("64-bit kernel requires a 64-bit CPU");
preloaded_elf64_image *image = static_cast<preloaded_elf64_image *>(
gKernelArgs.kernel_image.Pointer());
// TODO: x86_64 SMP, disable for now.
gKernelArgs.num_cpus = 1;
long_gdt_init();
long_idt_init();
long_mmu_init();
convert_kernel_args();
debug_cleanup();
// Calculate the arguments for long_enter_kernel().
uint64 entry = image->elf_header.e_entry;
uint64 stackTop = gKernelArgs.cpu_kstack[0].start
+ gKernelArgs.cpu_kstack[0].size;
uint64 kernelArgs = fix_address((addr_t)&gKernelArgs);
dprintf("kernel entry at 0x%llx, stack 0x%llx, args 0x%llx\n", entry,
stackTop, kernelArgs);
// We're about to enter the kernel -- disable console output.
stdout = NULL;
// Load the new GDT. The physical address is used because long_enter_kernel
// disables 32-bit paging.
gdt_idt_descr gdtr = { GDT_LIMIT - 1, gKernelArgs.arch_args.phys_gdt };
asm volatile("lgdt %0" :: "m"(gdtr));
// Enter the kernel!
long_enter_kernel(gKernelArgs.arch_args.phys_pgdir, entry, stackTop,
kernelArgs, 0);
panic("Shouldn't get here");
}
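
A note on the table indices long_mmu_init() uses above: with 4-level paging, each level consumes 9 bits of the virtual address. Here is a hypothetical helper (not in the commit) showing the decomposition; the 0xFFFFFFFF80000000 value below is an assumption about KERNEL_BASE_64BIT, chosen because it is what makes the kernel area land in pml4[511] and pdpt[510]:

	// Split an x86_64 virtual address into its paging-structure indices.
	static inline void
	decompose_virtual_64(uint64 va, uint32* pml4e, uint32* pdpte, uint32* pde,
		uint32* pte)
	{
		*pml4e = (va >> 39) & 511;	// 512GB per PML4 entry
		*pdpte = (va >> 30) & 511;	// 1GB per PDPT entry
		*pde = (va >> 21) & 511;	// 2MB per page directory entry
		*pte = (va >> 12) & 511;	// 4KB per page table entry
	}

For 0xFFFFFFFF80000000 (assumed KERNEL_BASE_64BIT) this yields pml4e = 511, pdpte = 510, pde = 0, pte = 0, matching the entries populated above. The low bits OR'd into each entry are flags: 0x3 is present | writable, and 0x83 additionally sets the 2MB large-page bit used for the identity mapping.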

View File

@ -0,0 +1,18 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#ifndef LONG_H
#define LONG_H
#include <SupportDefs.h>
extern "C" void long_enter_kernel(uint32 pml4, uint64 entry, uint64 stackTop,
uint64 kernelArgs, int currentCPU);
extern void long_start_kernel();
#endif /* LONG_H */

View File

@ -0,0 +1,75 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#include <asm_defs.h>
#include <arch/x86_64/descriptors.h>
.code32
/*! void long_enter_kernel(uint32 pml4, uint64 entry, uint64 stackTop,
uint64 kernelArgs, int currentCPU);
*/
FUNCTION(long_enter_kernel):
// Currently running with 32-bit paging tables at an identity mapped
// address. To switch to 64-bit paging we must first disable 32-bit paging,
// otherwise loading the new CR3 will fault.
movl %cr0, %eax
andl $~(1<<31), %eax
movl %eax, %cr0
// Enable PAE.
movl %cr4, %eax
orl $(1<<5), %eax
movl %eax, %cr4
// Point CR3 to the kernel's PML4.
movl 4(%esp), %eax
movl %eax, %cr3
// Enable long mode by setting EFER.LME.
movl $0xC0000080, %ecx
rdmsr
orl $(1<<8), %eax
wrmsr
// Re-enable paging, which will put us in compatibility mode as we are
// currently in a 32-bit code segment.
movl %cr0, %ecx
orl $(1<<31), %ecx
movl %ecx, %cr0
// Jump into the 64-bit code segment.
ljmp $KERNEL_CODE_SEG, $.Llmode
.align 8
.code64
.Llmode:
// Set data segments.
mov $KERNEL_DATA_SEG, %ax
mov %ax, %ss
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
// Clear the high 32 bits of RSP.
movl %esp, %esp
// Get the entry point address, arguments and new stack pointer.
movq 8(%rsp), %rax
movq 24(%rsp), %rdi
movl 32(%rsp), %esi
movq 16(%rsp), %rsp
// Clear the stack frame/RFLAGS.
xorq %rbp, %rbp
push $0
popf
// Call the kernel entry point.
call *%rax
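
The 8/16/24/32(%rsp) offsets read after the far jump follow from the 32-bit cdecl frame the caller builds for long_enter_kernel(), which stays valid across the switch because the stack pointer is only zero-extended. A hypothetical illustration of that frame (assuming the standard cdecl argument layout; not code from the commit):

	// Caller's stack on entry to long_enter_kernel().
	struct long_enter_kernel_frame {
		uint32	returnAddress;	//  0(%esp)
		uint32	pml4;			//  4: physical PML4 address -> CR3
		uint64	entry;			//  8: kernel entry point    -> %rax
		uint64	stackTop;		// 16: new kernel stack      -> %rsp
		uint64	kernelArgs;		// 24: fixed-up &gKernelArgs -> %rdi
		uint32	currentCPU;		// 32: CPU index             -> %esi
	} _PACKED;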

View File

@ -419,6 +419,24 @@ mmu_allocate(void *virtualAddress, size_t size)
}
/*! Allocates a single page and returns both its virtual and physical
addresses.
*/
void *
mmu_allocate_page(addr_t *_physicalAddress)
{
addr_t virt = get_next_virtual_page();
addr_t phys = get_next_physical_page();
map_page(virt, phys, kDefaultPageFlags);
if (_physicalAddress)
*_physicalAddress = phys;
return (void *)virt;
}
/*! Allocates the given physical range.
\return \c true, if the range could be allocated, \c false otherwise.
*/
@ -478,6 +496,35 @@ mmu_free(void *virtualAddress, size_t size)
}
size_t
mmu_get_virtual_usage()
{
return sNextVirtualAddress - KERNEL_BASE;
}
bool
mmu_get_virtual_mapping(addr_t virtualAddress, addr_t *_physicalAddress)
{
if (virtualAddress < KERNEL_BASE) {
panic("mmu_get_virtual_mapping: asked to lookup invalid page %p!\n",
(void *)virtualAddress);
}
uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
/ (B_PAGE_SIZE * 1024)] & 0xfffff000);
uint32 tableEntry = pageTable[(virtualAddress % (B_PAGE_SIZE * 1024))
/ B_PAGE_SIZE];
if ((tableEntry & (1<<0)) != 0) {
*_physicalAddress = tableEntry & 0xFFFFF000;
return true;
} else {
return false;
}
}
/*! Sets up the final and kernel accessible GDT and IDT tables.
BIOS calls won't work any longer after this function has
been called.
@ -501,10 +548,10 @@ mmu_init_for_kernel(void)
  map_page(gKernelArgs.arch_args.vir_idt, (uint32)idt, kDefaultPageFlags);
  // initialize it
- interrupts_init_kernel_idt((void*)gKernelArgs.arch_args.vir_idt,
+ interrupts_init_kernel_idt((void*)(addr_t)gKernelArgs.arch_args.vir_idt,
  	IDT_LIMIT);
- TRACE("idt at virtual address 0x%lx\n", gKernelArgs.arch_args.vir_idt);
+ TRACE("idt at virtual address 0x%llx\n", gKernelArgs.arch_args.vir_idt);
  }
// set up a new gdt
@ -524,7 +571,7 @@ mmu_init_for_kernel(void)
  // put standard segment descriptors in it
  segment_descriptor* virtualGDT
- 	= (segment_descriptor*)gKernelArgs.arch_args.vir_gdt;
+ 	= (segment_descriptor*)(addr_t)gKernelArgs.arch_args.vir_gdt;
  clear_segment_descriptor(&virtualGDT[0]);
  // seg 0x08 - kernel 4GB code
@ -548,7 +595,7 @@ mmu_init_for_kernel(void)
  // load the GDT
  gdtDescriptor.limit = GDT_LIMIT - 1;
- gdtDescriptor.base = (void*)gKernelArgs.arch_args.vir_gdt;
+ gdtDescriptor.base = (void*)(addr_t)gKernelArgs.arch_args.vir_gdt;
  asm("lgdt %0;"
  	: : "m" (gdtDescriptor));

View File

@ -20,9 +20,15 @@ extern void mmu_init(void);
extern void mmu_init_for_kernel(void);
extern addr_t mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags);
extern void *mmu_allocate(void *virtualAddress, size_t size);
extern void *mmu_allocate_page(addr_t *_physicalAddress);
extern bool mmu_allocate_physical(addr_t base, size_t size);
extern void mmu_free(void *virtualAddress, size_t size);
// Used by the long mode switch code
extern size_t mmu_get_virtual_usage();
extern bool mmu_get_virtual_mapping(addr_t virtualAddress,
addr_t *_physicalAddress);
#ifdef __cplusplus
}
#endif

View File

@ -366,7 +366,7 @@ smp_cpu_ready(void)
  // Set up the final idt
  idt_descr.a = IDT_LIMIT - 1;
- idt_descr.b = (uint32 *)gKernelArgs.arch_args.vir_idt;
+ idt_descr.b = (uint32 *)(addr_t)gKernelArgs.arch_args.vir_idt;
  asm("lidt %0;"
  	: : "m" (idt_descr));

View File

@ -22,6 +22,7 @@
#include "hpet.h"
#include "interrupts.h"
#include "keyboard.h"
#include "long.h"
#include "mmu.h"
#include "multiboot.h"
#include "serial.h"
@ -75,6 +76,12 @@ platform_boot_options(void)
extern "C" void
platform_start_kernel(void)
{
// 64-bit kernel entry is all handled in long.cpp
if (gKernelArgs.kernel_image->elf_class == ELFCLASS64) {
long_start_kernel();
return;
}
static struct kernel_args *args = &gKernelArgs;
// something goes wrong when we pass &gKernelArgs directly
// to the assembler inline below - might be a bug in GCC

View File

@ -37,6 +37,8 @@ local bios_ia32_src =
apm.cpp
interrupts.cpp
interrupts_asm.S
long.cpp
long_asm.S
;
local bios_ia32_edid_src =

View File

@ -817,7 +817,7 @@ arch_cpu_init_post_vm(kernel_args *args)
  uint32 i;
  // account for the segment descriptors
- gGDT = (segment_descriptor *)args->arch_args.vir_gdt;
+ gGDT = (segment_descriptor *)(addr_t)args->arch_args.vir_gdt;
  create_area("gdt", (void **)&gGDT, B_EXACT_ADDRESS, B_PAGE_SIZE,
  	B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);

View File

@ -553,7 +553,7 @@ arch_int_init(struct kernel_args *args)
  interrupt_handler_function** table;
  // set the global sIDT variable
- sIDTs[0] = (desc_table *)args->arch_args.vir_idt;
+ sIDTs[0] = (desc_table *)(addr_t)args->arch_args.vir_idt;
  // setup the standard programmable interrupt controller
  pic_init();

View File

@ -266,7 +266,7 @@ X86PagingMethod32Bit::Init(kernel_args* args,
TRACE("X86PagingMethod32Bit::Init(): entry\n");
// page hole set up in stage2
fPageHole = (page_table_entry*)args->arch_args.page_hole;
fPageHole = (page_table_entry*)(addr_t)args->arch_args.page_hole;
// calculate where the pgdir would be
fPageHolePageDir = (page_directory_entry*)
(((addr_t)args->arch_args.page_hole)
@ -276,7 +276,7 @@ X86PagingMethod32Bit::Init(kernel_args* args,
  	sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
  fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
- fKernelVirtualPageDirectory = (page_directory_entry*)
+ fKernelVirtualPageDirectory = (page_directory_entry*)(addr_t)
  	args->arch_args.vir_pgdir;
  #ifdef TRACE_X86_PAGING_METHOD_32_BIT

View File

@ -56,7 +56,8 @@ struct X86PagingMethodPAE::ToPAESwitcher {
  fKernelArgs(args)
  {
  // page hole set up in the boot loader
- fPageHole = (page_table_entry*)fKernelArgs->arch_args.page_hole;
+ fPageHole = (page_table_entry*)
+ 	(addr_t)fKernelArgs->arch_args.page_hole;
  // calculate where the page dir would be
  fPageHolePageDir = (page_directory_entry*)

View File

@ -78,6 +78,8 @@ static int32 main2(void *);
extern "C" int
_start(kernel_args *bootKernelArgs, int currentCPU)
{
while (1) {}
if (bootKernelArgs->kernel_args_size != sizeof(kernel_args)
|| bootKernelArgs->version != CURRENT_KERNEL_ARGS_VERSION) {
// This is something we cannot handle right now - release kernels