boot/efi/x86: implement MMU initialization and jump to kernel

Change-Id: Ie42c69f3851acae3a8184aa97ab2dd01c9485f46
Reviewed-on: https://review.haiku-os.org/c/haiku/+/4850
Tested-by: Commit checker robot <no-reply+buildbot@haiku-os.org>
Reviewed-by: Alex von Gluck IV <kallisti5@unixzen.com>
David Karoly 2022-01-25 14:01:54 +01:00 committed by Alex von Gluck IV
parent 20c0887491
commit 5e5299336b
4 changed files with 654 additions and 3 deletions

src/system/boot/platform/efi/arch/x86/Jamfile

@@ -17,6 +17,7 @@ for platform in [ MultiBootSubDirSetup efi ] {
local arch_src =
crt0-efi-$(TARGET_ARCH).S
entry.S
relocation_func.cpp
arch_mmu.cpp
arch_smp.cpp

src/system/boot/platform/efi/arch/x86/arch_mmu.cpp

@@ -1,11 +1,403 @@
/*
* Copyright 2021 Haiku, Inc. All rights reserved.
* Copyright 2021-2022 Haiku, Inc. All rights reserved.
* Released under the terms of the MIT License.
*/
#include <algorithm>
#include <kernel.h>
#include <arch_kernel.h>
#include <arch/cpu.h>
#include <arch/x86/descriptors.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <efi/types.h>
#include <efi/boot-services.h>
#include "efi_platform.h"
#include "mmu.h"
//#define TRACE_MMU
#ifdef TRACE_MMU
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
//#define TRACE_MEMORY_MAP
//#define TRACE_PAGE_DIRECTORY
#define VADDR_TO_PDENT(va) (((va) / B_PAGE_SIZE) / 1024)
#define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 1024)
#define X86_PDE_ADDRESS_MASK 0xfffff000
#define X86_PTE_ADDRESS_MASK 0xfffff000
#define ALIGN_PAGEDIR B_PAGE_SIZE
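// Example: a 32-bit virtual address splits 10/10/12 under 4 KB paging,
// so for va = 0x80000000, VADDR_TO_PDENT(va) = 512 and
// VADDR_TO_PTENT(va) = 0.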
struct gdt_idt_descr {
uint16_t limit;
uint32_t base;
} _PACKED;
gdt_idt_descr gBootGDTDescriptor;
segment_descriptor *gBootGDT = NULL;
static const uint32_t kDefaultPageTableFlags = 0x07; // present, user, R/W
static uint32_t *sPageDirectory = NULL;
#ifdef TRACE_PAGE_DIRECTORY
static void
dump_page_dir(void)
{
dprintf("=== Page Directory ===\n");
for (uint32_t i = 0; i < 1024; i++) {
uint32_t directoryEntry = sPageDirectory[i];
if (directoryEntry != 0) {
dprintf("virt 0x%08x --> page table 0x%08x type 0x%08x\n",
i << 22, directoryEntry & X86_PDE_ADDRESS_MASK,
directoryEntry & (~X86_PDE_ADDRESS_MASK));
uint32_t *pageTable = (uint32_t *)(directoryEntry & X86_PDE_ADDRESS_MASK);
for (uint32_t j = 0; j < 1024; j++) {
uint32_t tableEntry = pageTable[j];
if (tableEntry != 0) {
dprintf("virt 0x%08x --> page 0x%08x type+flags 0x%08x\n",
(i << 22) | (j << 12),
tableEntry & X86_PTE_ADDRESS_MASK,
tableEntry & (~X86_PTE_ADDRESS_MASK));
}
}
}
}
}
#endif
static uint32_t *
get_next_page_table(void)
{
uint32_t *pageTable = (uint32_t *)mmu_allocate_page();
memset(pageTable, 0, B_PAGE_SIZE);
return pageTable;
}
static void
arch_mmu_init_gdt(void)
{
if (platform_allocate_region((void **)&gBootGDT,
BOOT_GDT_SEGMENT_COUNT * sizeof(segment_descriptor), 0, false) != B_OK) {
panic("Failed to allocate GDT.\n");
}
STATIC_ASSERT(BOOT_GDT_SEGMENT_COUNT > KERNEL_CODE_SEGMENT
&& BOOT_GDT_SEGMENT_COUNT > KERNEL_DATA_SEGMENT
&& BOOT_GDT_SEGMENT_COUNT > USER_CODE_SEGMENT
&& BOOT_GDT_SEGMENT_COUNT > USER_DATA_SEGMENT);
// set up a new gdt
// put standard segment descriptors in GDT
clear_segment_descriptor(&gBootGDT[0]);
// seg 0x08 - kernel 4GB code
set_segment_descriptor(&gBootGDT[KERNEL_CODE_SEGMENT], 0, 0xffffffff,
DT_CODE_READABLE, DPL_KERNEL);
// seg 0x10 - kernel 4GB data
set_segment_descriptor(&gBootGDT[KERNEL_DATA_SEGMENT], 0, 0xffffffff,
DT_DATA_WRITEABLE, DPL_KERNEL);
// seg 0x1b - ring 3 user 4GB code
set_segment_descriptor(&gBootGDT[USER_CODE_SEGMENT], 0, 0xffffffff,
DT_CODE_READABLE, DPL_USER);
// seg 0x23 - ring 3 user 4GB data
set_segment_descriptor(&gBootGDT[USER_DATA_SEGMENT], 0, 0xffffffff,
DT_DATA_WRITEABLE, DPL_USER);
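// A selector encodes (index << 3) | RPL; hence "seg 0x1b" above:
// (3 << 3) | 3 for ring 3 code.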
addr_t virtualGDT;
platform_bootloader_address_to_kernel_address(gBootGDT, &virtualGDT);
gBootGDTDescriptor.limit = BOOT_GDT_SEGMENT_COUNT * sizeof(segment_descriptor) - 1;
// (the GDTR limit field holds the offset of the last valid byte, hence the -1)
gBootGDTDescriptor.base = (uint32_t)virtualGDT;
TRACE("gdt phys 0x%08x virt 0x%08" B_PRIxADDR " desc 0x%08x\n",
(uint32_t)gBootGDT, virtualGDT,
(uint32_t)&gBootGDTDescriptor);
TRACE("gdt limit=%d base=0x%08x\n",
gBootGDTDescriptor.limit, gBootGDTDescriptor.base);
}
static void
map_page(addr_t virtAddr, phys_addr_t physAddr, uint32_t flags)
{
physAddr &= ~(B_PAGE_SIZE - 1);
uint32_t *pageTable = NULL;
uint32_t pageDirectoryIndex = VADDR_TO_PDENT(virtAddr);
uint32_t pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];
if (pageDirectoryEntry == 0) {
//TRACE("get next page table for address 0x%08" B_PRIxADDR "\n",
// virtAddr);
pageTable = get_next_page_table();
sPageDirectory[pageDirectoryIndex] = (uint32_t)pageTable | kDefaultPageTableFlags;
} else {
pageTable = (uint32_t *)(pageDirectoryEntry & X86_PDE_ADDRESS_MASK);
}
uint32_t pageTableIndex = VADDR_TO_PTENT(virtAddr);
pageTable[pageTableIndex] = physAddr | flags;
}
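// Illustrative walk: map_page(0x80000000, 0x00100000, kDefaultPageFlags)
// hits directory slot 512; a fresh page table is installed there on first
// use, and 0x00100000 | flags lands in slot 0 of that table.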
static void
map_range(addr_t virtAddr, phys_addr_t physAddr, size_t size, uint32_t flags)
{
//TRACE("map 0x%08" B_PRIxADDR " --> 0x%08" B_PRIxPHYSADDR
// ", len=0x%08" B_PRIxSIZE ", flags=0x%08" PRIx32 "\n",
// virtAddr, physAddr, size, flags);
for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
map_page(virtAddr + offset, physAddr + offset, flags);
}
if (virtAddr >= KERNEL_LOAD_BASE)
ASSERT_ALWAYS(insert_virtual_allocated_range(virtAddr, size) >= B_OK);
}
static void
build_physical_memory_list(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion)
{
addr_t addr = (addr_t)memoryMap;
gKernelArgs.num_physical_memory_ranges = 0;
// First scan: Add all usable ranges
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory: {
// Usable memory.
// Ignore memory below 1MB and above 512GB.
uint64_t base = entry->PhysicalStart;
uint64_t end = entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE;
uint64_t originalSize = end - base;
if (base < 0x100000)
base = 0x100000;
if (end > (512ull * 1024 * 1024 * 1024))
end = 512ull * 1024 * 1024 * 1024;
gKernelArgs.ignored_physical_memory
+= originalSize - (max_c(end, base) - base);
if (base >= end)
break;
uint64_t size = end - base;
insert_physical_memory_range(base, size);
break;
}
default:
break;
}
}
uint64_t initialPhysicalMemory = total_physical_memory();
// Second scan: Remove everything reserved that may overlap
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory:
break;
default:
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
remove_physical_memory_range(base, size);
}
}
gKernelArgs.ignored_physical_memory
+= initialPhysicalMemory - total_physical_memory();
sort_address_ranges(gKernelArgs.physical_memory_range,
gKernelArgs.num_physical_memory_ranges);
}
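// Example of the clamping above: a usable descriptor covering
// 0x00000000-0x0009f000 lies entirely below 1 MB and is ignored in full,
// while one covering 0x000e0000-0x00200000 is trimmed to
// 0x00100000-0x00200000 and only its first 128 KB is ignored.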
static void
build_physical_allocated_list(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion)
{
addr_t addr = (addr_t)memoryMap;
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
switch (entry->Type) {
case EfiLoaderData: {
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
insert_physical_allocated_range(base, size);
break;
}
default:
;
}
}
sort_address_ranges(gKernelArgs.physical_allocated_range,
gKernelArgs.num_physical_allocated_ranges);
}
void
arch_mmu_post_efi_setup(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion)
{
build_physical_allocated_list(memoryMapSize, memoryMap,
descriptorSize, descriptorVersion);
// Switch EFI to virtual mode, using the kernel pmap.
kRuntimeServices->SetVirtualAddressMap(memoryMapSize, descriptorSize,
descriptorVersion, memoryMap);
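// Per the UEFI spec, SetVirtualAddressMap() may be called only once, and
// only after ExitBootServices(); from then on the runtime services expect
// the VirtualStart addresses recorded in the map.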
#ifdef TRACE_MEMORY_MAP
dprintf("phys memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
uint64 start = gKernelArgs.physical_memory_range[i].start;
uint64 size = gKernelArgs.physical_memory_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}
dprintf("allocated phys memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
uint64 start = gKernelArgs.physical_allocated_range[i].start;
uint64 size = gKernelArgs.physical_allocated_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}
dprintf("allocated virt memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
uint64 start = gKernelArgs.virtual_allocated_range[i].start;
uint64 size = gKernelArgs.virtual_allocated_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}
#endif
}
static void
arch_mmu_allocate_page_directory(void)
{
if (platform_allocate_region((void **)&sPageDirectory,
B_PAGE_SIZE + ALIGN_PAGEDIR, 0, false) != B_OK)
panic("Failed to allocate page directory.");
sPageDirectory = (uint32_t *)ROUNDUP((uint32_t)sPageDirectory, ALIGN_PAGEDIR);
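// (Allocating a page of slack guarantees a page-aligned 4 KB window
// somewhere inside the returned region.)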
memset(sPageDirectory, 0, B_PAGE_SIZE);
TRACE("sPageDirectory = 0x%08x\n", (uint32_t)sPageDirectory);
}
uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion)
{
build_physical_memory_list(memoryMapSize, memoryMap,
descriptorSize, descriptorVersion);
// TODO: find out how to map EFI runtime services; they are not mapped
// for now because the kernel doesn't use them anyway.
#if 0
addr_t memoryMapAddr = (addr_t)memoryMap;
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry =
(efi_memory_descriptor *)(memoryMapAddr + i * descriptorSize);
if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0)
map_range(entry->VirtualStart, entry->PhysicalStart,
entry->NumberOfPages * B_PAGE_SIZE,
kDefaultPageFlags);
}
#endif
void* cookie = NULL;
addr_t vaddr;
phys_addr_t paddr;
size_t size;
while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
map_range(vaddr, paddr, size,
kDefaultPageFlags);
}
// identity mapping for first 1MB
map_range((addr_t)0, (phys_addr_t)0, 1024*1024, kDefaultPageFlags);
sort_address_ranges(gKernelArgs.virtual_allocated_range,
gKernelArgs.num_virtual_allocated_ranges);
// Map the page directory into kernel space at 0xffc00000-0xffffffff.
// This enables an MMU trick: the 4 MB region that this pgdir entry
// represents now maps the 4 MB of potential page tables that the pgdir
// points to. Thrown away later in VM bringup, but useful for now.
sPageDirectory[1023] = (uint32_t)sPageDirectory | kDefaultPageFlags;
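// With entry 1023 pointing back at the directory, the page table for
// directory slot n becomes readable at 0xffc00000 + n * B_PAGE_SIZE,
// and the directory itself reappears at 0xfffff000 (slot 1023 of itself).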
addr_t virtPageDirectory;
platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &virtPageDirectory);
gKernelArgs.arch_args.phys_pgdir = (uint32_t)sPageDirectory;
gKernelArgs.arch_args.vir_pgdir = (uint32_t)virtPageDirectory;
gKernelArgs.arch_args.page_hole = 0xffc00000;
gKernelArgs.arch_args.virtual_end
= gKernelArgs.virtual_allocated_range[gKernelArgs.num_virtual_allocated_ranges-1].start
+ gKernelArgs.virtual_allocated_range[gKernelArgs.num_virtual_allocated_ranges-1].size;
TRACE("gKernelArgs.arch_args.phys_pgdir = 0x%08" B_PRIx32 "\n",
gKernelArgs.arch_args.phys_pgdir);
TRACE("gKernelArgs.arch_args.vir_pgdir = 0x%08" B_PRIx64 "\n",
gKernelArgs.arch_args.vir_pgdir);
TRACE("gKernelArgs.arch_args.page_hole = 0x%08" B_PRIx64 "\n",
gKernelArgs.arch_args.page_hole);
TRACE("gKernelArgs.arch_args.virtual_end = 0x%08" B_PRIx64 "\n",
gKernelArgs.arch_args.virtual_end);
#ifdef TRACE_PAGE_DIRECTORY
dump_page_dir();
#endif
return (uint32_t)sPageDirectory;
}
void
arch_mmu_init(void)
{
arch_mmu_allocate_page_directory();
arch_mmu_init_gdt();
}

src/system/boot/platform/efi/arch/x86/arch_start.cpp

@@ -1,13 +1,45 @@
/*
* Copyright 2021 Haiku, Inc. All rights reserved.
* Copyright 2021-2022 Haiku, Inc. All rights reserved.
* Released under the terms of the MIT License.
*/
#include <kernel.h>
#include <boot/platform.h>
#include <boot/stage2.h>
#include <boot/stdio.h>
#include "efi_platform.h"
#include "mmu.h"
#include "serial.h"
#include "smp.h"
struct gdt_idt_descr {
uint16 limit;
void* base;
} _PACKED;
extern gdt_idt_descr gBootGDTDescriptor;
extern "C" typedef void (*enter_kernel_t)(uint32_t, addr_t, addr_t, addr_t,
struct gdt_idt_descr *);
// From entry.S
extern "C" void arch_enter_kernel(uint32_t pageDirectory, addr_t kernelArgs,
addr_t kernelEntry, addr_t kernelStackTop, struct gdt_idt_descr *gdtDescriptor);
// From arch_mmu.cpp
extern void arch_mmu_post_efi_setup(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion);
extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion);
void
@@ -19,7 +51,161 @@ arch_convert_kernel_args(void)
}
static const char*
memory_region_type_str(int type)
{
switch (type) {
case EfiReservedMemoryType:
return "ReservedMemoryType";
case EfiLoaderCode:
return "LoaderCode";
case EfiLoaderData:
return "LoaderData";
case EfiBootServicesCode:
return "BootServicesCode";
case EfiBootServicesData:
return "BootServicesData";
case EfiRuntimeServicesCode:
return "RuntimeServicesCode";
case EfiRuntimeServicesData:
return "RuntimeServicesData";
case EfiConventionalMemory:
return "ConventionalMemory";
case EfiUnusableMemory:
return "UnusableMemory";
case EfiACPIReclaimMemory:
return "ACPIReclaimMemory";
case EfiACPIMemoryNVS:
return "ACPIMemoryNVS";
case EfiMemoryMappedIO:
return "MMIO";
case EfiMemoryMappedIOPortSpace:
return "MMIOPortSpace";
case EfiPalCode:
return "PalCode";
case EfiPersistentMemory:
return "PersistentMemory";
default:
return "unknown";
}
}
void
arch_start_kernel(addr_t kernelEntry)
{
// Copy the entry.S trampoline into the low 1 MB
enter_kernel_t enter_kernel = (enter_kernel_t)0xa000;
memcpy((void *)enter_kernel, (void *)arch_enter_kernel, B_PAGE_SIZE);
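// The copy must land inside the first 1 MB, which arch_mmu.cpp identity
// maps, so execution can continue at 0xa000 once the kernel's page
// directory is switched in.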
// Allocate virtual memory for kernel args
struct kernel_args *kernelArgs = NULL;
if (platform_allocate_region((void **)&kernelArgs,
sizeof(struct kernel_args), 0, false) != B_OK)
panic("Failed to allocate kernel args.");
addr_t virtKernelArgs;
platform_bootloader_address_to_kernel_address((void*)kernelArgs,
&virtKernelArgs);
// Prepare to exit EFI boot services.
// Read the memory map.
// First call is to determine the buffer size.
size_t memoryMapSize = 0;
efi_memory_descriptor dummy;
size_t mapKey;
size_t descriptorSize;
uint32_t descriptorVersion;
if (kBootServices->GetMemoryMap(&memoryMapSize, &dummy, &mapKey,
&descriptorSize, &descriptorVersion) != EFI_BUFFER_TOO_SMALL) {
panic("Unable to determine size of system memory map");
}
// Allocate a buffer twice as large as needed just in case it gets bigger
// between calls to ExitBootServices.
size_t actualMemoryMapSize = memoryMapSize * 2;
efi_memory_descriptor *memoryMap
= (efi_memory_descriptor *)kernel_args_malloc(actualMemoryMapSize);
if (memoryMap == NULL)
panic("Unable to allocate memory map.");
// Read (and print) the memory map.
memoryMapSize = actualMemoryMapSize;
if (kBootServices->GetMemoryMap(&memoryMapSize, memoryMap, &mapKey,
&descriptorSize, &descriptorVersion) != EFI_SUCCESS) {
panic("Unable to fetch system memory map.");
}
addr_t addr = (addr_t)memoryMap;
dprintf("System provided memory map:\n");
for (size_t i = 0; i < memoryMapSize / descriptorSize; i++) {
efi_memory_descriptor *entry
= (efi_memory_descriptor *)(addr + i * descriptorSize);
dprintf(" phys: 0x%08" PRIx64 "-0x%08" PRIx64
", virt: 0x%08" PRIx64 "-0x%08" PRIx64
", type: %s (%#x), attr: %#" PRIx64 "\n",
entry->PhysicalStart,
entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
entry->VirtualStart,
entry->VirtualStart + entry->NumberOfPages * B_PAGE_SIZE,
memory_region_type_str(entry->Type), entry->Type,
entry->Attribute);
}
// Generate page tables for use after ExitBootServices.
uint32_t pageDirectory = arch_mmu_generate_post_efi_page_tables(
memoryMapSize, memoryMap, descriptorSize, descriptorVersion);
// Attempt to fetch the memory map and exit boot services.
// This needs to be done in a loop, as ExitBootServices can change the
// memory map.
// Even better: Only GetMemoryMap and ExitBootServices can be called after
// the first call to ExitBootServices, as the firmware is permitted to
// partially exit. This is why twice as much space was allocated for the
// memory map, as it's impossible to allocate more now.
// A changing memory map shouldn't affect the generated page tables: they
// only need to know about the maximum address, not any specific entry.
dprintf("Calling ExitBootServices. So long, EFI!\n");
while (true) {
if (kBootServices->ExitBootServices(kImage, mapKey) == EFI_SUCCESS) {
// The console was provided by boot services, disable it.
stdout = NULL;
stderr = NULL;
// Also switch to legacy serial output
// (may not work on all systems)
serial_switch_to_legacy();
dprintf("Switched to legacy serial output\n");
break;
}
memoryMapSize = actualMemoryMapSize;
if (kBootServices->GetMemoryMap(&memoryMapSize, memoryMap, &mapKey,
&descriptorSize, &descriptorVersion) != EFI_SUCCESS) {
panic("Unable to fetch system memory map.");
}
}
// Update EFI, generate final kernel physical memory map, etc.
arch_mmu_post_efi_setup(memoryMapSize, memoryMap,
descriptorSize, descriptorVersion);
// Copy final kernel args
// This should be the last step before jumping to the kernel,
// as some fixups to kernel_args happen even at the last minute.
memcpy(kernelArgs, &gKernelArgs, sizeof(struct kernel_args));
smp_boot_other_cpus(pageDirectory, kernelEntry);
// Enter the kernel!
dprintf("enter_kernel(pageDirectory: 0x%08x, kernelArgs: 0x%08x, "
"kernelEntry: 0x%08x, sp: 0x%08x, gBootGDTDescriptor: 0x%08x)\n",
pageDirectory, (uint32_t)virtKernelArgs, (uint32_t)kernelEntry,
(uint32_t)(gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size),
(uint32_t)&gBootGDTDescriptor);
enter_kernel(pageDirectory, virtKernelArgs, kernelEntry,
gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size,
&gBootGDTDescriptor);
}

src/system/boot/platform/efi/arch/x86/entry.S

@@ -0,0 +1,72 @@
/*
* Copyright 2021 Haiku, Inc. All rights reserved.
* Released under the terms of the MIT License.
*/
#include <asm_defs.h>
#include <arch/x86/descriptors.h>
.text
.code32
/*
extern "C" void arch_enter_kernel(uint32_t pageDirectory, addr_t kernelArgs,
addr_t kernelEntry, addr_t kernelStackTop,
struct gdt_idt_descr *gdtDescriptor);
*/
FUNCTION(arch_enter_kernel):
movl 4(%esp), %edx // pageDirectory
movl 8(%esp), %ecx // kernelArgs
movl 12(%esp), %ebx // kernelEntry
movl 16(%esp), %eax // kernelStackTop
movl 20(%esp), %esi // gdtDescriptor
// initialize stack
movl %eax, %esp
// set page table
movl %edx, %eax
movl %eax, %cr3
// disable interrupts
cli
// clear direction flag
cld
// initialize floating point unit
fninit
movl %esi, %eax
lgdt (%eax)
// initialize CR0
// - bit #31: Enable Paging
// - bit #16: Write Protect
// - bit #5: Numeric Error Handling
// - bit #0: Protected Mode
movl $0x80010021, %eax
movl %eax, %cr0
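// (0x80000000 | 0x00010000 | 0x00000020 | 0x00000001 = 0x80010021)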
// Set data segments.
movw $KERNEL_DATA_SELECTOR, %ax
movw %ax, %ss
movw %ax, %ds
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
pushl $0x0 // currentCpu
pushl %ecx // kernelArgs
pushl $0x0 // fake return address
pushl $KERNEL_CODE_SELECTOR
pushl %ebx // kernelEntry
lret
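// lret pops kernelEntry into %eip and KERNEL_CODE_SELECTOR into %cs;
// the kernel entry point then sees the fake return address plus
// (kernelArgs, currentCpu) as an ordinary C call frame.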
// never reached
movl $-1, %eax
ret
FUNCTION_END(arch_enter_kernel)