boot/efi/arm: code cleanup
* Use TRACE() for logging
* Use B_PRI format strings
* Reformat local variables and function arguments to use camelCase
* Remove comment related to ConvertPointer
* Fix indentation for switch statements
* Remove variable sFirstPageTable as it's not really used

Change-Id: Iace275e5a3311f13a5018f497c3132e472a20848
Reviewed-on: https://review.haiku-os.org/c/haiku/+/4885
Tested-by: Commit checker robot <no-reply+buildbot@haiku-os.org>
Reviewed-by: Alex von Gluck IV <kallisti5@unixzen.com>
parent f991c7ee03
commit a2d528242a
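For context, the two conventions this cleanup applies are a per-file TRACE() macro that only expands to dprintf() when its TRACE_* define is enabled, and format strings built from the width-safe B_PRI* macros instead of casting 64-bit values down to uint32_t. The following is a minimal illustrative sketch of that pattern, not part of the patch itself; the helper name print_range() is made up for illustration:

// Illustrative sketch only -- mirrors the conventions used by this commit.
//#define TRACE_MMU
#ifdef TRACE_MMU
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif

// Hypothetical helper: prints a physical range without truncating it to 32 bits.
static void
print_range(uint64 start, uint64 size)
{
	// B_PRIx64 expands to the correct conversion specifier for uint64,
	// so no (uint32_t) casts are needed and the output stays correct
	// for addresses above 4 GiB.
	TRACE("  0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
		start, start + size, size);
}

The dprintf() calls kept in the diff below (the TRACE_MEMORY_MAP listing) follow the same B_PRI* rule.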
@@ -17,16 +17,28 @@
#include "mmu.h"
#include "efi_platform.h"

#define ALIGN_PAGEDIR (1024 * 16)
#define MAX_PAGE_TABLES 192
#define PAGE_TABLE_AREA_SIZE (MAX_PAGE_TABLES * ARM_MMU_L2_COARSE_TABLE_SIZE)

//#define TRACE_MMU
#ifdef TRACE_MMU
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif


//#define TRACE_MEMORY_MAP
//#define TRACE_PAGE_DIRECTORY

#define ALIGN_PAGEDIR (1024 * 16)
#define MAX_PAGE_TABLES 192
#define PAGE_TABLE_AREA_SIZE (MAX_PAGE_TABLES * ARM_MMU_L2_COARSE_TABLE_SIZE)

static uint32_t *sPageDirectory = NULL;
static uint32_t *sFirstPageTable = NULL;
static uint32_t *sNextPageTable = NULL;
static uint32_t *sLastPageTable = NULL;


#ifdef TRACE_PAGE_DIRECTORY
static void
dump_page_dir(void)
{
@@ -50,25 +62,27 @@ dump_page_dir(void)
}
}
}
#endif


static uint32 *
get_next_page_table(void)
{
uint32 *page_table = sNextPageTable;
uint32 *pageTable = sNextPageTable;
sNextPageTable += ARM_MMU_L2_COARSE_ENTRY_COUNT;
if (sNextPageTable >= sLastPageTable)
panic("ran out of page tables\n");
return page_table;
return pageTable;
}


static void
map_page(addr_t virt_addr, phys_addr_t phys_addr, uint32_t flags)
map_page(addr_t virtAddr, phys_addr_t physAddr, uint32_t flags)
{
phys_addr &= ~(B_PAGE_SIZE - 1);
physAddr &= ~(B_PAGE_SIZE - 1);

uint32 *pageTable = NULL;
uint32 pageDirectoryIndex = VADDR_TO_PDENT(virt_addr);
uint32 pageDirectoryIndex = VADDR_TO_PDENT(virtAddr);
uint32 pageDirectoryEntry = sPageDirectory[pageDirectoryIndex];

if (pageDirectoryEntry == 0) {
@@ -78,22 +92,23 @@ map_page(addr_t virt_addr, phys_addr_t phys_addr, uint32_t flags)
pageTable = (uint32 *)(pageDirectoryEntry & ARM_PDE_ADDRESS_MASK);
}

uint32 pageTableIndex = VADDR_TO_PTENT(virt_addr);
pageTable[pageTableIndex] = phys_addr | flags | ARM_MMU_L2_TYPE_SMALLNEW;
uint32 pageTableIndex = VADDR_TO_PTENT(virtAddr);
pageTable[pageTableIndex] = physAddr | flags | ARM_MMU_L2_TYPE_SMALLNEW;
}


static void
map_range(addr_t virt_addr, phys_addr_t phys_addr, size_t size, uint32_t flags)
map_range(addr_t virtAddr, phys_addr_t physAddr, size_t size, uint32_t flags)
{
//dprintf("map 0x%08x --> 0x%08x, len=0x%08x, flags=0x%08x\n",
// (uint32_t)virt_addr, (uint32_t)phys_addr, (uint32_t)size, flags);
//TRACE("map 0x%08" B_PRIxADDR " --> 0x%08" B_PRIxPHYSADDR
// ", len=0x%08" B_PRIxSIZE ", flags=0x%08" PRIx32 "\n",
// virtAddr, physAddr, size, flags);

for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
map_page(virt_addr + offset, phys_addr + offset, flags);
map_page(virtAddr + offset, physAddr + offset, flags);
}

ASSERT_ALWAYS(insert_virtual_allocated_range(virt_addr, size) >= B_OK);
ASSERT_ALWAYS(insert_virtual_allocated_range(virtAddr, size) >= B_OK);
}

@@ -120,12 +135,12 @@ map_range_to_new_area(addr_range& range, uint32_t flags)
return;
}

phys_addr_t phys_addr = range.start;
addr_t virt_addr = get_next_virtual_address(range.size);
phys_addr_t physAddr = range.start;
addr_t virtAddr = get_next_virtual_address(range.size);

map_range(virt_addr, phys_addr, range.size, flags);
map_range(virtAddr, physAddr, range.size, flags);

range.start = virt_addr;
range.start = virtAddr;

insert_virtual_range_to_keep(range.start, range.size);
}
@@ -142,51 +157,50 @@ map_range_to_new_area(efi_memory_descriptor *entry, uint32_t flags)


static void
build_physical_memory_list(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version)
build_physical_memory_list(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion)
{
addr_t addr = (addr_t)memory_map;
addr_t addr = (addr_t)memoryMap;

gKernelArgs.num_physical_memory_ranges = 0;

// First scan: Add all usable ranges
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptor_size);
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory: {
// Usable memory.
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
insert_physical_memory_range(base, size);
break;
}
case EfiACPIReclaimMemory:
// ACPI reclaim -- physical memory we could actually use later
break;
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory: {
// Usable memory.
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
insert_physical_memory_range(base, size);
break;
}
default:
break;
}
}

uint64_t initialPhysicalMemory = total_physical_memory();

// Second scan: Remove everything reserved that may overlap
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptor_size);
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory:
break;
default:
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
remove_physical_memory_range(base, size);
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory:
break;
default:
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
remove_physical_memory_range(base, size);
}
}

@@ -199,22 +213,22 @@ build_physical_memory_list(size_t memory_map_size,


static void
build_physical_allocated_list(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version)
build_physical_allocated_list(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion)
{
addr_t addr = (addr_t)memory_map;
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptor_size);
addr_t addr = (addr_t)memoryMap;
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor *)(addr + i * descriptorSize);
switch (entry->Type) {
case EfiLoaderData: {
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
insert_physical_allocated_range(base, size);
break;
}
default:
;
case EfiLoaderData: {
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
insert_physical_allocated_range(base, size);
break;
}
default:
;
}
}

@@ -224,56 +238,50 @@ build_physical_allocated_list(size_t memory_map_size,


void
arch_mmu_init()
arch_mmu_post_efi_setup(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion)
{
}


void
arch_mmu_post_efi_setup(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version)
{
build_physical_allocated_list(memory_map_size, memory_map,
descriptor_size, descriptor_version);
build_physical_allocated_list(memoryMapSize, memoryMap,
descriptorSize, descriptorVersion);

// Switch EFI to virtual mode, using the kernel pmap.
// Something involving ConvertPointer might need to be done after this?
// http://wiki.phoenix.com/wiki/index.php/EFI_RUNTIME_SERVICES
kRuntimeServices->SetVirtualAddressMap(memory_map_size, descriptor_size,
descriptor_version, memory_map);
kRuntimeServices->SetVirtualAddressMap(memoryMapSize, descriptorSize,
descriptorVersion, memoryMap);

#ifdef TRACE_MEMORY_MAP
dprintf("phys memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
uint32_t start = (uint32_t)gKernelArgs.physical_memory_range[i].start;
uint32_t size = (uint32_t)gKernelArgs.physical_memory_range[i].size;
dprintf(" 0x%08x-0x%08x, length 0x%08x\n",
uint64 start = gKernelArgs.physical_memory_range[i].start;
uint64 size = gKernelArgs.physical_memory_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}

dprintf("allocated phys memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
uint32_t start = (uint32_t)gKernelArgs.physical_allocated_range[i].start;
uint32_t size = (uint32_t)gKernelArgs.physical_allocated_range[i].size;
dprintf(" 0x%08x-0x%08x, length 0x%08x\n",
uint64 start = gKernelArgs.physical_allocated_range[i].start;
uint64 size = gKernelArgs.physical_allocated_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}

dprintf("allocated virt memory ranges:\n");
for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
uint32_t start = (uint32_t)gKernelArgs.virtual_allocated_range[i].start;
uint32_t size = (uint32_t)gKernelArgs.virtual_allocated_range[i].size;
dprintf(" 0x%08x-0x%08x, length 0x%08x\n",
uint64 start = gKernelArgs.virtual_allocated_range[i].start;
uint64 size = gKernelArgs.virtual_allocated_range[i].size;
dprintf(" 0x%08" B_PRIx64 "-0x%08" B_PRIx64 ", length 0x%08" B_PRIx64 "\n",
start, start + size, size);
}

dprintf("virt memory ranges to keep:\n");
for (uint32_t i = 0; i < gKernelArgs.arch_args.num_virtual_ranges_to_keep; i++) {
uint32_t start = (uint32_t)gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
uint32_t size = (uint32_t)gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
dprintf(" 0x%08x-0x%08x, length 0x%08x\n",
uint32 start = gKernelArgs.arch_args.virtual_ranges_to_keep[i].start;
uint32 size = gKernelArgs.arch_args.virtual_ranges_to_keep[i].size;
dprintf(" 0x%08" B_PRIx32 "-0x%08" B_PRIx32 ", length 0x%08" B_PRIx32 "\n",
start, start + size, size);
}
#endif
}

@@ -286,32 +294,31 @@ arch_mmu_allocate_page_tables(void)
sPageDirectory = (uint32 *)ROUNDUP((uint32)sPageDirectory, ALIGN_PAGEDIR);
memset(sPageDirectory, 0, ARM_MMU_L1_TABLE_SIZE);

sFirstPageTable = (uint32*)((uint32)sPageDirectory + ARM_MMU_L1_TABLE_SIZE);
sNextPageTable = sFirstPageTable;
sLastPageTable = (uint32*)((uint32)sFirstPageTable + PAGE_TABLE_AREA_SIZE);
sNextPageTable = (uint32*)((uint32)sPageDirectory + ARM_MMU_L1_TABLE_SIZE);
sLastPageTable = (uint32*)((uint32)sNextPageTable + PAGE_TABLE_AREA_SIZE);

memset(sFirstPageTable, 0, PAGE_TABLE_AREA_SIZE);
memset(sNextPageTable, 0, PAGE_TABLE_AREA_SIZE);

dprintf("sPageDirectory = 0x%08x\n", (uint32)sPageDirectory);
dprintf("sFirstPageTable = 0x%08x\n", (uint32)sFirstPageTable);
dprintf("sLastPageTable = 0x%08x\n", (uint32)sLastPageTable);
TRACE("sPageDirectory = 0x%08x\n", (uint32)sPageDirectory);
TRACE("sNextPageTable = 0x%08x\n", (uint32)sNextPageTable);
TRACE("sLastPageTable = 0x%08x\n", (uint32)sLastPageTable);
}

uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version)
{
addr_t memory_map_addr = (addr_t)memory_map;

uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion)
{
arch_mmu_allocate_page_tables();

build_physical_memory_list(memory_map_size, memory_map,
descriptor_size, descriptor_version);
build_physical_memory_list(memoryMapSize, memoryMap,
descriptorSize, descriptorVersion);

for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
addr_t memoryMapAddr = (addr_t)memoryMap;
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor* entry =
(efi_memory_descriptor *)(memory_map_addr + i * descriptor_size);
(efi_memory_descriptor *)(memoryMapAddr + i * descriptorSize);
if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0) {
map_range_to_new_area(entry,
ARM_MMU_L2_FLAG_B | ARM_MMU_L2_FLAG_C | ARM_MMU_L2_FLAG_AP_RW);
@@ -332,21 +339,30 @@ arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
sort_address_ranges(gKernelArgs.virtual_allocated_range,
gKernelArgs.num_virtual_allocated_ranges);

addr_t vir_pgdir;
platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &vir_pgdir);
addr_t virtPageDirectory;
platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &virtPageDirectory);

gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
gKernelArgs.arch_args.vir_pgdir = (uint32)vir_pgdir;
gKernelArgs.arch_args.vir_pgdir = (uint32)virtPageDirectory;
gKernelArgs.arch_args.next_pagetable = (uint32)(sNextPageTable) - (uint32)sPageDirectory;

dprintf("gKernelArgs.arch_args.phys_pgdir = 0x%08x\n",
TRACE("gKernelArgs.arch_args.phys_pgdir = 0x%08x\n",
(uint32_t)gKernelArgs.arch_args.phys_pgdir);
dprintf("gKernelArgs.arch_args.vir_pgdir = 0x%08x\n",
TRACE("gKernelArgs.arch_args.vir_pgdir = 0x%08x\n",
(uint32_t)gKernelArgs.arch_args.vir_pgdir);
dprintf("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
TRACE("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
(uint32_t)gKernelArgs.arch_args.next_pagetable);

//dump_page_dir();
#ifdef TRACE_PAGE_DIRECTORY
dump_page_dir();
#endif

return (uint32_t)sPageDirectory;
}


void
arch_mmu_init()
{
// empty
}

@@ -11,6 +11,7 @@

#include "efi_platform.h"
#include "mmu.h"
#include "smp.h"


#define ALIGN_MEMORY_MAP 4
@@ -19,17 +20,18 @@
extern "C" typedef void (*arch_enter_kernel_t)(uint32_t, addr_t, addr_t, addr_t);


// From entry.S
extern "C" void arch_enter_kernel(uint32_t ttbr, addr_t kernelArgs,
addr_t kernelEntry, addr_t kernelStackTop);

// From arch_mmu.cpp
extern void arch_mmu_post_efi_setup(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version);
extern void arch_mmu_post_efi_setup(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion);

extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version);
extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memoryMapSize,
efi_memory_descriptor *memoryMap, size_t descriptorSize,
uint32_t descriptorVersion);


void
@@ -43,38 +45,38 @@ static const char*
memory_region_type_str(int type)
{
switch (type) {
case EfiReservedMemoryType:
return "ReservedMemoryType";
case EfiLoaderCode:
return "LoaderCode";
case EfiLoaderData:
return "LoaderData";
case EfiBootServicesCode:
return "BootServicesCode";
case EfiBootServicesData:
return "BootServicesData";
case EfiRuntimeServicesCode:
return "RuntimeServicesCode";
case EfiRuntimeServicesData:
return "RuntimeServicesData";
case EfiConventionalMemory:
return "ConventionalMemory";
case EfiUnusableMemory:
return "UnusableMemory";
case EfiACPIReclaimMemory:
return "ACPIReclaimMemory";
case EfiACPIMemoryNVS:
return "ACPIMemoryNVS";
case EfiMemoryMappedIO:
return "MMIO";
case EfiMemoryMappedIOPortSpace:
return "MMIOPortSpace";
case EfiPalCode:
return "PalCode";
case EfiPersistentMemory:
return "PersistentMemory";
default:
return "unknown";
case EfiReservedMemoryType:
return "ReservedMemoryType";
case EfiLoaderCode:
return "LoaderCode";
case EfiLoaderData:
return "LoaderData";
case EfiBootServicesCode:
return "BootServicesCode";
case EfiBootServicesData:
return "BootServicesData";
case EfiRuntimeServicesCode:
return "RuntimeServicesCode";
case EfiRuntimeServicesData:
return "RuntimeServicesData";
case EfiConventionalMemory:
return "ConventionalMemory";
case EfiUnusableMemory:
return "UnusableMemory";
case EfiACPIReclaimMemory:
return "ACPIReclaimMemory";
case EfiACPIMemoryNVS:
return "ACPIMemoryNVS";
case EfiMemoryMappedIO:
return "MMIO";
case EfiMemoryMappedIOPortSpace:
return "MMIOPortSpace";
case EfiPalCode:
return "PalCode";
case EfiPersistentMemory:
return "PersistentMemory";
default:
return "unknown";
}
}

@@ -129,44 +131,45 @@ arch_start_kernel(addr_t kernelEntry)
// Prepare to exit EFI boot services.
// Read the memory map.
// First call is to determine the buffer size.
size_t memory_map_size = 0;
size_t memoryMapSize = 0;
efi_memory_descriptor dummy;
efi_memory_descriptor *memory_map;
size_t map_key;
size_t descriptor_size;
uint32_t descriptor_version;
if (kBootServices->GetMemoryMap(&memory_map_size, &dummy, &map_key,
&descriptor_size, &descriptor_version) != EFI_BUFFER_TOO_SMALL) {
size_t mapKey;
size_t descriptorSize;
uint32_t descriptorVersion;
if (kBootServices->GetMemoryMap(&memoryMapSize, &dummy, &mapKey,
&descriptorSize, &descriptorVersion) != EFI_BUFFER_TOO_SMALL) {
panic("Unable to determine size of system memory map");
}

// Allocate a buffer twice as large as needed just in case it gets bigger
// between calls to ExitBootServices.
size_t actual_memory_map_size = memory_map_size * 2;
memory_map
= (efi_memory_descriptor *)kernel_args_malloc(actual_memory_map_size +
size_t actualMemoryMapSize = memoryMapSize * 2;
efi_memory_descriptor *memoryMap
= (efi_memory_descriptor *)kernel_args_malloc(actualMemoryMapSize +
ALIGN_MEMORY_MAP);

// align memory_map to 4-byte boundary
// otherwise we get alignment exception when calling GetMemoryMap below
memory_map = (efi_memory_descriptor *)ROUNDUP((uint32_t)memory_map, ALIGN_MEMORY_MAP);
memoryMap = (efi_memory_descriptor *)ROUNDUP((uint32_t)memoryMap, ALIGN_MEMORY_MAP);

if (memory_map == NULL)
if (memoryMap == NULL)
panic("Unable to allocate memory map.");

// Read (and print) the memory map.
memory_map_size = actual_memory_map_size;
if (kBootServices->GetMemoryMap(&memory_map_size, memory_map, &map_key,
&descriptor_size, &descriptor_version) != EFI_SUCCESS) {
memoryMapSize = actualMemoryMapSize;
if (kBootServices->GetMemoryMap(&memoryMapSize, memoryMap, &mapKey,
&descriptorSize, &descriptorVersion) != EFI_SUCCESS) {
panic("Unable to fetch system memory map.");
}

addr_t addr = (addr_t)memory_map;
addr_t addr = (addr_t)memoryMap;
dprintf("System provided memory map:\n");
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
for (size_t i = 0; i < memoryMapSize / descriptorSize; ++i) {
efi_memory_descriptor *entry
= (efi_memory_descriptor *)(addr + i * descriptor_size);
dprintf(" phys: 0x%08llx-0x%08llx, virt: 0x%08llx-0x%08llx, type: %s (%#x), attr: %#llx\n",
= (efi_memory_descriptor *)(addr + i * descriptorSize);
dprintf(" phys: 0x%08" PRIx64 "-0x%08" PRIx64
", virt: 0x%08" PRIx64 "-0x%08" PRIx64
", type: %s (%#x), attr: %#" PRIx64 "\n",
entry->PhysicalStart,
entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
entry->VirtualStart,
@@ -177,7 +180,7 @@ arch_start_kernel(addr_t kernelEntry)

// Generate page tables for use after ExitBootServices.
uint32_t final_ttbr0 = arch_mmu_generate_post_efi_page_tables(
memory_map_size, memory_map, descriptor_size, descriptor_version);
memoryMapSize, memoryMap, descriptorSize, descriptorVersion);

// Attempt to fetch the memory map and exit boot services.
// This needs to be done in a loop, as ExitBootServices can change the
@@ -191,7 +194,7 @@ arch_start_kernel(addr_t kernelEntry)
// entry.
dprintf("Calling ExitBootServices. So long, EFI!\n");
while (true) {
if (kBootServices->ExitBootServices(kImage, map_key) == EFI_SUCCESS) {
if (kBootServices->ExitBootServices(kImage, mapKey) == EFI_SUCCESS) {
// The console was provided by boot services, disable it.
stdout = NULL;
stderr = NULL;
@@ -200,23 +203,23 @@ arch_start_kernel(addr_t kernelEntry)
break;
}

memory_map_size = actual_memory_map_size;
if (kBootServices->GetMemoryMap(&memory_map_size, memory_map, &map_key,
&descriptor_size, &descriptor_version) != EFI_SUCCESS) {
memoryMapSize = actualMemoryMapSize;
if (kBootServices->GetMemoryMap(&memoryMapSize, memoryMap, &mapKey,
&descriptorSize, &descriptorVersion) != EFI_SUCCESS) {
panic("Unable to fetch system memory map.");
}
}

// Update EFI, generate final kernel physical memory map, etc.
arch_mmu_post_efi_setup(memory_map_size, memory_map,
descriptor_size, descriptor_version);
arch_mmu_post_efi_setup(memoryMapSize, memoryMap,
descriptorSize, descriptorVersion);

// Copy final kernel args
// This should be the last step before jumping to the kernel
// as there are some fixups happening to kernel_args even in the last minute
memcpy(kernelArgs, &gKernelArgs, sizeof(struct kernel_args));

//smp_boot_other_cpus(final_pml4, kernelEntry);
//smp_boot_other_cpus(final_ttbr0, kernelEntry);

// Enter the kernel!
dprintf("enter_kernel(ttbr0: 0x%08x, kernelArgs: 0x%08x, "