stivale2: Introduce fully virtual kernel mappings
parent 6397010b75
commit c7eb6319c0
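This change lets a stivale2 kernel be loaded at an arbitrary physical address while keeping the higher-half virtual addresses it was linked at, instead of relying on the fixed virtual = physical + higher-half-offset relation. elf64_load gains a fully_virtual mode that allocates one contiguous physical block for the whole image and reports the chosen physical base together with the linked virtual base; the page tables, the stivale2 tag walk, and a new kernel base address struct tag are adjusted to use that pair. A kernel opts in through bit 3 of the stivale2 header flags, on top of the PMR bit, roughly as in the following sketch (only the flags value is taken from this commit; the rest is illustrative):

    #include <stdint.h>
    #include <stivale2.h>

    // Illustrative stivale2 header asking for higher-half pointers (bit 1),
    // PMRs (bit 2) and fully virtual kernel mappings (bit 3).
    // The stack and the empty tag list are placeholders, not part of this commit.
    static uint8_t stack[4096];

    __attribute__((section(".stivale2hdr"), used))
    static struct stivale2_header example_header = {
        .entry_point = 0,  // 0 = use the ELF entry point
        .stack       = (uintptr_t)stack + sizeof(stack),
        .flags       = (1 << 1) | (1 << 2) | (1 << 3),
        .tags        = 0,  // end of header tag list
    };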
@@ -430,7 +430,7 @@ static void elf64_get_ranges(uint8_t *elf, uint64_t slide, bool use_paddr, struc
     *_ranges = ranges;
 }
 
-int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_slide, uint32_t alloc_type, bool kaslr, bool use_paddr, struct elf_range **ranges, uint64_t *ranges_count) {
+int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_slide, uint32_t alloc_type, bool kaslr, bool use_paddr, struct elf_range **ranges, uint64_t *ranges_count, bool fully_virtual, uint64_t *physical_base, uint64_t *virtual_base) {
     struct elf64_hdr hdr;
     memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
 
@@ -457,6 +457,41 @@ int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_sl
 
     uint64_t max_align = elf64_max_align(elf);
 
+    uint64_t base_load_addr;
+
+    if (fully_virtual) {
+        simulation = false;
+
+        uint64_t min_vaddr = (uint64_t)-1;
+        uint64_t max_vaddr = 0;
+        for (uint16_t i = 0; i < hdr.ph_num; i++) {
+            struct elf64_phdr phdr;
+            memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf64_phdr)),
+                   sizeof(struct elf64_phdr));
+
+            if (phdr.p_type != PT_LOAD)
+                continue;
+
+            if (phdr.p_type != PT_LOAD)
+                continue;
+
+            if (phdr.p_vaddr < min_vaddr) {
+                min_vaddr = phdr.p_vaddr;
+            }
+
+            if (phdr.p_vaddr + phdr.p_memsz > max_vaddr) {
+                max_vaddr = phdr.p_vaddr + phdr.p_memsz;
+            }
+        }
+
+        uint64_t image_size = max_vaddr - min_vaddr;
+
+        base_load_addr = (uintptr_t)ext_mem_alloc_type_aligned(image_size, alloc_type, max_align);
+
+        *physical_base = base_load_addr;
+        *virtual_base = min_vaddr;
+    }
+
     if (!elf64_is_relocatable(elf, &hdr)) {
         simulation = false;
         goto final;
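With fully_virtual set, the loader scans the PT_LOAD program headers for the lowest and highest virtual addresses, allocates one physically contiguous block of that size at the image's maximum alignment, and reports the chosen physical base plus the lowest virtual address back to the caller. Every later translation in this commit reduces to the same offset arithmetic; a minimal sketch with assumed names:

    #include <stdint.h>

    // Minimal sketch of the relation established above: any virtual address in
    // [virtual_base, virtual_base + image_size) maps to physical_base plus the
    // same offset.
    static inline uint64_t fully_virtual_to_phys(uint64_t vaddr,
                                                 uint64_t virtual_base,
                                                 uint64_t physical_base) {
        return physical_base + (vaddr - virtual_base);
    }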
@@ -490,6 +525,10 @@ final:
         if (load_addr & ((uint64_t)1 << 63)) {
             higher_half = true;
             load_addr -= FIXED_HIGHER_HALF_OFFSET_64;
+
+            if (fully_virtual) {
+                load_addr += base_load_addr;
+            }
         }
     }
 
@@ -497,7 +536,9 @@ final:
             panic("elf: Higher half executable trying to load too high");
         }
 
-        load_addr += slide;
+        if (!fully_virtual) {
+            load_addr += slide;
+        }
 
         uint64_t this_top = load_addr + phdr.p_memsz;
 
@@ -522,7 +563,8 @@ final:
             mem_size = phdr.p_memsz;
         }
 
-        if (!memmap_alloc_range((size_t)mem_base, (size_t)mem_size, alloc_type, true, false, simulation, false)) {
+        if (!fully_virtual &&
+            !memmap_alloc_range((size_t)mem_base, (size_t)mem_size, alloc_type, true, false, simulation, false)) {
             if (++try_count == max_simulated_tries || simulation == false) {
                 panic("elf: Failed to allocate necessary memory ranges");
             }
@@ -27,7 +27,7 @@ struct elf_section_hdr_info {
 
 int elf_bits(uint8_t *elf);
 
-int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_slide, uint32_t alloc_type, bool kaslr, bool use_paddr, struct elf_range **ranges, uint64_t *ranges_count);
+int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_slide, uint32_t alloc_type, bool kaslr, bool use_paddr, struct elf_range **ranges, uint64_t *ranges_count, bool fully_virtual, uint64_t *physical_base, uint64_t *virtual_base);
 int elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide);
 struct elf_section_hdr_info* elf64_section_hdr_info(uint8_t *elf);
 
@@ -46,6 +46,7 @@ void pmm_randomise_memory(void);
 
 void *ext_mem_alloc(size_t count);
 void *ext_mem_alloc_type(size_t count, uint32_t type);
+void *ext_mem_alloc_type_aligned(size_t count, uint32_t type, size_t alignment);
 
 void *conv_mem_alloc(size_t count);
 
@@ -574,9 +574,13 @@ void *ext_mem_alloc(size_t count) {
     return ext_mem_alloc_type(count, MEMMAP_BOOTLOADER_RECLAIMABLE);
 }
 
 // Allocate memory top down, hopefully without bumping into kernel or modules
 void *ext_mem_alloc_type(size_t count, uint32_t type) {
-    count = ALIGN_UP(count, 4096);
+    return ext_mem_alloc_type_aligned(count, type, 4096);
+}
+
+// Allocate memory top down, hopefully without bumping into kernel or modules
+void *ext_mem_alloc_type_aligned(size_t count, uint32_t type, size_t alignment) {
+    count = ALIGN_UP(count, alignment);
 
     if (allocations_disallowed)
         panic("Memory allocations disallowed");
@@ -595,7 +599,7 @@ void *ext_mem_alloc_type(size_t count, uint32_t type) {
             continue;
         }
 
-        int64_t alloc_base = ALIGN_DOWN(entry_top - (int64_t)count, 4096);
+        int64_t alloc_base = ALIGN_DOWN(entry_top - (int64_t)count, alignment);
 
         // This entry is too small for us.
         if (alloc_base < entry_base)
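The allocation above has to honour the image's largest segment alignment, so ext_mem_alloc_type becomes a thin wrapper around a new ext_mem_alloc_type_aligned. A hedged usage sketch (the prototype matches the pmm.h change; the memmap type constant's value is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    // Prototype introduced by this commit (see the pmm.h hunk above).
    void *ext_mem_alloc_type_aligned(size_t count, uint32_t type, size_t alignment);

    #define MEMMAP_KERNEL_AND_MODULES 0x1001 // illustrative value, not from this diff

    // Sketch: allocate a kernel image honouring its maximum segment alignment,
    // the way elf64_load now does for fully virtual kernels.
    static void *alloc_kernel_image(size_t image_size, size_t max_align) {
        return ext_mem_alloc_type_aligned(image_size, MEMMAP_KERNEL_AND_MODULES, max_align);
    }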
@@ -104,7 +104,7 @@ void multiboot1_load(char *config, char *cmdline) {
             break;
         case 64: {
             uint64_t e, t;
-            if (elf64_load(kernel, &e, &t, NULL, MEMMAP_KERNEL_AND_MODULES, false, true, NULL, NULL))
+            if (elf64_load(kernel, &e, &t, NULL, MEMMAP_KERNEL_AND_MODULES, false, true, NULL, NULL, false, NULL, NULL))
                 panic("multiboot1: ELF64 load failure");
             entry_point = e;
             kernel_top = t;
@@ -225,7 +225,7 @@ void multiboot2_load(char *config, char* cmdline) {
 
             break;
         case 64: {
-            if (elf64_load(kernel, &e, &t, NULL, MEMMAP_KERNEL_AND_MODULES, false, true, NULL, NULL))
+            if (elf64_load(kernel, &e, &t, NULL, MEMMAP_KERNEL_AND_MODULES, false, true, NULL, NULL, false, NULL, NULL))
                 panic("multiboot2: ELF64 load failure");
 
             break;
@@ -127,7 +127,7 @@ void stivale_load(char *config, char *cmdline) {
     if (!loaded_by_anchor) {
         if (elf64_load(kernel, &entry_point, NULL, &slide,
                        STIVALE_MMAP_KERNEL_AND_MODULES, kaslr, false,
-                       NULL, NULL))
+                       NULL, NULL, false, NULL, NULL))
             panic("stivale: ELF64 load failure");
 
         ret = elf64_load_section(kernel, &stivale_hdr, ".stivalehdr",
@@ -300,7 +300,7 @@ void stivale_load(char *config, char *cmdline) {
 
     pagemap_t pagemap = {0};
     if (bits == 64)
-        pagemap = stivale_build_pagemap(want_5lv, false, NULL, 0);
+        pagemap = stivale_build_pagemap(want_5lv, false, NULL, 0, false, 0);
 
     // Reserve 32K at 0x70000
     memmap_alloc_range(0x70000, 0x8000, MEMMAP_USABLE, true, true, false, false);
@@ -324,7 +324,8 @@ void stivale_load(char *config, char *cmdline) {
                    stivale_hdr.stack, false);
 }
 
-pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range *ranges, size_t ranges_count) {
+pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range *ranges, size_t ranges_count,
+                                bool want_fully_virtual, uint64_t physical_base) {
     pagemap_t pagemap = new_pagemap(level5pg ? 5 : 4);
     uint64_t higher_half_base = level5pg ? 0xff00000000000000 : 0xffff800000000000;
 
@@ -336,10 +337,14 @@ pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range
     } else {
         for (size_t i = 0; i < ranges_count; i++) {
             uint64_t virt = ranges[i].base;
-            uint64_t phys = virt;
+            uint64_t phys;
 
-            if (phys & ((uint64_t)1 << 63)) {
-                phys -= FIXED_HIGHER_HALF_OFFSET_64;
+            if (virt & ((uint64_t)1 << 63)) {
+                if (want_fully_virtual) {
+                    phys = physical_base + (virt - FIXED_HIGHER_HALF_OFFSET_64);
+                } else {
+                    phys = virt - FIXED_HIGHER_HALF_OFFSET_64;
+                }
+            } else {
+                panic("stivale2: Protected memory ranges are only supported for higher half kernels");
             }
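For kernels with protected memory ranges, stivale_build_pagemap now derives each range's physical address from the reported physical base when fully virtual mappings are in use, and only falls back to the fixed higher-half offset otherwise; non-higher-half kernels are rejected. The per-range selection, restated as a standalone sketch (the offset constant's value is shown for illustration only):

    #include <stdbool.h>
    #include <stdint.h>

    #define FIXED_HIGHER_HALF_OFFSET_64 0xffffffff80000000ULL // illustrative value

    // Sketch of the physical-address choice made for each PMR range above.
    static uint64_t pmr_range_phys(uint64_t virt, bool want_fully_virtual,
                                   uint64_t physical_base) {
        if (!(virt & ((uint64_t)1 << 63)))
            return (uint64_t)-1;  // the real code panics: PMRs need a higher-half kernel
        if (want_fully_virtual)
            return physical_base + (virt - FIXED_HIGHER_HALF_OFFSET_64);
        return virt - FIXED_HIGHER_HALF_OFFSET_64;
    }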
@@ -10,7 +10,8 @@ void stivale_load(char *config, char *cmdline);
 
 bool stivale_load_by_anchor(void **_anchor, const char *magic,
                             uint8_t *file, uint64_t filesize);
-pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range *ranges, size_t ranges_count);
+pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range *ranges, size_t ranges_count,
+                                bool want_fully_virtual, uint64_t physical_base);
 __attribute__((noreturn)) void stivale_spinup(
     int bits, bool level5pg, pagemap_t *pagemap,
     uint64_t entry_point, uint64_t stivale_struct, uint64_t stack,
@@ -32,22 +32,36 @@
 
 struct stivale2_struct stivale2_struct = {0};
 
-inline static size_t get_phys_addr(uint64_t addr) {
-    if (addr & ((uint64_t)1 << 63))
-        return addr - FIXED_HIGHER_HALF_OFFSET_64;
-    return addr;
-}
+#define get_phys_addr(addr) ({ \
+    uintptr_t r1; \
+    if ((addr) & ((uint64_t)1 << 63)) { \
+        if (want_fully_virtual) { \
+            r1 = physical_base + ((addr) - virtual_base); \
+        } else { \
+            r1 = (addr) - FIXED_HIGHER_HALF_OFFSET_64; \
+        } \
+    } else { \
+        r1 = addr; \
+    } \
+    r1; \
+})
 
-static void *get_tag(struct stivale2_header *s, uint64_t id) {
-    struct stivale2_tag *tag = (void*)get_phys_addr(s->tags);
-    for (;;) {
-        if (tag == NULL)
-            return NULL;
-        if (tag->identifier == id)
-            return tag;
-        tag = (void*)get_phys_addr(tag->next);
-    }
-}
+#define get_tag(s, id) ({ \
+    void *r; \
+    struct stivale2_tag *tag = (void *)get_phys_addr((s)->tags); \
+    for (;;) { \
+        if (tag == NULL) { \
+            r = NULL; \
+            break; \
+        } \
+        if (tag->identifier == (id)) { \
+            r = tag; \
+            break; \
+        } \
+        tag = (void *)get_phys_addr(tag->next); \
+    } \
+    r; \
+})
 
 #define append_tag(S, TAG) ({ \
     (TAG)->next = (S)->tags; \
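get_phys_addr and get_tag are turned into statement-expression macros so that they can see the want_fully_virtual, physical_base and virtual_base locals of stivale2_load without growing extra parameters at every call site. Written as a plain function with those values passed explicitly, the translation is equivalent to this sketch (not the code the bootloader compiles):

    #include <stdbool.h>
    #include <stdint.h>

    #define FIXED_HIGHER_HALF_OFFSET_64 0xffffffff80000000ULL // illustrative value

    // Function-style equivalent of the get_phys_addr macro, with the macro's
    // captured locals made explicit parameters.
    static uint64_t get_phys_addr_explicit(uint64_t addr, bool want_fully_virtual,
                                           uint64_t physical_base, uint64_t virtual_base) {
        if (addr & ((uint64_t)1 << 63)) {
            if (want_fully_virtual)
                return physical_base + (addr - virtual_base);
            return addr - FIXED_HIGHER_HALF_OFFSET_64;
        }
        return addr;
    }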
@@ -110,6 +124,9 @@ void stivale2_load(char *config, char *cmdline, bool pxe, void *efi_system_table
     }
 
     bool want_pmrs = false;
+    bool want_fully_virtual = false;
+
+    uint64_t physical_base, virtual_base;
 
     int ret = 0;
     switch (bits) {
@@ -141,10 +158,15 @@ void stivale2_load(char *config, char *cmdline, bool pxe, void *efi_system_table
             want_pmrs = true;
         }
 
+        if (want_pmrs && (stivale2_hdr.flags & (1 << 3))) {
+            want_fully_virtual = true;
+        }
+
         if (elf64_load(kernel, &entry_point, NULL, &slide,
                        STIVALE2_MMAP_KERNEL_AND_MODULES, kaslr, false,
                        want_pmrs ? &ranges : NULL,
-                       want_pmrs ? &ranges_count : NULL))
+                       want_pmrs ? &ranges_count : NULL,
+                       want_fully_virtual, &physical_base, &virtual_base))
             panic("stivale2: ELF64 load failure");
 
         ret = elf64_load_section(kernel, &stivale2_hdr, ".stivale2hdr",
@@ -579,6 +601,23 @@ have_tm_tag:;
         }
     }
 
+    //////////////////////////////////////////////
+    // Create PMRs struct tag
+    //////////////////////////////////////////////
+    {
+        if (want_fully_virtual) {
+            struct stivale2_struct_tag_kernel_base_address *tag =
+                ext_mem_alloc(sizeof(struct stivale2_struct_tag_kernel_base_address));
+
+            tag->tag.identifier = STIVALE2_STRUCT_TAG_KERNEL_BASE_ADDRESS_ID;
+
+            tag->physical_base_address = physical_base;
+            tag->virtual_base_address = virtual_base;
+
+            append_tag(&stivale2_struct, (struct stivale2_tag *)tag);
+        }
+    }
+
     //////////////////////////////////////////////
     // Create EFI system table struct tag
     //////////////////////////////////////////////
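The new kernel base address struct tag hands both bases to the kernel, which can then translate its own higher-half addresses to physical ones without assuming a fixed load offset. A hedged kernel-side sketch (assumes the stivale2.h definitions of the tag; the identifier check is left to the caller's tag walk):

    #include <stdint.h>
    #include <stivale2.h>

    // Sketch: convert a kernel virtual address (e.g. a symbol's address) to a
    // physical address using the kernel base address tag found during tag iteration.
    static uint64_t kernel_virt_to_phys(
            const struct stivale2_struct_tag_kernel_base_address *t,
            uint64_t kernel_vaddr) {
        return t->physical_base_address + (kernel_vaddr - t->virtual_base_address);
    }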
@@ -599,7 +638,8 @@ have_tm_tag:;
     if (bits == 64)
         pagemap = stivale_build_pagemap(want_5lv, unmap_null,
                                         want_pmrs ? ranges : NULL,
-                                        want_pmrs ? ranges_count : 0);
+                                        want_pmrs ? ranges_count : 0,
+                                        want_fully_virtual, physical_base);
 
 #if uefi == 1
     efi_exit_boot_services();
@@ -1,5 +1,5 @@
 CC = cc
-CFLAGS = -O2
+CFLAGS = -O2 -g
 LDFLAGS =
 LD = ld
 QEMU = qemu-system-x86_64
@@ -9,10 +9,7 @@ INTERNALLDFLAGS := \
     -Tlinker.ld \
     -nostdlib \
     -zmax-page-size=0x1000 \
-    -static \
-    -pie \
-    --no-dynamic-linker \
-    -ztext
+    -static
 
 INTERNALCFLAGS := \
     -I../stivale \
@@ -20,14 +17,15 @@ INTERNALCFLAGS := \
     -std=gnu11 \
     -ffreestanding \
     -fno-stack-protector \
-    -fno-pic -fpie \
+    -fno-pic -fno-pie \
     -mabi=sysv \
     -mno-80387 \
     -mno-mmx \
     -mno-3dnow \
     -mno-sse \
     -mno-sse2 \
-    -mno-red-zone
+    -mno-red-zone \
+    -mcmodel=kernel
 
 all: test.elf
 
@@ -6,12 +6,11 @@ PHDRS
     text    PT_LOAD    FLAGS((1 << 0) | (1 << 2)) ; /* Execute + Read */
     rodata  PT_LOAD    FLAGS((1 << 2)) ;            /* Read only */
     data    PT_LOAD    FLAGS((1 << 1) | (1 << 2)) ; /* Write + Read */
-    dynamic PT_DYNAMIC FLAGS((1 << 1) | (1 << 2)) ; /* Dynamic segment needed for PIE */
 }
 
 SECTIONS
 {
-    . = 0xffffffff80200000;
+    . = 0xffffffff80000000;
 
     .text : {
         *(.text*)
@@ -37,10 +36,6 @@ SECTIONS
         *(.data*)
     } :data
 
-    .dynamic : {
-        *(.dynamic)
-    } :data :dynamic
-
     .bss : {
         *(COMMON)
         *(.bss*)
@@ -40,7 +40,7 @@ __attribute__((section(".stivale2hdr"), used))
 struct stivale2_header header2 = {
     .entry_point = (uint64_t)stivale2_main,
     .stack = (uintptr_t)stacks[0] + sizeof(stack),
-    .flags = (1 << 1) | (1 << 2),
+    .flags = (1 << 1) | (1 << 2) | (1 << 3),
     .tags = (uint64_t)&any_video_request
 };
 
@@ -204,6 +204,13 @@ void stivale2_main(struct stivale2_struct *info) {
             }
             break;
         }
+        case STIVALE2_STRUCT_TAG_KERNEL_BASE_ADDRESS_ID: {
+            struct stivale2_struct_tag_kernel_base_address *t = (void *)tag;
+            e9_puts("Kernel base address:");
+            e9_printf("\tPhysical base address: %x", t->physical_base_address);
+            e9_printf("\tVirtual base address: %x", t->virtual_base_address);
+            break;
+        }
         case STIVALE2_STRUCT_TAG_SMP_ID: {
             struct stivale2_struct_tag_smp *s = (struct stivale2_struct_tag_smp *)tag;
             e9_puts("SMP tag:");