Mirror of https://github.com/limine-bootloader/limine
elf: Always do ASLR when loading relocatable ELFs and handle unavailable memory ranges instead of crashing
commit 30b750a2ad
parent 621a004bf2
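
In outline, the change below replaces the protocol loaders' optional, config-driven KASLR with a simulate-then-commit loop inside elf64_load(): when the ELF carries RELA relocations, the loader picks a random page-aligned slide, dry-runs the allocation of every PT_LOAD segment against the memory map, retries with a fresh slide on any collision (up to 250 attempts), and only then allocates for real; non-relocatable images skip the slide, and failures now return -1 instead of panicking. The toy program below models just that control flow; it is not Limine code, and alloc_range() plus the hard-coded segment table stand in for memmap_alloc_range() and the ELF program headers.

/* Toy model of the simulate-then-commit KASLR loop this commit adds to
 * elf64_load(). Not Limine code: alloc_range() and the segment table
 * below are stand-ins for memmap_alloc_range() and the PT_LOAD phdrs. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define KASLR_SLIDE_BITMASK ((uint64_t)0x3ffff000) /* 4 KiB-aligned, < 1 GiB */
#define MAX_SIMULATED_TRIES 250

/* Stand-in for memmap_alloc_range(..., simulation): when simulating it only
 * answers "would this range fit?" and leaves the memory map untouched. */
static bool alloc_range(uint64_t base, uint64_t len, bool simulation) {
    (void)simulation; /* a real implementation commits when this is false */
    return base + len < 0x20000000; /* pretend usable RAM ends at 512 MiB */
}

int main(void) {
    const uint64_t vaddr[] = { 0x100000, 0x200000 }; /* segment bases */
    const uint64_t size[]  = { 0x80000,  0x40000  }; /* segment sizes */
    uint64_t slide = 0;
    bool simulation = true; /* a non-relocatable ELF would start at false */
    size_t try_count = 0;

again:
    slide = ((uint64_t)rand() << 31 | (uint64_t)rand()) & KASLR_SLIDE_BITMASK;

final:
    for (size_t i = 0; i < sizeof(vaddr) / sizeof(*vaddr); i++) {
        if (!alloc_range(vaddr[i] + slide, size[i], simulation)) {
            if (++try_count == MAX_SIMULATED_TRIES || !simulation)
                return 1; /* report failure instead of panicking */
            goto again;   /* collision: pick a new slide, re-simulate */
        }
    }
    if (simulation) {       /* dry run passed: redo the loop for real, */
        simulation = false; /* with the same slide, committing ranges  */
        goto final;
    }
    printf("loaded with slide 0x%llx\n", (unsigned long long)slide);
    return 0;
}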
@@ -113,7 +113,7 @@ int fread(struct file_handle *fd, void *buf, uint64_t loc, uint64_t count) {
 void *freadall(struct file_handle *fd, uint32_t type) {
     if (fd->is_memfile) {
-        memmap_alloc_range((uint64_t)(size_t)fd->fd, fd->size, type, false, true);
+        memmap_alloc_range((uint64_t)(size_t)fd->fd, fd->size, type, false, true, false);
         return fd->fd;
     } else {
         void *ret = ext_mem_alloc_aligned_type(fd->size, 4096, type);
@@ -4,9 +4,12 @@
 #include <lib/libc.h>
 #include <lib/elf.h>
 #include <lib/print.h>
+#include <lib/rand.h>
 #include <mm/pmm.h>
 #include <fs/file.h>
 
+#define KASLR_SLIDE_BITMASK ((uintptr_t)0x3ffff000)
+
 #define PT_LOAD     0x00000001
 #define PT_INTERP   0x00000003
 #define PT_PHDR     0x00000006
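
A side note on the mask introduced above: 0x3ffff000 keeps bits 12 through 29 of the random value, so every slide it can produce is a multiple of the 4 KiB page size and strictly below 1 GiB (0x3ffff000 == 0x40000000 - 0x1000). A standalone sanity check of that property, purely illustrative:

#include <assert.h>
#include <stdint.h>

#define KASLR_SLIDE_BITMASK ((uintptr_t)0x3ffff000)

int main(void) {
    /* Whatever the input, the masked value is page-aligned and < 1 GiB. */
    uintptr_t slide = (uintptr_t)0xdeadbeef & KASLR_SLIDE_BITMASK;
    assert(slide % 0x1000 == 0);
    assert(slide < (uintptr_t)0x40000000);
    return 0;
}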
@@ -133,6 +136,27 @@ int elf_bits(struct file_handle *fd) {
     }
 }
 
+static bool elf64_is_relocatable(struct file_handle *fd, struct elf64_hdr *hdr) {
+    // Find RELA sections
+    for (uint16_t i = 0; i < hdr->sh_num; i++) {
+        struct elf64_shdr section;
+        fread(fd, &section, hdr->shoff + i * sizeof(struct elf64_shdr),
+              sizeof(struct elf64_shdr));
+
+        if (section.sh_type != SHT_RELA)
+            continue;
+
+        if (section.sh_entsize != sizeof(struct elf64_rela)) {
+            print("elf: Unknown sh_entsize for RELA section!\n");
+            continue;
+        }
+
+        return true;
+    }
+
+    return false;
+}
+
 static int elf64_apply_relocations(struct file_handle *fd, struct elf64_hdr *hdr, void *buffer, uint64_t vaddr, size_t size, uint64_t slide) {
     // Find RELA sections
     for (uint16_t i = 0; i < hdr->sh_num; i++) {
@@ -268,7 +292,7 @@ int elf32_load_section(struct file_handle *fd, void *buffer, const char *name, size_t limit) {
     return 2;
 }
 
-int elf64_load(struct file_handle *fd, uint64_t *entry_point, uint64_t *top, uint64_t slide, uint32_t alloc_type) {
+int elf64_load(struct file_handle *fd, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type) {
     struct elf64_hdr hdr;
     fread(fd, &hdr, 0, sizeof(struct elf64_hdr));
@@ -287,8 +311,20 @@ int elf64_load(struct file_handle *fd, uint64_t *entry_point, uint64_t *top, uint64_t slide, uint32_t alloc_type) {
         return -1;
     }
 
-    *top = 0;
+    uint64_t slide = 0;
+    bool simulation = true;
+    size_t try_count = 0;
+    size_t max_simulated_tries = 250;
+
+    if (!elf64_is_relocatable(fd, &hdr)) {
+        simulation = false;
+        goto final;
+    }
+
+again:
+    slide = rand64() & KASLR_SLIDE_BITMASK;
+
+final:
     for (uint16_t i = 0; i < hdr.ph_num; i++) {
         struct elf64_phdr phdr;
         fread(fd, &phdr, hdr.phoff + i * sizeof(struct elf64_phdr),
@@ -304,12 +340,11 @@ int elf64_load(struct file_handle *fd, uint64_t *entry_point, uint64_t *top, uint64_t slide, uint32_t alloc_type) {
 
         load_vaddr += slide;
 
-        uint64_t this_top = load_vaddr + phdr.p_memsz;
-
-        if (this_top > *top)
-            *top = this_top;
-
-        memmap_alloc_range((size_t)load_vaddr, (size_t)phdr.p_memsz, alloc_type, true, true);
+        if (!memmap_alloc_range((size_t)load_vaddr, (size_t)phdr.p_memsz, alloc_type, true, false, simulation)) {
+            if (++try_count == max_simulated_tries || simulation == false)
+                return -1;
+            goto again;
+        }
 
         fread(fd, (void *)(uintptr_t)load_vaddr, phdr.p_offset, phdr.p_filesz);
@@ -324,12 +359,18 @@ int elf64_load(struct file_handle *fd, uint64_t *entry_point, uint64_t *top, uint64_t slide, uint32_t alloc_type) {
         return -1;
     }
 
+    if (simulation) {
+        simulation = false;
+        goto final;
+    }
+
     *entry_point = hdr.entry + slide;
+    *_slide = slide;
 
     return 0;
 }
 
-int elf32_load(struct file_handle *fd, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type) {
+int elf32_load(struct file_handle *fd, uint32_t *entry_point, uint32_t alloc_type) {
     struct elf32_hdr hdr;
     fread(fd, &hdr, 0, sizeof(struct elf32_hdr));
@@ -348,8 +389,6 @@ int elf32_load(struct file_handle *fd, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type) {
         return -1;
     }
 
-    *top = 0;
-
     for (uint16_t i = 0; i < hdr.ph_num; i++) {
         struct elf32_phdr phdr;
         fread(fd, &phdr, hdr.phoff + i * sizeof(struct elf32_phdr),
@@ -358,11 +397,7 @@ int elf32_load(struct file_handle *fd, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type) {
         if (phdr.p_type != PT_LOAD)
            continue;
 
-        uint32_t this_top = phdr.p_vaddr + phdr.p_memsz;
-
-        if (this_top > *top)
-            *top = this_top;
-
-        memmap_alloc_range((size_t)phdr.p_paddr, (size_t)phdr.p_memsz, alloc_type, true, true);
+        memmap_alloc_range((size_t)phdr.p_paddr, (size_t)phdr.p_memsz, alloc_type, true, true, false);
 
         fread(fd, (void *)(uintptr_t)phdr.p_paddr, phdr.p_offset, phdr.p_filesz);
@@ -8,10 +8,10 @@
 
 int elf_bits(struct file_handle *fd);
 
-int elf64_load(struct file_handle *fd, uint64_t *entry_point, uint64_t *top, uint64_t slide, uint32_t alloc_type);
+int elf64_load(struct file_handle *fd, uint64_t *entry_point, uint64_t *slide, uint32_t alloc_type);
 int elf64_load_section(struct file_handle *fd, void *buffer, const char *name, size_t limit, uint64_t slide);
 
-int elf32_load(struct file_handle *fd, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type);
+int elf32_load(struct file_handle *fd, uint32_t *entry_point, uint32_t alloc_type);
 int elf32_load_section(struct file_handle *fd, void *buffer, const char *name, size_t limit);
 
 #endif
@@ -25,7 +25,7 @@ extern size_t memmap_entries;
 void init_memmap(void);
 struct e820_entry_t *get_memmap(size_t *entries);
 void print_memmap(struct e820_entry_t *mm, size_t size);
-bool memmap_alloc_range(uint64_t base, uint64_t length, uint32_t type, bool free_only, bool panic);
+bool memmap_alloc_range(uint64_t base, uint64_t length, uint32_t type, bool free_only, bool panic, bool simulation);
 
 void *ext_mem_alloc(size_t count);
 void *ext_mem_alloc_type(size_t count, uint32_t type);
@@ -342,7 +342,7 @@ void init_memmap(void) {
 
     memmap_alloc_range(bump_allocator_base,
                        bump_allocator_limit - bump_allocator_base,
-                       MEMMAP_REMOVE_RANGE, true, true);
+                       MEMMAP_REMOVE_RANGE, true, true, false);
 
     print("pmm: Conventional mem allocator base:  %X\n", bump_allocator_base);
     print("pmm: Conventional mem allocator limit: %X\n", bump_allocator_limit);
@@ -399,7 +399,7 @@ void *ext_mem_alloc_aligned_type(size_t count, size_t alignment, uint32_t type) {
 
         // We now reserve the range we need.
         int64_t aligned_length = entry_top - alloc_base;
-        memmap_alloc_range((uint64_t)alloc_base, (uint64_t)aligned_length, type, true, true);
+        memmap_alloc_range((uint64_t)alloc_base, (uint64_t)aligned_length, type, true, true, false);
 
         void *ret = (void *)(size_t)alloc_base;
@@ -414,7 +414,7 @@ void *ext_mem_alloc_aligned_type(size_t count, size_t alignment, uint32_t type) {
     panic("High memory allocator: Out of memory");
 }
 
-bool memmap_alloc_range(uint64_t base, uint64_t length, uint32_t type, bool free_only, bool do_panic) {
+bool memmap_alloc_range(uint64_t base, uint64_t length, uint32_t type, bool free_only, bool do_panic, bool simulation) {
     if (length == 0)
         return true;
@@ -443,6 +443,9 @@ bool memmap_alloc_range(uint64_t base, uint64_t length, uint32_t type, bool free_only, bool do_panic) {
         if (type == MEMMAP_REMOVE_RANGE &&
             base == entry_base && top == entry_top) {
 
+            if (simulation)
+                return true;
+
             // Eradicate from memmap
             for (size_t j = i; j < memmap_entries - 1; j++) {
                 memmap[j] = memmap[j+1];
@@ -454,6 +457,10 @@ bool memmap_alloc_range(uint64_t base, uint64_t length, uint32_t type, bool free_only, bool do_panic) {
 
         if (base >= entry_base && base <  entry_top &&
             top  >= entry_base && top  <= entry_top) {
+
+            if (simulation)
+                return true;
+
             struct e820_entry_t *target;
 
             memmap[i].length -= entry_top - base;
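
As the two hunks above show, a simulation == true call now returns its verdict before the memory map is touched, which is what lets elf64_load() probe a candidate slide cheaply. A hypothetical caller-side sketch using only the new signature (the base/length values are made up):

/* Dry-run probe: ask whether a range is free without reserving it. */
uint64_t base = 0x1000000, length = 0x200000; /* illustrative values */
if (memmap_alloc_range(base, length, MEMMAP_BOOTLOADER_RECLAIMABLE,
                       true,      /* free_only: only carve out usable RAM  */
                       false,     /* do_panic: report failure, don't panic */
                       true)) {   /* simulation: dry run, memmap unchanged */
    /* The range fits; call again with simulation=false to claim it. */
    memmap_alloc_range(base, length, MEMMAP_BOOTLOADER_RECLAIMABLE,
                       true, false, false);
}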
@@ -417,7 +417,7 @@ void linux_load(char *config, char *cmdline) {
     for (;;) {
         if (memmap_alloc_range(kernel_load_addr,
                                kernel->size - real_mode_code_size,
-                               MEMMAP_BOOTLOADER_RECLAIMABLE, true, false))
+                               MEMMAP_BOOTLOADER_RECLAIMABLE, true, false, false))
             break;
 
         kernel_load_addr += 0x100000;
@@ -451,7 +451,7 @@ void linux_load(char *config, char *cmdline) {
 
     for (;;) {
         if (memmap_alloc_range(modules_mem_base, size_of_all_modules,
-                               MEMMAP_BOOTLOADER_RECLAIMABLE, true, false))
+                               MEMMAP_BOOTLOADER_RECLAIMABLE, true, false, false))
             break;
         modules_mem_base -= 4096;
     }
@@ -9,7 +9,6 @@
 #include <lib/config.h>
 #include <lib/time.h>
 #include <lib/print.h>
-#include <lib/rand.h>
 #include <lib/real.h>
 #include <lib/uri.h>
 #include <lib/fb.h>
@@ -23,8 +22,6 @@
 #include <mm/mtrr.h>
 #include <stivale/stivale.h>
 
-#define KASLR_SLIDE_BITMASK 0x000FFF000u
-
 struct stivale_struct stivale_struct = {0};
 
 void stivale_load(char *config, char *cmdline) {
@@ -50,9 +47,11 @@ void stivale_load(char *config, char *cmdline) {
 
     int ret;
 
-    uint64_t slide = 0;
-
     bool level5pg = false;
+
+    uint64_t slide = 0;
+    uint64_t entry_point = 0;
+
     switch (bits) {
         case 64: {
             // Check if 64 bit CPU
@@ -66,19 +65,21 @@ void stivale_load(char *config, char *cmdline) {
                 level5pg = true;
             }
 
-            char *s_kaslr = config_get_value(config, 0, "KASLR");
-            if (s_kaslr != NULL && !strcmp(s_kaslr, "yes")) {
-                // KASLR is enabled, set the slide
-                slide = rand64() & KASLR_SLIDE_BITMASK;
-            }
+            if (elf64_load(kernel, &entry_point, &slide, 10))
+                panic("stivale: ELF64 load failure");
 
             ret = elf64_load_section(kernel, &stivale_hdr, ".stivalehdr", sizeof(struct stivale_header), slide);
 
             break;
         }
-        case 32:
+        case 32: {
+            if (elf32_load(kernel, (uint32_t *)&entry_point, 10))
+                panic("stivale: ELF32 load failure");
+
             ret = elf32_load_section(kernel, &stivale_hdr, ".stivalehdr", sizeof(struct stivale_header));
 
             break;
+        }
         default:
             panic("stivale: Not 32 nor 64 bit x86 ELF file.");
     }
@@ -96,26 +97,13 @@ void stivale_load(char *config, char *cmdline) {
         panic("stivale: Section .stivalehdr is smaller than size of the struct.");
     }
 
-    print("stivale: Requested stack at %X\n", stivale_hdr.stack);
-
-    uint64_t entry_point   = 0;
-    uint64_t top_used_addr = 0;
-
-    switch (bits) {
-        case 64:
-            elf64_load(kernel, &entry_point, &top_used_addr, slide, 10);
-            break;
-        case 32:
-            elf32_load(kernel, (uint32_t *)&entry_point, (uint32_t *)&top_used_addr, 10);
-            break;
-    }
-
     if (stivale_hdr.entry_point != 0)
         entry_point = stivale_hdr.entry_point;
 
     print("stivale: Kernel slide: %X\n", slide);
 
-    print("stivale: Top used address in ELF: %X\n", top_used_addr);
+    print("stivale: Entry point at: %X\n", entry_point);
+    print("stivale: Requested stack at: %X\n", stivale_hdr.stack);
 
     stivale_struct.module_count = 0;
     uint64_t *prev_mod_ptr = &stivale_struct.modules;
@@ -68,35 +68,39 @@ void stivale2_load(char *config, char *cmdline, bool pxe, void *efi_system_table) {
 
     int ret;
 
-    uint64_t slide = 0;
-
     bool level5pg = false;
+
+    uint64_t slide = 0;
+    uint64_t entry_point = 0;
+
     switch (bits) {
         case 64: {
             // Check if 64 bit CPU
             uint32_t eax, ebx, ecx, edx;
             if (!cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx) || !(edx & (1 << 29))) {
-                panic("stivale: This CPU does not support 64-bit mode.");
+                panic("stivale2: This CPU does not support 64-bit mode.");
             }
             // Check if 5-level paging is available
             if (cpuid(0x00000007, 0, &eax, &ebx, &ecx, &edx) && (ecx & (1 << 16))) {
-                print("stivale: CPU has 5-level paging support\n");
+                print("stivale2: CPU has 5-level paging support\n");
                 level5pg = true;
             }
 
-            char *s_kaslr = config_get_value(config, 0, "KASLR");
-            if (s_kaslr != NULL && !strcmp(s_kaslr, "yes")) {
-                // KASLR is enabled, set the slide
-                slide = rand64() & KASLR_SLIDE_BITMASK;
-            }
+            if (elf64_load(kernel, &entry_point, &slide, 10))
+                panic("stivale2: ELF64 load failure");
 
             ret = elf64_load_section(kernel, &stivale2_hdr, ".stivale2hdr", sizeof(struct stivale2_header), slide);
 
             break;
         }
-        case 32:
+        case 32: {
+            if (elf32_load(kernel, (uint32_t *)&entry_point, 10))
+                panic("stivale2: ELF32 load failure");
+
             ret = elf32_load_section(kernel, &stivale2_hdr, ".stivale2hdr", sizeof(struct stivale2_header));
 
             break;
+        }
         default:
             panic("stivale2: Not 32 nor 64 bit x86 ELF file.");
     }
@@ -114,26 +118,13 @@ void stivale2_load(char *config, char *cmdline, bool pxe, void *efi_system_table) {
         panic("stivale2: Section .stivale2hdr is smaller than size of the struct.");
     }
 
-    print("stivale2: Requested stack at %X\n", stivale2_hdr.stack);
-
-    uint64_t entry_point   = 0;
-    uint64_t top_used_addr = 0;
-
-    switch (bits) {
-        case 64:
-            elf64_load(kernel, &entry_point, &top_used_addr, slide, 0x1001);
-            break;
-        case 32:
-            elf32_load(kernel, (uint32_t *)&entry_point, (uint32_t *)&top_used_addr, 0x1001);
-            break;
-    }
-
     if (stivale2_hdr.entry_point != 0)
         entry_point = stivale2_hdr.entry_point;
 
     print("stivale2: Kernel slide: %X\n", slide);
 
-    print("stivale2: Top used address in ELF: %X\n", top_used_addr);
+    print("stivale2: Entry point at: %X\n", entry_point);
+    print("stivale2: Requested stack at: %X\n", stivale2_hdr.stack);
 
     strcpy(stivale2_struct.bootloader_brand, "Limine");
     strcpy(stivale2_struct.bootloader_version, LIMINE_VERSION);