multiboot: Load modules right after kernel to emulate GRUB behaviour

mintsuki 2021-07-02 23:58:22 +02:00
parent 04c4c42779
commit d53facc731
5 changed files with 40 additions and 12 deletions
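Until now the multiboot1 loader read each module with freadall(), which lets the memory-map allocator place it wherever free space happens to be. With this commit the ELF loaders also report the highest address occupied by the kernel image, and every module is then placed on the first page boundary above everything loaded so far, giving the contiguous kernel-then-modules layout the commit title refers to. In outline (a minimal sketch; module_count, module_size[] and load_module() are hypothetical stand-ins for the loader's own file and memory-map handling):

/* Sketch of the placement policy only, not the bootloader's actual code. */
uintptr_t top = elf_top;                   /* highest address used by the loaded kernel */
for (size_t i = 0; i < module_count; i++) {
    uintptr_t base = ALIGN_UP(top, 4096);  /* each module starts on the next page boundary */
    load_module(i, (void *)base);          /* hypothetical: reserve the range and copy the module in */
    top = base + module_size[i];           /* the next module stacks directly above this one */
}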

View File

@@ -292,7 +292,7 @@ int elf32_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit
     return 2;
 }
-int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type, bool kaslr, bool use_paddr) {
+int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_slide, uint32_t alloc_type, bool kaslr, bool use_paddr) {
     struct elf64_hdr hdr;
     memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
@@ -329,6 +329,9 @@ again:
         slide = rand64() & KASLR_SLIDE_BITMASK;
 final:
+    if (top)
+        *top = 0;
     for (uint16_t i = 0; i < hdr.ph_num; i++) {
         struct elf64_phdr phdr;
         memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf64_phdr)),
@@ -350,6 +353,13 @@ final:
         load_addr += slide;
+        if (top) {
+            uint64_t this_top = load_addr + phdr.p_memsz;
+            if (this_top > *top) {
+                *top = this_top;
+            }
+        }
         if (!memmap_alloc_range((size_t)load_addr, (size_t)phdr.p_memsz, alloc_type, true, false, simulation, false)) {
             if (++try_count == max_simulated_tries || simulation == false)
                 return -1;
@@ -391,7 +401,7 @@ final:
     return 0;
 }
-int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t alloc_type) {
+int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type) {
     struct elf32_hdr hdr;
     memcpy(&hdr, elf + (0), sizeof(struct elf32_hdr));
@@ -413,6 +423,9 @@ int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t alloc_type) {
     uint32_t entry = hdr.entry;
     bool entry_adjusted = false;
+    if (top)
+        *top = 0;
     for (uint16_t i = 0; i < hdr.ph_num; i++) {
         struct elf32_phdr phdr;
         memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf32_phdr)),
@@ -421,6 +434,13 @@ int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t alloc_type) {
         if (phdr.p_type != PT_LOAD)
             continue;
+        if (top) {
+            uint32_t this_top = phdr.p_paddr + phdr.p_memsz;
+            if (this_top > *top) {
+                *top = this_top;
+            }
+        }
         memmap_alloc_range((size_t)phdr.p_paddr, (size_t)phdr.p_memsz, alloc_type, true, true, false, false);
         memcpy((void *)(uintptr_t)phdr.p_paddr, elf + (phdr.p_offset), phdr.p_filesz);
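Both loaders compute the new top out-parameter the same way: it starts at 0 and, for every PT_LOAD program header, is raised to that segment's end address, using p_memsz rather than p_filesz so that zero-filled (.bss) space counts as well. A standalone sketch of the same computation, assuming physical addresses plus the slide that elf64_load applies (illustrative only, not the loader's own code):

static uint64_t image_top(struct elf64_phdr *phdrs, uint16_t count, uint64_t slide) {
    uint64_t top = 0;
    for (uint16_t i = 0; i < count; i++) {
        if (phdrs[i].p_type != PT_LOAD)
            continue;                       /* only loadable segments occupy memory */
        uint64_t end = phdrs[i].p_paddr + slide + phdrs[i].p_memsz;
        if (end > top)
            top = end;                      /* keep the highest end address seen */
    }
    return top;
}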

View File

@@ -9,10 +9,10 @@
 int elf_bits(uint8_t *elf);
-int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *slide, uint32_t alloc_type, bool kaslr, bool use_paddr);
+int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *slide, uint32_t alloc_type, bool kaslr, bool use_paddr);
 int elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide);
-int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t alloc_type);
+int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type);
 int elf32_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit);
 #endif
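The extra parameter is optional at the call sites: a caller that does not need the information passes NULL, as the stivale paths further down do, while the multiboot1 path passes a pointer and keeps the result. A call-site sketch (kernel, MEMMAP_KERNEL_AND_MODULES and panic() are taken from the surrounding diff; the rest is illustrative):

uint64_t entry, top;
if (elf64_load(kernel, &entry, &top, NULL, MEMMAP_KERNEL_AND_MODULES, false, true))
    panic("ELF64 load failure");
/* top now holds the first address past the loaded kernel image;
 * passing NULL instead of &top skips the bookkeeping entirely. */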

View File

@@ -52,6 +52,7 @@ void multiboot1_load(char *config, char *cmdline) {
         panic("multiboot1: Header checksum is invalid");
     uint32_t entry_point = 0;
+    uint32_t elf_top = 0;
     if (header.flags & (1 << 16)) {
         if (header.load_addr > header.header_addr)
@@ -85,14 +86,15 @@ void multiboot1_load(char *config, char *cmdline) {
     switch (bits) {
         case 32:
-            if (elf32_load(kernel, &entry_point, MEMMAP_KERNEL_AND_MODULES))
+            if (elf32_load(kernel, &entry_point, &elf_top, MEMMAP_KERNEL_AND_MODULES))
                 panic("multiboot1: ELF32 load failure");
             break;
         case 64: {
-            uint64_t e;
-            if (elf64_load(kernel, &e, NULL, MEMMAP_KERNEL_AND_MODULES, false, true))
+            uint64_t e, t;
+            if (elf64_load(kernel, &e, &t, NULL, MEMMAP_KERNEL_AND_MODULES, false, true))
                 panic("multiboot1: ELF64 load failure");
             entry_point = e;
+            elf_top = t;
             break;
         }
@@ -129,7 +131,13 @@ void multiboot1_load(char *config, char *cmdline) {
         char *cmdline = config_get_value(config, i, "MODULE_STRING");
-        m->begin = (uint32_t)(size_t)freadall(&f, MEMMAP_KERNEL_AND_MODULES);
+        void *module_addr = (void *)ALIGN_UP(elf_top, 4096);
+        memmap_alloc_range((uintptr_t)module_addr, f.size, MEMMAP_KERNEL_AND_MODULES,
+                           true, true, false, false);
+        elf_top = (uintptr_t)module_addr + f.size;
+        fread(&f, module_addr, 0, f.size);
+        m->begin = (uint32_t)(size_t)module_addr;
         m->end = m->begin + f.size;
         m->cmdline = (uint32_t)(size_t)cmdline;
         m->pad = 0;
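Concretely, ALIGN_UP rounds elf_top up to the next multiple of 4096, memmap_alloc_range reserves that range in the memory map, fread copies the module into it, and elf_top then advances past the module so the next module stacks on top. With illustrative numbers (ALIGN_UP is the usual round-up-to-a-multiple helper; the addresses below are made up):

/* Suppose the kernel image ends at elf_top = 0x00212345. */
void *mod0 = (void *)ALIGN_UP(0x00212345, 4096);  /* -> 0x00213000, start of module 0 */
/* If module 0 is 0x5000 bytes long, elf_top becomes 0x00213000 + 0x5000 = 0x00218000. */
void *mod1 = (void *)ALIGN_UP(0x00218000, 4096);  /* -> 0x00218000, already page-aligned */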

View File

@@ -79,7 +79,7 @@ void stivale_load(char *config, char *cmdline) {
                 level5pg = true;
             }
-            if (elf64_load(kernel, &entry_point, &slide, STIVALE_MMAP_KERNEL_AND_MODULES, kaslr, false))
+            if (elf64_load(kernel, &entry_point, NULL, &slide, STIVALE_MMAP_KERNEL_AND_MODULES, kaslr, false))
                 panic("stivale: ELF64 load failure");
             ret = elf64_load_section(kernel, &stivale_hdr, ".stivalehdr", sizeof(struct stivale_header), slide);
@@ -87,7 +87,7 @@ void stivale_load(char *config, char *cmdline) {
             break;
         }
         case 32: {
-            if (elf32_load(kernel, (uint32_t *)&entry_point, 10))
+            if (elf32_load(kernel, (uint32_t *)&entry_point, NULL, 10))
                 panic("stivale: ELF32 load failure");
             ret = elf32_load_section(kernel, &stivale_hdr, ".stivalehdr", sizeof(struct stivale_header));

View File

@@ -101,7 +101,7 @@ void stivale2_load(char *config, char *cmdline, bool pxe, void *efi_system_table
                 level5pg = true;
             }
-            if (elf64_load(kernel, &entry_point, &slide, STIVALE2_MMAP_KERNEL_AND_MODULES, kaslr, false))
+            if (elf64_load(kernel, &entry_point, NULL, &slide, STIVALE2_MMAP_KERNEL_AND_MODULES, kaslr, false))
                 panic("stivale2: ELF64 load failure");
             ret = elf64_load_section(kernel, &stivale2_hdr, ".stivale2hdr", sizeof(struct stivale2_header), slide);
@@ -109,7 +109,7 @@ void stivale2_load(char *config, char *cmdline, bool pxe, void *efi_system_table
             break;
         }
         case 32: {
-            if (elf32_load(kernel, (uint32_t *)&entry_point, 10))
+            if (elf32_load(kernel, (uint32_t *)&entry_point, NULL, 10))
                 panic("stivale2: ELF32 load failure");
             ret = elf32_load_section(kernel, &stivale2_hdr, ".stivale2hdr", sizeof(struct stivale2_header));