limine/elf: Optimise not to scan .bss for requests

mintsuki 2023-07-08 23:44:50 +02:00
parent 99d671ecf8
commit 9333f1ca8b
3 changed files with 16 additions and 5 deletions
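The Limine protocol loader locates a kernel's request structures by scanning the loaded image for the two-qword LIMINE_COMMON_MAGIC value. The .bss portion of the image is zero-filled rather than loaded from the file, so it can never contain a request, and scanning it is wasted work. This commit makes elf64_load() report the image size excluding the trailing .bss through a new _image_size_before_bss out-parameter, and the request scan in limine.c is bounded by that value instead of the full image size. Illustrative sketches follow the elf.c and limine.c hunks below.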

View File

@@ -408,7 +408,7 @@ static void elf64_get_ranges(uint8_t *elf, uint64_t slide, struct elf_range **_r
*_ranges = ranges;
}
-bool elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *_image_size, bool *is_reloc) {
+bool elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *_image_size, uint64_t *_image_size_before_bss, bool *is_reloc) {
struct elf64_hdr *hdr = (void *)elf;
if (strncmp((char *)hdr->ident, "\177ELF", 4)) {
@@ -534,6 +534,8 @@ again:
}
}
+uint64_t bss_size;
for (uint16_t i = 0; i < hdr->ph_num; i++) {
struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);
@@ -564,6 +566,10 @@ again:
memcpy((void *)(uintptr_t)load_addr, elf + (phdr->p_offset), phdr->p_filesz);
+if (i == hdr->ph_num - 1) {
+bss_size = phdr->p_memsz - phdr->p_filesz;
+}
if (!elf64_apply_relocations(elf, hdr, (void *)(uintptr_t)load_addr, phdr->p_vaddr, phdr->p_memsz, slide)) {
panic(true, "elf: Failed to apply relocations");
}
@@ -574,6 +580,10 @@ again:
#endif
}
+if (_image_size_before_bss != NULL) {
+*_image_size_before_bss = image_size - bss_size;
+}
*virtual_base += slide;
*entry_point = entry + slide;
if (_slide) {
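The new bookkeeping above takes the last program header's zero-filled tail (p_memsz - p_filesz) as the .bss size and subtracts it from the total image size. A minimal, self-contained sketch of that derivation, using a trimmed stand-in for the bootloader's struct elf64_phdr (not the real definition):

#include <stdint.h>

/* Trimmed stand-in for struct elf64_phdr; illustration only. */
struct phdr_sketch {
    uint64_t p_filesz; /* bytes backed by the file */
    uint64_t p_memsz;  /* bytes occupied in memory (>= p_filesz) */
};

/* The last loaded segment's tail beyond p_filesz is zero-filled (.bss);
 * removing it from the total span leaves only file-backed bytes. */
static uint64_t size_before_bss(uint64_t image_size,
                                const struct phdr_sketch *last_load) {
    uint64_t bss_size = last_load->p_memsz - last_load->p_filesz;
    return image_size - bss_size;
}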

View File

@@ -30,7 +30,7 @@ struct elf_section_hdr_info elf64_section_hdr_info(uint8_t *elf);
struct elf_section_hdr_info elf32_section_hdr_info(uint8_t *elf);
bool elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide);
-bool elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *image_size, bool *is_reloc);
+bool elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *image_size, uint64_t *image_size_before_bss, bool *is_reloc);
bool elf32_load_elsewhere(uint8_t *elf, uint64_t *entry_point,
struct elsewhere_range **ranges,

View File

@@ -331,13 +331,14 @@ noreturn void limine_load(char *config, char *cmdline) {
struct elf_range *ranges;
uint64_t ranges_count;
-uint64_t image_size;
+uint64_t image_size_before_bss;
bool is_reloc;
if (!elf64_load(kernel, &entry_point, &slide,
MEMMAP_KERNEL_AND_MODULES, kaslr,
&ranges, &ranges_count,
-&physical_base, &virtual_base, &image_size,
+&physical_base, &virtual_base, NULL,
+&image_size_before_bss,
&is_reloc)) {
panic(true, "limine: ELF64 load failure");
}
@@ -358,7 +359,7 @@ noreturn void limine_load(char *config, char *cmdline) {
requests = ext_mem_alloc(MAX_REQUESTS * sizeof(void *));
requests_count = 0;
uint64_t common_magic[2] = { LIMINE_COMMON_MAGIC };
-for (size_t i = 0; i < ALIGN_DOWN(image_size, 8); i += 8) {
+for (size_t i = 0; i < ALIGN_DOWN(image_size_before_bss, 8); i += 8) {
uint64_t *p = (void *)(uintptr_t)physical_base + i;
if (p[0] != common_magic[0]) {
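For context, a minimal sketch of the bounded request scan that the change above feeds into. scan_requests is an assumed helper name, and the magic words are passed in rather than taken from limine.h; the real loop lives in limine_load() and compares against LIMINE_COMMON_MAGIC directly.

#include <stddef.h>
#include <stdint.h>

static size_t scan_requests(uint8_t *image, uint64_t image_size_before_bss,
                            const uint64_t common_magic[2]) {
    size_t found = 0;
    uint64_t limit = image_size_before_bss & ~(uint64_t)7; /* ALIGN_DOWN(x, 8) */

    /* Requests are 8-byte aligned and begin with two magic qwords, so only
     * the file-backed part of the image needs to be walked; the zero-filled
     * .bss past this bound can never match. */
    for (uint64_t i = 0; i + 16 <= limit; i += 8) {
        uint64_t *p = (uint64_t *)(image + i);
        if (p[0] == common_magic[0] && p[1] == common_magic[1]) {
            found++;
        }
    }
    return found;
}

Because the loader zero-fills everything past p_filesz, narrowing the bound from image_size to image_size_before_bss cannot miss a request; it only skips memory that is guaranteed to be zero.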