Revert "Revert "elf: General refactor""

This reverts commit 4c8516c6a6.

parent 4c8516c6a6
commit 831af39ed4
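In short: the refactor below replaces the old copy-the-header-out-then-read pattern with direct pointer access into the ELF file buffer, and moves the loaders from int error codes to bool results. The following is a minimal, self-contained sketch of the two access patterns only; elf64_hdr_min, the fake buffer, and main() are invented for illustration and are not the real definitions from common/lib/elf.h.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stripped-down stand-in for the real ELF64 header struct. */
    struct elf64_hdr_min {
        uint8_t  ident[16];
        uint16_t type;
        uint16_t machine;
    };

    /* Old pattern: copy the header out of the file buffer, then read the copy. */
    static uint16_t machine_by_copy(uint8_t *elf) {
        struct elf64_hdr_min hdr;
        memcpy(&hdr, elf, sizeof hdr);
        return hdr.machine;
    }

    /* New pattern: point straight into the buffer and read through the pointer. */
    static uint16_t machine_in_place(uint8_t *elf) {
        struct elf64_hdr_min *hdr = (void *)elf;
        return hdr->machine;
    }

    int main(void) {
        uint8_t fake[24] = {0};
        fake[18] = 0x3e; /* EM_X86_64, low byte, little-endian */
        printf("%u %u\n", machine_by_copy(fake), machine_in_place(fake));
        return 0;
    }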
common/lib/elf.c | 610

common/lib/elf.c
@@ -102,15 +102,14 @@ struct elf64_dyn {
 };
 
 int elf_bits(uint8_t *elf) {
-    struct elf64_hdr hdr;
-    memcpy(&hdr, elf + (0), 20);
+    struct elf64_hdr *hdr = (void *)elf;
 
-    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
+    if (strncmp((char *)hdr->ident, "\177ELF", 4)) {
         printv("elf: Not a valid ELF file.\n");
         return -1;
     }
 
-    switch (hdr.machine) {
+    switch (hdr->machine) {
         case ARCH_X86_64:
         case ARCH_AARCH64:
             return 64;
@@ -121,6 +120,32 @@ int elf_bits(uint8_t *elf) {
     }
 }
 
+struct elf_section_hdr_info elf64_section_hdr_info(uint8_t *elf) {
+    struct elf_section_hdr_info info = {0};
+
+    struct elf64_hdr *hdr = (void *)elf;
+
+    info.num = hdr->sh_num;
+    info.section_entry_size = hdr->shdr_size;
+    info.str_section_idx = hdr->shstrndx;
+    info.section_offset = hdr->shoff;
+
+    return info;
+}
+
+struct elf_section_hdr_info elf32_section_hdr_info(uint8_t *elf) {
+    struct elf_section_hdr_info info = {0};
+
+    struct elf32_hdr *hdr = (void *)elf;
+
+    info.num = hdr->sh_num;
+    info.section_entry_size = hdr->shdr_size;
+    info.str_section_idx = hdr->shstrndx;
+    info.section_offset = hdr->shoff;
+
+    return info;
+}
+
 static bool elf64_is_relocatable(uint8_t *elf, struct elf64_hdr *hdr) {
     if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
         panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
@@ -128,20 +153,16 @@ static bool elf64_is_relocatable(uint8_t *elf, struct elf64_hdr *hdr) {
 
     // Find DYN segment
     for (uint16_t i = 0; i < hdr->ph_num; i++) {
-        struct elf64_phdr phdr;
-        memcpy(&phdr, elf + (hdr->phoff + i * hdr->phdr_size),
-               sizeof(struct elf64_phdr));
+        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);
 
-        if (phdr.p_type != PT_DYNAMIC) {
+        if (phdr->p_type != PT_DYNAMIC) {
             continue;
         }
 
-        for (uint16_t j = 0; j < phdr.p_filesz / sizeof(struct elf64_dyn); j++) {
-            struct elf64_dyn dyn;
-            memcpy(&dyn, elf + (phdr.p_offset + j * sizeof(struct elf64_dyn)),
-                   sizeof(struct elf64_dyn));
+        for (uint16_t j = 0; j < phdr->p_filesz / sizeof(struct elf64_dyn); j++) {
+            struct elf64_dyn *dyn = (void *)elf + (phdr->p_offset + j * sizeof(struct elf64_dyn));
 
-            switch (dyn.d_tag) {
+            switch (dyn->d_tag) {
                 case DT_RELA:
                     return true;
             }
@@ -151,37 +172,33 @@ static bool elf64_is_relocatable(uint8_t *elf, struct elf64_hdr *hdr) {
     return false;
 }
 
-static int elf64_apply_relocations(uint8_t *elf, struct elf64_hdr *hdr, void *buffer, uint64_t vaddr, size_t size, uint64_t slide) {
+static bool elf64_apply_relocations(uint8_t *elf, struct elf64_hdr *hdr, void *buffer, uint64_t vaddr, size_t size, uint64_t slide) {
     if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
         panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
     }
 
     // Find DYN segment
     for (uint16_t i = 0; i < hdr->ph_num; i++) {
-        struct elf64_phdr phdr;
-        memcpy(&phdr, elf + (hdr->phoff + i * hdr->phdr_size),
-               sizeof(struct elf64_phdr));
+        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);
 
-        if (phdr.p_type != PT_DYNAMIC)
+        if (phdr->p_type != PT_DYNAMIC)
             continue;
 
         uint64_t rela_offset = 0;
         uint64_t rela_size = 0;
         uint64_t rela_ent = 0;
-        for (uint16_t j = 0; j < phdr.p_filesz / sizeof(struct elf64_dyn); j++) {
-            struct elf64_dyn dyn;
-            memcpy(&dyn, elf + (phdr.p_offset + j * sizeof(struct elf64_dyn)),
-                   sizeof(struct elf64_dyn));
+        for (uint16_t j = 0; j < phdr->p_filesz / sizeof(struct elf64_dyn); j++) {
+            struct elf64_dyn *dyn = (void *)elf + (phdr->p_offset + j * sizeof(struct elf64_dyn));
 
-            switch (dyn.d_tag) {
+            switch (dyn->d_tag) {
                 case DT_RELA:
-                    rela_offset = dyn.d_un;
+                    rela_offset = dyn->d_un;
                     break;
                 case DT_RELAENT:
-                    rela_ent = dyn.d_un;
+                    rela_ent = dyn->d_un;
                     break;
                 case DT_RELASZ:
-                    rela_size = dyn.d_un;
+                    rela_size = dyn->d_un;
                     break;
             }
         }
@@ -192,27 +209,24 @@ static int elf64_apply_relocations(uint8_t *elf, struct elf64_hdr *hdr, void *bu
 
         if (rela_ent != sizeof(struct elf64_rela)) {
             print("elf: Unknown sh_entsize for RELA section!\n");
-            return 1;
+            return false;
         }
 
         for (uint16_t j = 0; j < hdr->ph_num; j++) {
-            struct elf64_phdr _phdr;
-            memcpy(&_phdr, elf + (hdr->phoff + j * hdr->phdr_size),
-                   sizeof(struct elf64_phdr));
+            struct elf64_phdr *_phdr = (void *)elf + (hdr->phoff + j * hdr->phdr_size);
 
-            if (_phdr.p_vaddr <= rela_offset && _phdr.p_vaddr + _phdr.p_filesz > rela_offset) {
-                rela_offset -= _phdr.p_vaddr;
-                rela_offset += _phdr.p_offset;
+            if (_phdr->p_vaddr <= rela_offset && _phdr->p_vaddr + _phdr->p_filesz > rela_offset) {
+                rela_offset -= _phdr->p_vaddr;
+                rela_offset += _phdr->p_offset;
                 break;
             }
         }
 
         // This is a RELA header, get and apply all relocations
         for (uint64_t offset = 0; offset < rela_size; offset += rela_ent) {
-            struct elf64_rela relocation;
-            memcpy(&relocation, elf + (rela_offset + offset), sizeof(struct elf64_rela));
+            struct elf64_rela *relocation = (void *)elf + (rela_offset + offset);
 
-            switch (relocation.r_info) {
+            switch (relocation->r_info) {
 #if defined (__x86_64__) || defined (__i386__)
                 case R_X86_64_RELATIVE:
 #elif defined (__aarch64__)
@@ -222,218 +236,106 @@ static int elf64_apply_relocations(uint8_t *elf, struct elf64_hdr *hdr, void *bu
 #endif
                 {
                     // Relocation is before buffer
-                    if (relocation.r_addr < vaddr)
+                    if (relocation->r_addr < vaddr)
                         continue;
 
                     // Relocation is after buffer
-                    if (vaddr + size < relocation.r_addr + 8)
+                    if (vaddr + size < relocation->r_addr + 8)
                         continue;
 
                     // It's inside it, calculate where it is
-                    uint64_t *ptr = (uint64_t *)((uint8_t *)buffer - vaddr + relocation.r_addr);
+                    uint64_t *ptr = (uint64_t *)((uint8_t *)buffer - vaddr + relocation->r_addr);
 
                     // Write the relocated value
-                    *ptr = slide + relocation.r_addend;
+                    *ptr = slide + relocation->r_addend;
                     break;
                 }
-                default:
-                    print("elf: Unknown RELA type: %x\n", relocation.r_info);
-                    return 1;
+                default: {
+                    print("elf: Unknown RELA type: %x\n", relocation->r_info);
+                    return false;
+                }
             }
         }
 
         break;
     }
 
-    return 0;
+    return true;
 }
 
-int elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide) {
-    struct elf64_hdr hdr;
-    memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
+bool elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide) {
+    struct elf64_hdr *hdr = (void *)elf;
 
-    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
+    if (strncmp((char *)hdr->ident, "\177ELF", 4)) {
         printv("elf: Not a valid ELF file.\n");
-        return 1;
+        return false;
     }
 
-    if (hdr.ident[EI_DATA] != BITS_LE) {
+    if (hdr->ident[EI_DATA] != BITS_LE) {
         printv("elf: Not a Little-endian ELF file.\n");
-        return 1;
+        return false;
     }
 
 #if defined (__x86_64__) || defined (__i386__)
-    if (hdr.machine != ARCH_X86_64) {
+    if (hdr->machine != ARCH_X86_64) {
         printv("elf: Not an x86_64 ELF file.\n");
-        return 1;
+        return false;
     }
 #elif defined (__aarch64__)
-    if (hdr.machine != ARCH_AARCH64) {
+    if (hdr->machine != ARCH_AARCH64) {
         printv("elf: Not an aarch64 ELF file.\n");
-        return 1;
+        return false;
     }
 #else
 #error Unknown architecture
 #endif
 
-    if (hdr.shdr_size < sizeof(struct elf64_shdr)) {
+    if (hdr->shdr_size < sizeof(struct elf64_shdr)) {
         panic(true, "elf: shdr_size < sizeof(struct elf64_shdr)");
     }
 
-    struct elf64_shdr shstrtab;
-    memcpy(&shstrtab, elf + (hdr.shoff + hdr.shstrndx * hdr.shdr_size),
-           sizeof(struct elf64_shdr));
+    struct elf64_shdr *shstrtab = (void *)elf + (hdr->shoff + hdr->shstrndx * hdr->shdr_size);
 
-    char *names = ext_mem_alloc(shstrtab.sh_size);
-    memcpy(names, elf + (shstrtab.sh_offset), shstrtab.sh_size);
+    char *names = (void *)elf + shstrtab->sh_offset;
 
-    int ret;
-
-    for (uint16_t i = 0; i < hdr.sh_num; i++) {
-        struct elf64_shdr section;
-        memcpy(&section, elf + (hdr.shoff + i * hdr.shdr_size),
-               sizeof(struct elf64_shdr));
+    for (uint16_t i = 0; i < hdr->sh_num; i++) {
+        struct elf64_shdr *section = (void *)elf + (hdr->shoff + i * hdr->shdr_size);
 
-        if (!strcmp(&names[section.sh_name], name)) {
+        if (strcmp(&names[section->sh_name], name) == 0) {
             if (limit == 0) {
-                *(void **)buffer = ext_mem_alloc(section.sh_size);
+                *(void **)buffer = ext_mem_alloc(section->sh_size);
                 buffer = *(void **)buffer;
-                limit = section.sh_size;
+                limit = section->sh_size;
             }
-            if (section.sh_size > limit) {
-                ret = 3;
-                goto out;
+            if (section->sh_size > limit) {
+                return false;
             }
-            if (section.sh_size < limit) {
-                ret = 4;
-                goto out;
-            }
-            memcpy(buffer, elf + (section.sh_offset), section.sh_size);
-            ret = elf64_apply_relocations(elf, &hdr, buffer, section.sh_addr, section.sh_size, slide);
-            goto out;
+            memcpy(buffer, elf + section->sh_offset, section->sh_size);
+            return elf64_apply_relocations(elf, hdr, buffer, section->sh_addr, section->sh_size, slide);
         }
     }
 
-    ret = 2;
-
-out:
-    pmm_free(names, shstrtab.sh_size);
-
-    return ret;
-}
-
-/// SAFETY: The caller must ensure that the provided `elf` is a valid 64-bit
-/// ELF file.
-struct elf_section_hdr_info* elf64_section_hdr_info(uint8_t *elf) {
-    struct elf_section_hdr_info* info = ext_mem_alloc(sizeof(struct elf_section_hdr_info));
-
-    struct elf64_hdr hdr;
-    memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
-
-    info->num = hdr.sh_num;
-    info->section_entry_size = hdr.shdr_size;
-    info->str_section_idx = hdr.shstrndx;
-    info->section_offset = hdr.shoff;
-
-    return info;
-}
-
-/// SAFETY: The caller must ensure that the provided `elf` is a valid 32-bit
-/// ELF file.
-struct elf_section_hdr_info* elf32_section_hdr_info(uint8_t *elf) {
-    struct elf_section_hdr_info* info = ext_mem_alloc(sizeof(struct elf_section_hdr_info));
-
-    struct elf32_hdr hdr;
-    memcpy(&hdr, elf + (0), sizeof(struct elf32_hdr));
-
-    info->num = hdr.sh_num;
-    info->section_entry_size = hdr.shdr_size;
-    info->str_section_idx = hdr.shstrndx;
-    info->section_offset = hdr.shoff;
-
-    return info;
-}
-
-int elf32_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit) {
-    struct elf32_hdr hdr;
-    memcpy(&hdr, elf + (0), sizeof(struct elf32_hdr));
-
-    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
-        printv("elf: Not a valid ELF file.\n");
-        return 1;
-    }
-
-    if (hdr.ident[EI_DATA] != BITS_LE) {
-        printv("elf: Not a Little-endian ELF file.\n");
-        return 1;
-    }
-
-    if (hdr.machine != ARCH_X86_32) {
-        printv("elf: Not an x86_32 ELF file.\n");
-        return 1;
-    }
-
-    if (hdr.shdr_size < sizeof(struct elf32_shdr)) {
-        panic(true, "elf: shdr_size < sizeof(struct elf32_shdr)");
-    }
-
-    struct elf32_shdr shstrtab;
-    memcpy(&shstrtab, elf + (hdr.shoff + hdr.shstrndx * hdr.shdr_size),
-           sizeof(struct elf32_shdr));
-
-    char *names = ext_mem_alloc(shstrtab.sh_size);
-    memcpy(names, elf + (shstrtab.sh_offset), shstrtab.sh_size);
-
-    int ret;
-
-    for (uint16_t i = 0; i < hdr.sh_num; i++) {
-        struct elf32_shdr section;
-        memcpy(&section, elf + (hdr.shoff + i * hdr.shdr_size),
-               sizeof(struct elf32_shdr));
-
-        if (!strcmp(&names[section.sh_name], name)) {
-            if (section.sh_size > limit) {
-                ret = 3;
-                goto out;
-            }
-            if (section.sh_size < limit) {
-                ret = 4;
-                goto out;
-            }
-            memcpy(buffer, elf + (section.sh_offset), section.sh_size);
-            ret = 0;
-            goto out;
-        }
-    }
-
-    ret = 2;
-
-out:
-    pmm_free(names, shstrtab.sh_size);
-
-    return ret;
+    return false;
 }
 
 static uint64_t elf64_max_align(uint8_t *elf) {
     uint64_t ret = 0;
 
-    struct elf64_hdr hdr;
-    memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
+    struct elf64_hdr *hdr = (void *)elf;
 
-    if (hdr.phdr_size < sizeof(struct elf64_phdr)) {
+    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
         panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
     }
 
-    for (uint16_t i = 0; i < hdr.ph_num; i++) {
-        struct elf64_phdr phdr;
-        memcpy(&phdr, elf + (hdr.phoff + i * hdr.phdr_size),
-               sizeof(struct elf64_phdr));
+    for (uint16_t i = 0; i < hdr->ph_num; i++) {
+        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);
 
-        if (phdr.p_type != PT_LOAD)
+        if (phdr->p_type != PT_LOAD) {
             continue;
+        }
 
-        if (phdr.p_align > ret) {
-            ret = phdr.p_align;
+        if (phdr->p_align > ret) {
+            ret = phdr->p_align;
         }
     }
 
@@ -445,24 +347,22 @@ static uint64_t elf64_max_align(uint8_t *elf) {
 }
 
 static void elf64_get_ranges(uint8_t *elf, uint64_t slide, struct elf_range **_ranges, uint64_t *_ranges_count) {
-    struct elf64_hdr hdr;
-    memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
+    struct elf64_hdr *hdr = (void *)elf;
 
     uint64_t ranges_count = 0;
 
-    if (hdr.phdr_size < sizeof(struct elf64_phdr)) {
+    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
         panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
     }
 
-    for (uint16_t i = 0; i < hdr.ph_num; i++) {
-        struct elf64_phdr phdr;
-        memcpy(&phdr, elf + (hdr.phoff + i * hdr.phdr_size),
-               sizeof(struct elf64_phdr));
+    for (uint16_t i = 0; i < hdr->ph_num; i++) {
+        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);
 
-        if (phdr.p_type != PT_LOAD)
+        if (phdr->p_type != PT_LOAD) {
             continue;
+        }
 
-        if (phdr.p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
+        if (phdr->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
             continue;
         }
 
@@ -470,35 +370,29 @@ static void elf64_get_ranges(uint8_t *elf, uint64_t slide, struct elf_range **_r
     }
 
     if (ranges_count == 0) {
-        panic(true, "elf: Attempted to use PMRs but no higher half PHDRs exist");
+        panic(true, "elf: No higher half PHDRs exist");
     }
 
     struct elf_range *ranges = ext_mem_alloc(ranges_count * sizeof(struct elf_range));
 
     size_t r = 0;
-    for (uint16_t i = 0; i < hdr.ph_num; i++) {
-        struct elf64_phdr phdr;
-        memcpy(&phdr, elf + (hdr.phoff + i * hdr.phdr_size),
-               sizeof(struct elf64_phdr));
+    for (uint16_t i = 0; i < hdr->ph_num; i++) {
+        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);
 
-        if (phdr.p_type != PT_LOAD)
-            continue;
-
-        uint64_t load_addr = 0;
-
-        load_addr = phdr.p_vaddr;
-
-        if (phdr.p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
-            continue;
-        }
-
-        load_addr += slide;
-
-        uint64_t this_top = load_addr + phdr.p_memsz;
-
-        ranges[r].base = load_addr & ~(phdr.p_align - 1);
-        ranges[r].length = ALIGN_UP(this_top - ranges[r].base, phdr.p_align);
-        ranges[r].permissions = phdr.p_flags & 0b111;
+        if (phdr->p_type != PT_LOAD) {
+            continue;
+        }
+
+        if (phdr->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
+            continue;
+        }
+
+        uint64_t load_addr = phdr->p_vaddr + slide;
+        uint64_t this_top = load_addr + phdr->p_memsz;
+
+        ranges[r].base = load_addr & ~(phdr->p_align - 1);
+        ranges[r].length = ALIGN_UP(this_top - ranges[r].base, phdr->p_align);
+        ranges[r].permissions = phdr->p_flags & 0b111;
 
         r++;
     }
@@ -507,25 +401,24 @@ static void elf64_get_ranges(uint8_t *elf, uint64_t slide, struct elf_range **_r
     *_ranges = ranges;
 }
 
-int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, bool fully_virtual, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *_image_size, bool *is_reloc) {
-    struct elf64_hdr hdr;
-    memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
+bool elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *_image_size, bool *is_reloc) {
+    struct elf64_hdr *hdr = (void *)elf;
 
-    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
+    if (strncmp((char *)hdr->ident, "\177ELF", 4)) {
         printv("elf: Not a valid ELF file.\n");
-        return -1;
+        return false;
     }
 
-    if (hdr.ident[EI_DATA] != BITS_LE) {
+    if (hdr->ident[EI_DATA] != BITS_LE) {
         panic(true, "elf: Not a Little-endian ELF file.\n");
     }
 
 #if defined (__x86_64__) || defined (__i386__)
-    if (hdr.machine != ARCH_X86_64) {
+    if (hdr->machine != ARCH_X86_64) {
         panic(true, "elf: Not an x86_64 ELF file.\n");
     }
 #elif defined (__aarch64__)
-    if (hdr.machine != ARCH_AARCH64) {
+    if (hdr->machine != ARCH_AARCH64) {
         panic(true, "elf: Not an aarch64 ELF file.\n");
     }
 #else
@@ -537,65 +430,56 @@ int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_sl
     }
 
     uint64_t slide = 0;
     bool simulation = true;
     size_t try_count = 0;
-    size_t max_simulated_tries = 0x100000;
+    size_t max_simulated_tries = 0x10000;
 
-    uint64_t entry = hdr.entry;
+    uint64_t entry = hdr->entry;
 
     uint64_t max_align = elf64_max_align(elf);
 
     uint64_t image_size = 0;
 
-    if (hdr.phdr_size < sizeof(struct elf64_phdr)) {
+    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
         panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
     }
 
-    if (fully_virtual) {
-        simulation = false;
-
-        uint64_t min_vaddr = (uint64_t)-1;
-        uint64_t max_vaddr = 0;
-        for (uint16_t i = 0; i < hdr.ph_num; i++) {
-            struct elf64_phdr phdr;
-            memcpy(&phdr, elf + (hdr.phoff + i * hdr.phdr_size),
-                   sizeof(struct elf64_phdr));
-
-            if (phdr.p_type != PT_LOAD) {
-                continue;
-            }
-
-            // Drop entries not in the higher half
-            if (phdr.p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
-                continue;
-            }
-
-            if (phdr.p_vaddr < min_vaddr) {
-                min_vaddr = phdr.p_vaddr;
-            }
-
-            if (phdr.p_vaddr + phdr.p_memsz > max_vaddr) {
-                max_vaddr = phdr.p_vaddr + phdr.p_memsz;
-            }
-        }
-
-        if (max_vaddr == 0 || min_vaddr == (uint64_t)-1) {
-            panic(true, "elf: Attempted to use fully virtual mappings but no higher half PHDRs exist");
-        }
-
-        image_size = max_vaddr - min_vaddr;
-
-        *physical_base = (uintptr_t)ext_mem_alloc_type_aligned(image_size, alloc_type, max_align);
-        *virtual_base = min_vaddr;
-        if (_image_size) {
-            *_image_size = image_size;
-        }
-    }
-
-    if (!elf64_is_relocatable(elf, &hdr)) {
-        simulation = false;
-        goto final;
-    } else {
+    uint64_t min_vaddr = (uint64_t)-1;
+    uint64_t max_vaddr = 0;
+    for (uint16_t i = 0; i < hdr->ph_num; i++) {
+        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);
+
+        if (phdr->p_type != PT_LOAD) {
+            continue;
+        }
+
+        // Drop entries not in the higher half
+        if (phdr->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
+            continue;
+        }
+
+        if (phdr->p_vaddr < min_vaddr) {
+            min_vaddr = phdr->p_vaddr;
+        }
+
+        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
+            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
+        }
+    }
+
+    if (max_vaddr == 0 || min_vaddr == (uint64_t)-1) {
+        panic(true, "elf: No higher half PHDRs exist");
+    }
+
+    image_size = max_vaddr - min_vaddr;
+
+    *physical_base = (uintptr_t)ext_mem_alloc_type_aligned(image_size, alloc_type, max_align);
+    *virtual_base = min_vaddr;
+    if (_image_size) {
+        *_image_size = image_size;
+    }
+
+    if (elf64_is_relocatable(elf, hdr)) {
         if (is_reloc) {
             *is_reloc = true;
         }
@@ -605,98 +489,45 @@ again:
     if (kaslr) {
         slide = rand32() & ~(max_align - 1);
 
-        if (fully_virtual) {
-            if ((*virtual_base - FIXED_HIGHER_HALF_OFFSET_64) + slide + image_size >= 0x80000000) {
-                if (++try_count == max_simulated_tries) {
-                    panic(true, "elf: Image wants to load too high");
-                }
-                goto again;
-            }
-        }
-    }
-
-final:
-    if (top)
-        *top = 0;
-
-    bool higher_half = false;
-
-    for (uint16_t i = 0; i < hdr.ph_num; i++) {
-        struct elf64_phdr phdr;
-        memcpy(&phdr, elf + (hdr.phoff + i * hdr.phdr_size),
-               sizeof(struct elf64_phdr));
-
-        if (phdr.p_type != PT_LOAD)
-            continue;
-
-        // Sanity checks
-        if (phdr.p_filesz > phdr.p_memsz) {
-            panic(true, "elf: p_filesz > p_memsz");
-        }
-
-        uint64_t load_addr = phdr.p_vaddr;
-
-        if (phdr.p_vaddr >= FIXED_HIGHER_HALF_OFFSET_64) {
-            higher_half = true;
-
-            if (fully_virtual) {
-                load_addr = *physical_base + (phdr.p_vaddr - *virtual_base);
-            } else {
-                load_addr = phdr.p_vaddr - FIXED_HIGHER_HALF_OFFSET_64;
-            }
-        } else if (ranges) {
-            // Drop lower half
-            continue;
-        }
-
-        if (!fully_virtual) {
-            load_addr += slide;
-        }
-
-        uint64_t this_top = load_addr + phdr.p_memsz;
-
-        if (top) {
-            if (this_top > *top) {
-                *top = this_top;
-            }
-        }
-
-        uint64_t mem_base, mem_size;
-
-        if (ranges) {
-            mem_base = load_addr & ~(phdr.p_align - 1);
-            mem_size = this_top - mem_base;
-        } else {
-            mem_base = load_addr;
-            mem_size = phdr.p_memsz;
-        }
-
-        if (!fully_virtual &&
-            ((higher_half == true && this_top > 0x80000000)
-          || !memmap_alloc_range((size_t)mem_base, (size_t)mem_size, alloc_type, true, false, simulation, false))) {
-            if (simulation == false || ++try_count == max_simulated_tries) {
-                panic(true, "elf: Failed to allocate necessary memory range (%X-%X)", mem_base, mem_base + mem_size);
-            }
-            if (!kaslr) {
-                slide += max_align;
-            }
-            goto again;
-        }
-
-        if (simulation) {
-            continue;
-        }
-
-        memcpy((void *)(uintptr_t)load_addr, elf + (phdr.p_offset), phdr.p_filesz);
-
-        size_t to_zero = (size_t)(phdr.p_memsz - phdr.p_filesz);
-
-        if (to_zero) {
-            void *ptr = (void *)(uintptr_t)(load_addr + phdr.p_filesz);
-            memset(ptr, 0, to_zero);
-        }
-
-        if (elf64_apply_relocations(elf, &hdr, (void *)(uintptr_t)load_addr, phdr.p_vaddr, phdr.p_memsz, slide)) {
+        if ((*virtual_base - FIXED_HIGHER_HALF_OFFSET_64) + slide + image_size >= 0x80000000) {
+            if (++try_count == max_simulated_tries) {
+                panic(true, "elf: Image wants to load too high");
+            }
+            goto again;
+        }
+    }
+
+    for (uint16_t i = 0; i < hdr->ph_num; i++) {
+        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);
+
+        if (phdr->p_type != PT_LOAD) {
+            continue;
+        }
+
+        // Drop entries not in the higher half
+        if (phdr->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
+            continue;
+        }
+
+        // Sanity checks
+        if (phdr->p_filesz > phdr->p_memsz) {
+            panic(true, "elf: p_filesz > p_memsz");
+        }
+
+        uint64_t load_addr = *physical_base + (phdr->p_vaddr - *virtual_base);
+
+#if defined (__aarch64__)
+        uint64_t this_top = load_addr + phdr->p_memsz;
+
+        uint64_t mem_base, mem_size;
+
+        mem_base = load_addr & ~(phdr->p_align - 1);
+        mem_size = this_top - mem_base;
+#endif
+
+        memcpy((void *)(uintptr_t)load_addr, elf + (phdr->p_offset), phdr->p_filesz);
+
+        if (!elf64_apply_relocations(elf, hdr, (void *)(uintptr_t)load_addr, phdr->p_vaddr, phdr->p_memsz, slide)) {
             panic(true, "elf: Failed to apply relocations");
         }
 
@@ -706,96 +537,17 @@ final:
 #endif
     }
 
-    if (simulation) {
-        simulation = false;
-        goto final;
-    }
-
-    if (fully_virtual) {
-        *virtual_base += slide;
-    }
-
+    *virtual_base += slide;
     *entry_point = entry + slide;
-    if (_slide)
+    if (_slide) {
         *_slide = slide;
+    }
 
     if (ranges_count != NULL && ranges != NULL) {
         elf64_get_ranges(elf, slide, ranges, ranges_count);
     }
 
-    return 0;
-}
-
-int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type) {
-    struct elf32_hdr hdr;
-    memcpy(&hdr, elf + (0), sizeof(struct elf32_hdr));
-
-    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
-        printv("elf: Not a valid ELF file.\n");
-        return -1;
-    }
-
-    if (hdr.ident[EI_DATA] != BITS_LE) {
-        printv("elf: Not a Little-endian ELF file.\n");
-        return -1;
-    }
-
-    if (hdr.machine != ARCH_X86_32) {
-        printv("elf: Not an x86_32 ELF file.\n");
-        return -1;
-    }
-
-    uint32_t entry = hdr.entry;
-    bool entry_adjusted = false;
-
-    if (top)
-        *top = 0;
-
-    if (hdr.phdr_size < sizeof(struct elf32_phdr)) {
-        panic(true, "elf: phdr_size < sizeof(struct elf32_phdr)");
-    }
-
-    for (uint16_t i = 0; i < hdr.ph_num; i++) {
-        struct elf32_phdr phdr;
-        memcpy(&phdr, elf + (hdr.phoff + i * hdr.phdr_size),
-               sizeof(struct elf32_phdr));
-
-        if (phdr.p_type != PT_LOAD)
-            continue;
-
-        // Sanity checks
-        if (phdr.p_filesz > phdr.p_memsz) {
-            panic(true, "elf: p_filesz > p_memsz");
-        }
-
-        if (top) {
-            uint32_t this_top = phdr.p_paddr + phdr.p_memsz;
-            if (this_top > *top) {
-                *top = this_top;
-            }
-        }
-
-        memmap_alloc_range((size_t)phdr.p_paddr, (size_t)phdr.p_memsz, alloc_type, true, true, false, false);
-
-        memcpy((void *)(uintptr_t)phdr.p_paddr, elf + (phdr.p_offset), phdr.p_filesz);
-
-        size_t to_zero = (size_t)(phdr.p_memsz - phdr.p_filesz);
-
-        if (to_zero) {
-            void *ptr = (void *)(uintptr_t)(phdr.p_paddr + phdr.p_filesz);
-            memset(ptr, 0, to_zero);
-        }
-
-        if (!entry_adjusted && entry >= phdr.p_vaddr && entry < (phdr.p_vaddr + phdr.p_memsz)) {
-            entry -= phdr.p_vaddr;
-            entry += phdr.p_paddr;
-            entry_adjusted = true;
-        }
-    }
-
-    *entry_point = entry;
-
-    return 0;
+    return true;
 }
 
 bool elf32_load_elsewhere(uint8_t *elf, uint64_t *entry_point,
@@ -26,13 +26,11 @@ struct elf_section_hdr_info {
 
 int elf_bits(uint8_t *elf);
 
-int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, bool fully_virtual, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *image_size, bool *is_reloc);
-int elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide);
-struct elf_section_hdr_info* elf64_section_hdr_info(uint8_t *elf);
+struct elf_section_hdr_info elf64_section_hdr_info(uint8_t *elf);
+struct elf_section_hdr_info elf32_section_hdr_info(uint8_t *elf);
 
-int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type);
-int elf32_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit);
-struct elf_section_hdr_info* elf32_section_hdr_info(uint8_t *elf);
+bool elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide);
+bool elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *image_size, bool *is_reloc);
 
 bool elf32_load_elsewhere(uint8_t *elf, uint64_t *entry_point,
                           struct elsewhere_range **ranges,
@@ -57,6 +55,7 @@ struct elf64_hdr {
     uint16_t sh_num;
     uint16_t shstrndx;
 };
+
 struct elf64_shdr {
     uint32_t sh_name;
     uint32_t sh_type;
@@ -69,6 +68,7 @@ struct elf64_shdr {
     uint64_t sh_addralign;
     uint64_t sh_entsize;
 };
+
 struct elf64_sym {
     uint32_t st_name;
     uint8_t st_info;
@@ -308,10 +308,10 @@ noreturn void limine_load(char *config, char *cmdline) {
     uint64_t image_size;
     bool is_reloc;
 
-    if (elf64_load(kernel, &entry_point, NULL, &slide,
+    if (!elf64_load(kernel, &entry_point, &slide,
                    MEMMAP_KERNEL_AND_MODULES, kaslr,
                    &ranges, &ranges_count,
-                   true, &physical_base, &virtual_base, &image_size,
+                   &physical_base, &virtual_base, &image_size,
                    &is_reloc)) {
         panic(true, "limine: ELF64 load failure");
     }
@@ -319,7 +319,7 @@ noreturn void limine_load(char *config, char *cmdline) {
     kaslr = kaslr && is_reloc;
 
     // Load requests
-    if (elf64_load_section(kernel, &requests, ".limine_reqs", 0, slide) == 0) {
+    if (elf64_load_section(kernel, &requests, ".limine_reqs", 0, slide)) {
         for (size_t i = 0; ; i++) {
             if (requests[i] == NULL) {
                 break;
@@ -83,7 +83,8 @@ noreturn void multiboot1_load(char *config, char *cmdline) {
     if (header.magic + header.flags + header.checksum)
         panic(true, "multiboot1: Header checksum is invalid");
 
-    struct elf_section_hdr_info *section_hdr_info = NULL;
+    bool section_hdr_info_valid = false;
+    struct elf_section_hdr_info section_hdr_info = {0};
 
     uint64_t entry_point;
     struct elsewhere_range *ranges;
@@ -132,12 +133,14 @@ noreturn void multiboot1_load(char *config, char *cmdline) {
                 panic(true, "multiboot1: ELF32 load failure");
 
             section_hdr_info = elf32_section_hdr_info(kernel);
+            section_hdr_info_valid = true;
             break;
         case 64: {
             if (!elf64_load_elsewhere(kernel, &entry_point, &ranges, &ranges_count))
                 panic(true, "multiboot1: ELF64 load failure");
 
             section_hdr_info = elf64_section_hdr_info(kernel);
+            section_hdr_info_valid = true;
             break;
         }
         default:
@@ -161,8 +164,8 @@ noreturn void multiboot1_load(char *config, char *cmdline) {
         cmdline,
         n_modules,
         modules_cmdlines_size,
-        section_hdr_info ? section_hdr_info->section_entry_size : 0,
-        section_hdr_info ? section_hdr_info->num : 0
+        section_hdr_info_valid ? section_hdr_info.section_entry_size : 0,
+        section_hdr_info_valid ? section_hdr_info.num : 0
     );
 
     // Realloc elsewhere ranges to include mb1 info, modules, and elf sections
@@ -170,7 +173,7 @@ noreturn void multiboot1_load(char *config, char *cmdline) {
         (ranges_count
          + 1 /* mb1 info range */
          + n_modules
-         + (section_hdr_info ? section_hdr_info->num : 0)));
+         + (section_hdr_info_valid ? section_hdr_info.num : 0)));
 
     memcpy(new_ranges, ranges, sizeof(struct elsewhere_range) * ranges_count);
     pmm_free(ranges, sizeof(struct elsewhere_range) * ranges_count);
@@ -197,20 +200,20 @@ noreturn void multiboot1_load(char *config, char *cmdline) {
     struct multiboot1_info *multiboot1_info =
         mb1_info_alloc(&mb1_info_raw, sizeof(struct multiboot1_info));
 
-    if (section_hdr_info != NULL) {
-        multiboot1_info->elf_sect.num = section_hdr_info->num;
-        multiboot1_info->elf_sect.size = section_hdr_info->section_entry_size;
-        multiboot1_info->elf_sect.shndx = section_hdr_info->str_section_idx;
+    if (section_hdr_info_valid == true) {
+        multiboot1_info->elf_sect.num = section_hdr_info.num;
+        multiboot1_info->elf_sect.size = section_hdr_info.section_entry_size;
+        multiboot1_info->elf_sect.shndx = section_hdr_info.str_section_idx;
 
         void *sections = mb1_info_alloc(&mb1_info_raw,
-            section_hdr_info->section_entry_size * section_hdr_info->num);
+            section_hdr_info.section_entry_size * section_hdr_info.num);
 
         multiboot1_info->elf_sect.addr = (uintptr_t)sections - mb1_info_slide;
 
-        memcpy(sections, kernel + section_hdr_info->section_offset, section_hdr_info->section_entry_size * section_hdr_info->num);
+        memcpy(sections, kernel + section_hdr_info.section_offset, section_hdr_info.section_entry_size * section_hdr_info.num);
 
-        for (size_t i = 0; i < section_hdr_info->num; i++) {
-            struct elf64_shdr *shdr = (void *)sections + i * section_hdr_info->section_entry_size;
+        for (size_t i = 0; i < section_hdr_info.num; i++) {
+            struct elf64_shdr *shdr = (void *)sections + i * section_hdr_info.section_entry_size;
 
             if (shdr->sh_addr != 0 || shdr->sh_size == 0) {
                 continue;
@@ -188,7 +188,8 @@ noreturn void multiboot2_load(char *config, char* cmdline) {
         }
     }
 
-    struct elf_section_hdr_info *section_hdr_info = NULL;
+    bool section_hdr_info_valid = false;
+    struct elf_section_hdr_info section_hdr_info = {0};
 
     struct elsewhere_range *ranges;
     uint64_t ranges_count;
@@ -241,12 +242,14 @@ noreturn void multiboot2_load(char *config, char* cmdline) {
                 panic(true, "multiboot2: ELF32 load failure");
 
             section_hdr_info = elf32_section_hdr_info(kernel);
+            section_hdr_info_valid = true;
             break;
         case 64: {
            if (!elf64_load_elsewhere(kernel, &e, &ranges, &ranges_count))
                 panic(true, "multiboot2: ELF64 load failure");
 
            section_hdr_info = elf64_section_hdr_info(kernel);
+           section_hdr_info_valid = true;
            break;
         }
         default:
@@ -293,8 +296,8 @@ noreturn void multiboot2_load(char *config, char* cmdline) {
     size_t mb2_info_size = get_multiboot2_info_size(
         cmdline,
         modules_size,
-        section_hdr_info ? section_hdr_info->section_entry_size : 0,
-        section_hdr_info ? section_hdr_info->num : 0,
+        section_hdr_info_valid ? section_hdr_info.section_entry_size : 0,
+        section_hdr_info_valid ? section_hdr_info.num : 0,
         smbios_tag_size
     );
 
@@ -305,7 +308,7 @@ noreturn void multiboot2_load(char *config, char* cmdline) {
         (ranges_count
          + 1 /* mb2 info range */
          + n_modules
-         + (section_hdr_info ? section_hdr_info->num : 0)));
+         + (section_hdr_info_valid ? section_hdr_info.num : 0)));
 
     memcpy(new_ranges, ranges, sizeof(struct elsewhere_range) * ranges_count);
     pmm_free(ranges, sizeof(struct elsewhere_range) * ranges_count);
@@ -333,25 +336,25 @@ noreturn void multiboot2_load(char *config, char* cmdline) {
     //////////////////////////////////////////////
     // Create ELF info tag
     //////////////////////////////////////////////
-    if (section_hdr_info == NULL) {
+    if (section_hdr_info_valid == false) {
         if (is_elf_info_requested) {
             panic(true, "multiboot2: Cannot return ELF file information");
         }
     } else {
-        uint32_t size = sizeof(struct multiboot_tag_elf_sections) + section_hdr_info->section_entry_size * section_hdr_info->num;
+        uint32_t size = sizeof(struct multiboot_tag_elf_sections) + section_hdr_info.section_entry_size * section_hdr_info.num;
         struct multiboot_tag_elf_sections *tag = (struct multiboot_tag_elf_sections*)(mb2_info + info_idx);
 
         tag->type = MULTIBOOT_TAG_TYPE_ELF_SECTIONS;
         tag->size = size;
 
-        tag->num = section_hdr_info->num;
-        tag->entsize = section_hdr_info->section_entry_size;
-        tag->shndx = section_hdr_info->str_section_idx;
+        tag->num = section_hdr_info.num;
+        tag->entsize = section_hdr_info.section_entry_size;
+        tag->shndx = section_hdr_info.str_section_idx;
 
-        memcpy(tag->sections, kernel + section_hdr_info->section_offset, section_hdr_info->section_entry_size * section_hdr_info->num);
+        memcpy(tag->sections, kernel + section_hdr_info.section_offset, section_hdr_info.section_entry_size * section_hdr_info.num);
 
-        for (size_t i = 0; i < section_hdr_info->num; i++) {
-            struct elf64_shdr *shdr = (void *)tag->sections + i * section_hdr_info->section_entry_size;
+        for (size_t i = 0; i < section_hdr_info.num; i++) {
+            struct elf64_shdr *shdr = (void *)tag->sections + i * section_hdr_info.section_entry_size;
 
             if (shdr->sh_addr != 0 || shdr->sh_size == 0) {
                 continue;
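Callers track the section-header information differently after this change: elf64_section_hdr_info()/elf32_section_hdr_info() now return the struct by value, so the old NULL check on an allocated pointer becomes an explicit validity flag, as the multiboot1 and multiboot2 hunks above show. A small, self-contained sketch of the new caller pattern follows; the field names come from the diff, but the field types, the stub producer, and the values are invented for illustration.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Field names as in the diff; types are assumptions for this sketch. */
    struct elf_section_hdr_info {
        uint32_t num;
        uint32_t section_entry_size;
        uint32_t str_section_idx;
        uint64_t section_offset;
    };

    /* Stub standing in for elf64_section_hdr_info(); returns by value. */
    static struct elf_section_hdr_info fake_section_hdr_info(void) {
        struct elf_section_hdr_info info = {0};
        info.num = 4;
        info.section_entry_size = 64;
        return info;
    }

    int main(void) {
        bool section_hdr_info_valid = false;
        struct elf_section_hdr_info section_hdr_info = {0};

        section_hdr_info = fake_section_hdr_info();
        section_hdr_info_valid = true;

        /* Same shape as the multiboot call sites: guard with the flag, not a NULL check. */
        printf("entries: %u\n", section_hdr_info_valid ? section_hdr_info.num : 0);
        return 0;
    }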