rulimine/common/lib/elf.c

766 lines
21 KiB
C
Raw Normal View History

2020-03-25 03:04:18 +03:00
#include <stdint.h>
#include <stddef.h>
2022-08-27 00:44:47 +03:00
#include <lib/misc.h>
#include <sys/cpu.h>
2020-03-25 03:04:18 +03:00
#include <lib/libc.h>
#include <lib/elf.h>
2020-05-10 01:38:27 +03:00
#include <lib/print.h>
#include <lib/rand.h>
#include <lib/elsewhere.h>
2020-09-20 13:03:44 +03:00
#include <mm/pmm.h>
2020-04-14 06:20:55 +03:00
#include <fs/file.h>
2020-03-25 03:04:18 +03:00
#define PT_LOAD 0x00000001
#define PT_DYNAMIC 0x00000002
2020-03-25 03:04:18 +03:00
#define PT_INTERP 0x00000003
#define PT_PHDR 0x00000006
#define DT_NULL 0x00000000
#define DT_NEEDED 0x00000001
#define DT_RELA 0x00000007
#define DT_RELASZ 0x00000008
#define DT_RELAENT 0x00000009
#define DT_FLAGS_1 0x6ffffffb
#define DF_1_PIE 0x08000000
#define ABI_SYSV 0x00
#define ARCH_X86_64 0x3e
#define ARCH_X86_32 0x03
#define ARCH_AARCH64 0xb7
#define ARCH_RISCV 0xf3
#define BITS_LE 0x01
#define ELFCLASS64 0x02
#define ET_DYN 0x0003
#define SHT_RELA 0x00000004
#define R_X86_64_RELATIVE 0x00000008
#define R_AARCH64_RELATIVE 0x00000403
#define R_RISCV_RELATIVE 0x00000003
2020-03-25 03:04:18 +03:00
/* Indices into identification array */
#define EI_CLASS 4
#define EI_DATA 5
#define EI_VERSION 6
#define EI_OSABI 7
2020-03-25 03:04:18 +03:00
2020-04-18 19:01:29 +03:00
// 32-bit ELF file header (Elf32_Ehdr equivalent).
struct elf32_hdr {
    uint8_t  ident[16];   // Magic + class/data/version/OSABI identification
    uint16_t type;        // Object file type (ET_*)
    uint16_t machine;     // Target architecture (ARCH_*)
    uint32_t version;     // Object file version
    uint32_t entry;       // Entry point virtual address
    uint32_t phoff;       // Program header table file offset
    uint32_t shoff;       // Section header table file offset
    uint32_t flags;       // Processor-specific flags
    uint16_t hdr_size;    // Size of this header
    uint16_t phdr_size;   // Size of one program header entry
    uint16_t ph_num;      // Number of program header entries
    uint16_t shdr_size;   // Size of one section header entry
    uint16_t sh_num;      // Number of section header entries
    uint16_t shstrndx;    // Index of the section name string table
};
struct elf64_phdr {
2020-03-25 03:04:18 +03:00
uint32_t p_type;
uint32_t p_flags;
2020-03-25 03:31:59 +03:00
uint64_t p_offset;
uint64_t p_vaddr;
uint64_t p_paddr;
uint64_t p_filesz;
uint64_t p_memsz;
uint64_t p_align;
2020-03-25 03:04:18 +03:00
};
2020-04-18 19:01:29 +03:00
// 32-bit ELF program header (Elf32_Phdr equivalent).
// Note the different field order compared to the 64-bit variant:
// p_flags comes last, as mandated by the ELF specification.
struct elf32_phdr {
    uint32_t p_type;      // Segment type (PT_*)
    uint32_t p_offset;    // Offset of segment data in the file
    uint32_t p_vaddr;     // Virtual address of the segment
    uint32_t p_paddr;     // Physical address of the segment
    uint32_t p_filesz;    // Bytes of data present in the file
    uint32_t p_memsz;     // Bytes occupied in memory (>= p_filesz)
    uint32_t p_flags;     // Segment permissions (R/W/X bits)
    uint32_t p_align;     // Required alignment of the segment
};
// 64-bit RELA relocation entry.
// NOTE(review): the standard Elf64_Rela packs type and symbol into one
// 64-bit r_info field; this struct appears to split it into two 32-bit
// halves (type in the low word on little-endian) — confirm against users.
struct elf64_rela {
    uint64_t r_addr;      // Virtual address the relocation applies to
    uint32_t r_info;      // Relocation type (low half of standard r_info)
    uint32_t r_symbol;    // Symbol index (high half of standard r_info)
    uint64_t r_addend;    // Constant addend
};
// 64-bit dynamic section entry (Elf64_Dyn equivalent).
struct elf64_dyn {
    uint64_t d_tag;       // Entry kind (DT_*)
    uint64_t d_un;        // Value or address, depending on d_tag
};
2021-03-28 16:46:59 +03:00
// Determine the word size (32 or 64) of an ELF image from its header.
// Returns -1 when the magic or the machine type is not recognised.
int elf_bits(uint8_t *elf) {
    struct elf64_hdr *hdr = (void *)elf;

    // Reject anything that does not carry the "\177ELF" magic.
    if (strncmp((char *)hdr->ident, "\177ELF", 4) != 0) {
        printv("elf: Not a valid ELF file.\n");
        return -1;
    }

    switch (hdr->machine) {
        case ARCH_X86_64:
        case ARCH_AARCH64:
            return 64;
        case ARCH_RISCV:
            // RISC-V shares one machine value across widths; EI_CLASS decides.
            if (hdr->ident[EI_CLASS] == ELFCLASS64) {
                return 64;
            }
            return 32;
        case ARCH_X86_32:
            return 32;
        default:
            return -1;
    }
}
2022-09-11 11:05:45 +03:00
// Extract the section header table geometry from a 64-bit ELF header.
struct elf_section_hdr_info elf64_section_hdr_info(uint8_t *elf) {
    struct elf64_hdr *hdr = (void *)elf;

    struct elf_section_hdr_info info = {
        .num                = hdr->sh_num,
        .section_entry_size = hdr->shdr_size,
        .str_section_idx    = hdr->shstrndx,
        .section_offset     = hdr->shoff,
    };

    return info;
}
// Extract the section header table geometry from a 32-bit ELF header.
struct elf_section_hdr_info elf32_section_hdr_info(uint8_t *elf) {
    struct elf32_hdr *hdr = (void *)elf;

    struct elf_section_hdr_info info = {
        .num                = hdr->sh_num,
        .section_entry_size = hdr->shdr_size,
        .str_section_idx    = hdr->shstrndx,
        .section_offset     = hdr->shoff,
    };

    return info;
}
2021-03-28 16:46:59 +03:00
// Report whether the image requests load-time relocation: either it is
// flagged PIE via DT_FLAGS_1, or it carries a DT_RELA table.
static bool elf64_is_relocatable(uint8_t *elf, struct elf64_hdr *hdr) {
    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
        panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
    }

    // Walk the program headers looking for the PT_DYNAMIC segment.
    for (uint16_t seg = 0; seg < hdr->ph_num; seg++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + seg * hdr->phdr_size);

        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        // Scan its dynamic entries for relocation markers.
        for (uint16_t ent = 0; ent < phdr->p_filesz / sizeof(struct elf64_dyn); ent++) {
            struct elf64_dyn *dyn = (void *)elf + (phdr->p_offset + ent * sizeof(struct elf64_dyn));

            if (dyn->d_tag == DT_RELA) {
                return true;
            }
            if (dyn->d_tag == DT_FLAGS_1 && (dyn->d_un & DF_1_PIE)) {
                return true;
            }
        }
    }

    return false;
}
2022-09-11 11:05:45 +03:00
// Apply the architecture's RELATIVE relocations that land inside
// [vaddr, vaddr + size) to the copy of that region held in `buffer`,
// writing `slide + addend` into each 8-byte target slot.
// Returns false on an unsupported relocation type or RELA entry size.
static bool elf64_apply_relocations(uint8_t *elf, struct elf64_hdr *hdr, void *buffer, uint64_t vaddr, size_t size, uint64_t slide) {
    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
        panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
    }

    // Locate the PT_DYNAMIC segment, which describes the RELA table.
    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type != PT_DYNAMIC) {
            continue;
        }

        uint64_t rela_offset = 0;
        uint64_t rela_size = 0;
        uint64_t rela_ent = 0;

        for (uint16_t j = 0; j < phdr->p_filesz / sizeof(struct elf64_dyn); j++) {
            struct elf64_dyn *dyn = (void *)elf + (phdr->p_offset + j * sizeof(struct elf64_dyn));

            if (dyn->d_tag == DT_RELA) {
                rela_offset = dyn->d_un;
            } else if (dyn->d_tag == DT_RELAENT) {
                rela_ent = dyn->d_un;
            } else if (dyn->d_tag == DT_RELASZ) {
                rela_size = dyn->d_un;
            }
        }

        if (rela_offset == 0) {
            // No RELA table: nothing to relocate.
            break;
        }

        if (rela_ent != sizeof(struct elf64_rela)) {
            print("elf: Unknown sh_entsize for RELA section!\n");
            return false;
        }

        // DT_RELA holds a virtual address; translate it to a file offset by
        // finding the loaded segment that contains it.
        for (uint16_t j = 0; j < hdr->ph_num; j++) {
            struct elf64_phdr *seg = (void *)elf + (hdr->phoff + j * hdr->phdr_size);

            if (seg->p_vaddr <= rela_offset && seg->p_vaddr + seg->p_filesz > rela_offset) {
                rela_offset = rela_offset - seg->p_vaddr + seg->p_offset;
                break;
            }
        }

        // Walk the table and patch every entry that falls inside the buffer.
        for (uint64_t off = 0; off < rela_size; off += rela_ent) {
            struct elf64_rela *rel = (void *)elf + (rela_offset + off);

            switch (rel->r_info) {
#if defined (__x86_64__) || defined (__i386__)
                case R_X86_64_RELATIVE:
#elif defined (__aarch64__)
                case R_AARCH64_RELATIVE:
#elif defined (__riscv64)
                case R_RISCV_RELATIVE:
#else
#error Unknown architecture
#endif
                {
                    // Skip relocations that do not target this buffer,
                    // either because they fall before it or past its end.
                    if (rel->r_addr < vaddr) {
                        continue;
                    }
                    if (vaddr + size < rel->r_addr + 8) {
                        continue;
                    }

                    // Patch the 8-byte slot inside the buffer.
                    uint64_t *slot = (uint64_t *)((uint8_t *)buffer - vaddr + rel->r_addr);
                    *slot = slide + rel->r_addend;
                    break;
                }
                default: {
                    print("elf: Unknown RELA type: %x\n", rel->r_info);
                    return false;
                }
            }
        }

        break;
    }

    return true;
}
2022-09-11 11:05:45 +03:00
// Find the section named `name` in a 64-bit ELF and copy its contents into
// `buffer`, then apply load-time relocations with the given `slide`.
// If `limit` is 0, a buffer of the exact section size is allocated and
// stored through `*(void **)buffer` before copying.
// Returns false if the ELF is invalid for this target, the section is
// missing, or its size exceeds `limit`.
bool elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide) {
    struct elf64_hdr *hdr = (void *)elf;

    if (strncmp((char *)hdr->ident, "\177ELF", 4)) {
        printv("elf: Not a valid ELF file.\n");
        return false;
    }

    if (hdr->ident[EI_DATA] != BITS_LE) {
        printv("elf: Not a Little-endian ELF file.\n");
        return false;
    }

#if defined (__x86_64__) || defined (__i386__)
    if (hdr->machine != ARCH_X86_64) {
        printv("elf: Not an x86-64 ELF file.\n");
        return false;
    }
#elif defined (__aarch64__)
    if (hdr->machine != ARCH_AARCH64) {
        printv("elf: Not an aarch64 ELF file.\n");
        return false;
    }
#elif defined (__riscv64)
    // BUGFIX: accept only files that are both RISC-V *and* 64-bit class.
    // The previous `machine != ARCH_RISCV && class == ELFCLASS64` let any
    // non-RISC-V image with a 32-bit class slip through this check.
    if (hdr->machine != ARCH_RISCV || hdr->ident[EI_CLASS] != ELFCLASS64) {
        printv("elf: Not a riscv64 ELF file.\n");
        return false;
    }
#else
#error Unknown architecture
#endif

    if (hdr->sh_num == 0) {
        return false;
    }

    if (hdr->shdr_size < sizeof(struct elf64_shdr)) {
        panic(true, "elf: shdr_size < sizeof(struct elf64_shdr)");
    }

    // Resolve the section name string table.
    struct elf64_shdr *shstrtab = (void *)elf + (hdr->shoff + hdr->shstrndx * hdr->shdr_size);
    char *names = (void *)elf + shstrtab->sh_offset;

    for (uint16_t i = 0; i < hdr->sh_num; i++) {
        struct elf64_shdr *section = (void *)elf + (hdr->shoff + i * hdr->shdr_size);

        if (strcmp(&names[section->sh_name], name) != 0) {
            continue;
        }

        if (limit == 0) {
            // Caller asked us to allocate: hand back a fresh buffer of the
            // exact section size and copy into that.
            *(void **)buffer = ext_mem_alloc(section->sh_size);
            buffer = *(void **)buffer;
            limit = section->sh_size;
        }

        if (section->sh_size > limit) {
            return false;
        }

        memcpy(buffer, elf + section->sh_offset, section->sh_size);
        return elf64_apply_relocations(elf, hdr, buffer, section->sh_addr, section->sh_size, slide);
    }

    // Section not found.
    return false;
}
// Return the largest p_align among all PT_LOAD segments.
// Panics if the executable has no loadable segments at all.
static uint64_t elf64_max_align(uint8_t *elf) {
    struct elf64_hdr *hdr = (void *)elf;
    uint64_t best = 0;

    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
        panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
    }

    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_align > best) {
            best = phdr->p_align;
        }
    }

    if (best == 0) {
        panic(true, "elf: Executable has no loadable segments");
    }

    return best;
}
// Build an array of elf_range descriptors, one per higher-half PT_LOAD
// segment, with `slide` applied and bounds widened to p_align granularity.
// The array and its length are returned through the out parameters.
static void elf64_get_ranges(uint8_t *elf, uint64_t slide, struct elf_range **_ranges, uint64_t *_ranges_count) {
    struct elf64_hdr *hdr = (void *)elf;

    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
        panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
    }

    // First pass: count qualifying segments so the array can be sized.
    uint64_t ranges_count = 0;
    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        // Only higher-half segments are mapped as ranges.
        if (phdr->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
            continue;
        }

        ranges_count++;
    }

    if (ranges_count == 0) {
        panic(true, "elf: No higher half PHDRs exist");
    }

    struct elf_range *ranges = ext_mem_alloc(ranges_count * sizeof(struct elf_range));

    // Second pass: fill in one range per qualifying segment.
    size_t out = 0;
    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        if (phdr->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
            continue;
        }

        uint64_t load_addr = phdr->p_vaddr + slide;
        uint64_t this_top = load_addr + phdr->p_memsz;

        ranges[out].base = load_addr & ~(phdr->p_align - 1);
        ranges[out].length = ALIGN_UP(this_top - ranges[out].base, phdr->p_align);
        ranges[out].permissions = phdr->p_flags & 0b111;

        out++;
    }

    *_ranges_count = ranges_count;
    *_ranges = ranges;
}
// Load a 64-bit higher-half ELF executable into freshly allocated physical
// memory, optionally applying a KASLR slide when the image is relocatable.
//
// Out parameters (those marked optional may be NULL):
//   entry_point            - slid entry point virtual address
//   _slide (optional)      - the KASLR slide that was applied
//   ranges/ranges_count    - (optional, both or neither) per-segment mapping
//                            ranges, see elf64_get_ranges()
//   physical_base          - base of the allocation holding the image
//   virtual_base           - slid lowest segment virtual address
//   _image_size (optional) - total span of the loaded segments
//   _image_size_before_bss (optional) - image size minus trailing BSS
//   is_reloc (optional)    - whether the image is relocatable
// Returns false on a bad ELF magic; panics on other validation failures.
bool elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *_slide, uint32_t alloc_type, bool kaslr, struct elf_range **ranges, uint64_t *ranges_count, uint64_t *physical_base, uint64_t *virtual_base, uint64_t *_image_size, uint64_t *_image_size_before_bss, bool *is_reloc) {
    struct elf64_hdr *hdr = (void *)elf;

    if (strncmp((char *)hdr->ident, "\177ELF", 4)) {
        printv("elf: Not a valid ELF file.\n");
        return false;
    }

    if (hdr->ident[EI_DATA] != BITS_LE) {
        panic(true, "elf: Not a Little-endian ELF file.\n");
    }

#if defined (__x86_64__) || defined (__i386__)
    if (hdr->machine != ARCH_X86_64) {
        panic(true, "elf: Not an x86-64 ELF file.\n");
    }
#elif defined (__aarch64__)
    if (hdr->machine != ARCH_AARCH64) {
        panic(true, "elf: Not an aarch64 ELF file.\n");
    }
#elif defined (__riscv64)
    // BUGFIX: accept only files that are both RISC-V *and* 64-bit class.
    // The previous `machine != ARCH_RISCV && class == ELFCLASS64` let any
    // non-RISC-V image with a 32-bit class pass this check.
    if (hdr->machine != ARCH_RISCV || hdr->ident[EI_CLASS] != ELFCLASS64) {
        panic(true, "elf: Not a riscv64 ELF file.\n");
    }
#else
#error Unknown architecture
#endif

    uint64_t slide = 0;
    size_t try_count = 0;
    size_t max_simulated_tries = 0x10000;

    uint64_t entry = hdr->entry;
    uint64_t max_align = elf64_max_align(elf);

    uint64_t image_size = 0;

    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
        panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
    }

    // Scan the higher-half PT_LOAD segments to find the image's virtual
    // extent, rejecting overlapping segments along the way.
    uint64_t min_vaddr = (uint64_t)-1;
    uint64_t max_vaddr = 0;
    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        // Drop entries not in the higher half
        if (phdr->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
            continue;
        }

        // Check for overlapping PHDRs
        for (uint16_t j = 0; j < hdr->ph_num; j++) {
            struct elf64_phdr *phdr_in = (void *)elf + (hdr->phoff + j * hdr->phdr_size);

            if (phdr_in->p_type != PT_LOAD) {
                continue;
            }

            // Drop entries not in the higher half
            if (phdr_in->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
                continue;
            }

            if (phdr_in == phdr) {
                continue;
            }

            if ((phdr_in->p_vaddr >= phdr->p_vaddr
              && phdr_in->p_vaddr < phdr->p_vaddr + phdr->p_memsz)
             ||
                (phdr_in->p_vaddr + phdr_in->p_memsz > phdr->p_vaddr
              && phdr_in->p_vaddr + phdr_in->p_memsz <= phdr->p_vaddr + phdr->p_memsz)) {
                panic(true, "elf: Attempted to load ELF file with overlapping PHDRs (%u and %u overlap)", i, j);
            }

            // When the caller wants per-segment mapping ranges, segments
            // that share a 4 KiB page must agree on permissions.
            if (ranges != NULL) {
                uint64_t page_rounded_base = ALIGN_DOWN(phdr->p_vaddr, 4096);
                uint64_t page_rounded_top = ALIGN_UP(phdr->p_vaddr + phdr->p_memsz, 4096);

                uint64_t page_rounded_base_in = ALIGN_DOWN(phdr_in->p_vaddr, 4096);
                uint64_t page_rounded_top_in = ALIGN_UP(phdr_in->p_vaddr + phdr_in->p_memsz, 4096);

                if ((page_rounded_base >= page_rounded_base_in
                  && page_rounded_base < page_rounded_top_in)
                 ||
                    (page_rounded_top > page_rounded_base_in
                  && page_rounded_top <= page_rounded_top_in)) {
                    if ((phdr->p_flags & 0b111) != (phdr_in->p_flags & 0b111)) {
                        panic(true, "elf: Attempted to load ELF file with PHDRs with different permissions sharing the same memory page.");
                    }
                }
            }
        }

        if (phdr->p_vaddr < min_vaddr) {
            min_vaddr = phdr->p_vaddr;
        }

        if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
            max_vaddr = phdr->p_vaddr + phdr->p_memsz;
        }
    }

    if (max_vaddr == 0 || min_vaddr == (uint64_t)-1) {
        panic(true, "elf: No higher half PHDRs exist");
    }

    image_size = max_vaddr - min_vaddr;

    *physical_base = (uintptr_t)ext_mem_alloc_type_aligned(image_size, alloc_type, max_align);
    *virtual_base = min_vaddr;

    if (_image_size) {
        *_image_size = image_size;
    }

    // BUGFIX: track relocatability in a local. The previous code only
    // NULL-checked `is_reloc` when storing to it, but then dereferenced it
    // unconditionally in the KASLR retry loop below, crashing for callers
    // that pass is_reloc == NULL.
    bool relocatable = elf64_is_relocatable(elf, hdr);
    if (is_reloc != NULL) {
        *is_reloc = relocatable;
    }

again:
    if (relocatable && kaslr) {
        slide = rand32() & ~(max_align - 1);

        // Keep the slid image below the 2 GiB boundary the kernel's
        // code model requires; retry with a new slide otherwise.
        if ((*virtual_base - FIXED_HIGHER_HALF_OFFSET_64) + slide + image_size >= 0x80000000) {
            if (++try_count == max_simulated_tries) {
                panic(true, "elf: Image wants to load too high");
            }
            goto again;
        }
    }

    uint64_t bss_size = 0;

    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        // Drop entries not in the higher half
        if (phdr->p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
            continue;
        }

        // Sanity checks
        if (phdr->p_filesz > phdr->p_memsz) {
            panic(true, "elf: p_filesz > p_memsz");
        }

        uint64_t load_addr = *physical_base + (phdr->p_vaddr - *virtual_base);

#if defined (__aarch64__)
        uint64_t this_top = load_addr + phdr->p_memsz;
        uint64_t mem_base, mem_size;

        mem_base = load_addr & ~(phdr->p_align - 1);
        mem_size = this_top - mem_base;
#endif

        memcpy((void *)(uintptr_t)load_addr, elf + (phdr->p_offset), phdr->p_filesz);

        // NOTE(review): assumes the BSS belongs to the last higher-half
        // PT_LOAD segment (bss_size is overwritten each iteration).
        bss_size = phdr->p_memsz - phdr->p_filesz;

        if (!elf64_apply_relocations(elf, hdr, (void *)(uintptr_t)load_addr, phdr->p_vaddr, phdr->p_memsz, slide)) {
            panic(true, "elf: Failed to apply relocations");
        }

#if defined (__aarch64__)
        // Keep instruction fetches coherent with the freshly written code.
        clean_dcache_poc(mem_base, mem_base + mem_size);
        inval_icache_pou(mem_base, mem_base + mem_size);
#endif
    }

    if (_image_size_before_bss != NULL) {
        *_image_size_before_bss = image_size - bss_size;
    }

    *virtual_base += slide;
    *entry_point = entry + slide;

    if (_slide) {
        *_slide = slide;
    }

    if (ranges_count != NULL && ranges != NULL) {
        elf64_get_ranges(elf, slide, ranges, ranges_count);
    }

    return true;
}
// Stage the PT_LOAD segments of a 32-bit x86 ELF into scratch buffers
// ("elsewhere"), producing a list of ranges to be copied to their final
// physical addresses later. The entry point is translated from virtual to
// physical using the segment that contains it.
bool elf32_load_elsewhere(uint8_t *elf, uint64_t *entry_point,
                          struct elsewhere_range **ranges,
                          uint64_t *ranges_count) {
    struct elf32_hdr *hdr = (void *)elf;

    if (strncmp((char *)hdr->ident, "\177ELF", 4) != 0) {
        printv("elf: Not a valid ELF file.\n");
        return false;
    }

    if (hdr->ident[EI_DATA] != BITS_LE) {
        printv("elf: Not a Little-endian ELF file.\n");
        return false;
    }

    if (hdr->machine != ARCH_X86_32) {
        printv("elf: Not an x86_32 ELF file.\n");
        return false;
    }

    *entry_point = hdr->entry;
    bool entry_adjusted = false;

    if (hdr->phdr_size < sizeof(struct elf32_phdr)) {
        panic(true, "elf: phdr_size < sizeof(struct elf32_phdr)");
    }

    // First pass: count PT_LOAD segments to size the range array.
    *ranges_count = 0;
    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf32_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type == PT_LOAD) {
            *ranges_count += 1;
        }
    }

    *ranges = ext_mem_alloc(sizeof(struct elsewhere_range) * *ranges_count);

    // Second pass: stage each segment and record its target range.
    size_t out = 0;
    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf32_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        // Sanity checks
        if (phdr->p_filesz > phdr->p_memsz) {
            panic(true, "elf: p_filesz > p_memsz");
        }

        void *elsewhere = ext_mem_alloc(phdr->p_memsz);
        memcpy(elsewhere, elf + phdr->p_offset, phdr->p_filesz);

        // Rebase the entry point onto the physical address of the first
        // segment whose virtual span contains it.
        if (!entry_adjusted
         && *entry_point >= phdr->p_vaddr
         && *entry_point < (phdr->p_vaddr + phdr->p_memsz)) {
            *entry_point = *entry_point - phdr->p_vaddr + phdr->p_paddr;
            entry_adjusted = true;
        }

        (*ranges)[out].elsewhere = (uintptr_t)elsewhere;
        (*ranges)[out].target = phdr->p_paddr;
        (*ranges)[out].length = phdr->p_memsz;
        out++;
    }

    return true;
}
// Stage the PT_LOAD segments of a 64-bit x86 ELF into scratch buffers
// ("elsewhere"), producing a list of ranges to be copied to their final
// physical addresses later. The entry point is translated from virtual to
// physical using the segment that contains it.
bool elf64_load_elsewhere(uint8_t *elf, uint64_t *entry_point,
                          struct elsewhere_range **ranges,
                          uint64_t *ranges_count) {
    struct elf64_hdr *hdr = (void *)elf;

    if (strncmp((char *)hdr->ident, "\177ELF", 4) != 0) {
        printv("elf: Not a valid ELF file.\n");
        return false;
    }

    if (hdr->ident[EI_DATA] != BITS_LE) {
        printv("elf: Not a Little-endian ELF file.\n");
        return false;
    }

    if (hdr->machine != ARCH_X86_64) {
        printv("elf: Not an x86-64 ELF file.\n");
        return false;
    }

    *entry_point = hdr->entry;
    bool entry_adjusted = false;

    if (hdr->phdr_size < sizeof(struct elf64_phdr)) {
        panic(true, "elf: phdr_size < sizeof(struct elf64_phdr)");
    }

    // First pass: count PT_LOAD segments to size the range array.
    *ranges_count = 0;
    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type == PT_LOAD) {
            *ranges_count += 1;
        }
    }

    *ranges = ext_mem_alloc(sizeof(struct elsewhere_range) * *ranges_count);

    // Second pass: stage each segment and record its target range.
    size_t out = 0;
    for (uint16_t i = 0; i < hdr->ph_num; i++) {
        struct elf64_phdr *phdr = (void *)elf + (hdr->phoff + i * hdr->phdr_size);

        if (phdr->p_type != PT_LOAD) {
            continue;
        }

        // Sanity checks
        if (phdr->p_filesz > phdr->p_memsz) {
            panic(true, "elf: p_filesz > p_memsz");
        }

        void *elsewhere = ext_mem_alloc(phdr->p_memsz);
        memcpy(elsewhere, elf + phdr->p_offset, phdr->p_filesz);

        // Rebase the entry point onto the physical address of the first
        // segment whose virtual span contains it.
        if (!entry_adjusted
         && *entry_point >= phdr->p_vaddr
         && *entry_point < (phdr->p_vaddr + phdr->p_memsz)) {
            *entry_point = *entry_point - phdr->p_vaddr + phdr->p_paddr;
            entry_adjusted = true;
        }

        (*ranges)[out].elsewhere = (uintptr_t)elsewhere;
        (*ranges)[out].target = phdr->p_paddr;
        (*ranges)[out].length = phdr->p_memsz;
        out++;
    }

    return true;
}