2020-03-25 03:04:18 +03:00
|
|
|
#include <stdint.h>
|
|
|
|
#include <stddef.h>
|
|
|
|
#include <lib/blib.h>
|
|
|
|
#include <lib/libc.h>
|
|
|
|
#include <lib/elf.h>
|
2020-05-10 01:38:27 +03:00
|
|
|
#include <lib/print.h>
|
2021-03-26 17:47:59 +03:00
|
|
|
#include <lib/rand.h>
|
2020-09-20 13:03:44 +03:00
|
|
|
#include <mm/pmm.h>
|
2020-04-14 06:20:55 +03:00
|
|
|
#include <fs/file.h>
|
2020-03-25 03:04:18 +03:00
|
|
|
|
|
|
|
#define PT_LOAD 0x00000001
|
|
|
|
#define PT_INTERP 0x00000003
|
|
|
|
#define PT_PHDR 0x00000006
|
|
|
|
|
2020-04-18 19:01:29 +03:00
|
|
|
#define ABI_SYSV 0x00
|
2020-03-25 03:04:18 +03:00
|
|
|
#define ARCH_X86_64 0x3e
|
2020-04-18 19:01:29 +03:00
|
|
|
#define ARCH_X86_32 0x03
|
|
|
|
#define BITS_LE 0x01
|
2020-05-29 12:05:50 +03:00
|
|
|
#define ET_DYN 0x0003
|
|
|
|
#define SHT_RELA 0x00000004
|
|
|
|
#define R_X86_64_RELATIVE 0x00000008
|
2020-03-25 03:04:18 +03:00
|
|
|
|
|
|
|
/* Indices into identification array */
|
2020-03-26 01:22:32 +03:00
|
|
|
#define EI_CLASS 4
|
|
|
|
#define EI_DATA 5
|
|
|
|
#define EI_VERSION 6
|
|
|
|
#define EI_OSABI 7
|
2020-03-25 03:04:18 +03:00
|
|
|
|
|
|
|
|
2020-04-18 19:01:29 +03:00
|
|
|
/* ELF32 file header (corresponds to Elf32_Ehdr in the ELF specification). */
struct elf32_hdr {
    uint8_t ident[16];    /* Magic and identification bytes; indexed by EI_* */
    uint16_t type;        /* Object file type (e.g. ET_DYN) */
    uint16_t machine;     /* Target architecture (checked against ARCH_X86_32) */
    uint32_t version;
    uint32_t entry;       /* Entry point virtual address */
    uint32_t phoff;       /* File offset of the program header table */
    uint32_t shoff;       /* File offset of the section header table */
    uint32_t flags;
    uint16_t hdr_size;    /* Size of this header in bytes */
    uint16_t phdr_size;   /* Size of one program header table entry */
    uint16_t ph_num;      /* Number of program header table entries */
    uint16_t shdr_size;   /* Size of one section header table entry */
    uint16_t sh_num;      /* Number of section header table entries */
    uint16_t shstrndx;    /* Section header index of the section name string table */
};
|
|
|
|
|
|
|
|
/* ELF64 program header (corresponds to Elf64_Phdr in the ELF specification). */
struct elf64_phdr {
    uint32_t p_type;      /* Segment type (PT_LOAD segments are the ones we map) */
    uint32_t p_flags;     /* Segment permission flags (low 3 bits: R/W/X) */
    uint64_t p_offset;    /* File offset of the segment's data */
    uint64_t p_vaddr;     /* Virtual address the segment wants to load at */
    uint64_t p_paddr;     /* Physical address (used when use_paddr loading) */
    uint64_t p_filesz;    /* Bytes of data present in the file */
    uint64_t p_memsz;     /* Bytes the segment occupies in memory (>= p_filesz; rest zeroed) */
    uint64_t p_align;     /* Required segment alignment */
};
|
|
|
|
|
2020-04-18 19:01:29 +03:00
|
|
|
/* ELF32 program header (corresponds to Elf32_Phdr). Note the different field
 * order vs. elf64_phdr: in ELF32, p_flags comes after p_memsz. */
struct elf32_phdr {
    uint32_t p_type;      /* Segment type (PT_LOAD segments are the ones we map) */
    uint32_t p_offset;    /* File offset of the segment's data */
    uint32_t p_vaddr;     /* Virtual address the segment wants to load at */
    uint32_t p_paddr;     /* Physical address (elf32_load loads at this address) */
    uint32_t p_filesz;    /* Bytes of data present in the file */
    uint32_t p_memsz;     /* Bytes the segment occupies in memory (>= p_filesz; rest zeroed) */
    uint32_t p_flags;     /* Segment permission flags */
    uint32_t p_align;     /* Required segment alignment */
};
|
|
|
|
|
|
|
|
/* ELF32 section header (corresponds to Elf32_Shdr). */
struct elf32_shdr {
    uint32_t sh_name;      /* Offset of the section's name in the shstrtab */
    uint32_t sh_type;      /* Section type */
    uint32_t sh_flags;
    uint32_t sh_addr;      /* Virtual address of the section at run time */
    uint32_t sh_offset;    /* File offset of the section's data */
    uint32_t sh_size;      /* Size of the section in bytes */
    uint32_t sh_link;
    uint32_t sh_info;
    uint32_t sh_addralign;
    uint32_t sh_entsize;   /* Entry size for table-like sections */
};
|
|
|
|
|
2020-05-29 12:05:50 +03:00
|
|
|
/* ELF64 relocation entry with explicit addend (Elf64_Rela).
 * NOTE(review): the standard Elf64_Rela has a single 64-bit r_info field
 * packing symbol (high 32 bits) and type (low 32 bits); this struct splits
 * it into two uint32_t halves, which only matches the on-disk layout on
 * little-endian targets — consistent with the BITS_LE check done elsewhere
 * in this file before relocations are applied. */
struct elf64_rela {
    uint64_t r_addr;     /* Address to relocate (r_offset in the ELF spec) */
    uint32_t r_info;     /* Relocation type (low half of the standard r_info) */
    uint32_t r_symbol;   /* Symbol index (high half of the standard r_info) */
    uint64_t r_addend;   /* Constant addend used to compute the value */
};
|
|
|
|
|
2021-03-28 16:46:59 +03:00
|
|
|
int elf_bits(uint8_t *elf) {
|
2020-04-18 19:01:29 +03:00
|
|
|
struct elf64_hdr hdr;
|
2021-03-28 16:46:59 +03:00
|
|
|
memcpy(&hdr, elf + (0), 20);
|
2020-04-18 19:01:29 +03:00
|
|
|
|
|
|
|
if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
|
2021-11-29 00:18:49 +03:00
|
|
|
printv("elf: Not a valid ELF file.\n");
|
2020-04-18 19:01:29 +03:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (hdr.machine) {
|
|
|
|
case ARCH_X86_64:
|
|
|
|
return 64;
|
|
|
|
case ARCH_X86_32:
|
|
|
|
return 32;
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-28 16:46:59 +03:00
|
|
|
static bool elf64_is_relocatable(uint8_t *elf, struct elf64_hdr *hdr) {
|
2021-03-26 17:47:59 +03:00
|
|
|
// Find RELA sections
|
|
|
|
for (uint16_t i = 0; i < hdr->sh_num; i++) {
|
|
|
|
struct elf64_shdr section;
|
2021-03-28 16:46:59 +03:00
|
|
|
memcpy(§ion, elf + (hdr->shoff + i * sizeof(struct elf64_shdr)),
|
2021-03-26 17:47:59 +03:00
|
|
|
sizeof(struct elf64_shdr));
|
|
|
|
|
|
|
|
if (section.sh_type != SHT_RELA)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (section.sh_entsize != sizeof(struct elf64_rela)) {
|
|
|
|
print("elf: Unknown sh_entsize for RELA section!\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
2021-08-29 17:11:56 +03:00
|
|
|
|
2021-03-26 17:47:59 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-03-28 16:46:59 +03:00
|
|
|
// Apply all R_X86_64_RELATIVE relocations from the image's RELA sections
// that fall inside the window [vaddr, vaddr + size), writing the relocated
// values into `buffer` (which holds a copy of that window's bytes).
// Returns 0 on success, 1 on an unsupported entry size or relocation type.
static int elf64_apply_relocations(uint8_t *elf, struct elf64_hdr *hdr, void *buffer, uint64_t vaddr, size_t size, uint64_t slide) {
    // Find RELA sections
    for (uint16_t i = 0; i < hdr->sh_num; i++) {
        struct elf64_shdr section;
        memcpy(&section, elf + (hdr->shoff + i * sizeof(struct elf64_shdr)),
               sizeof(struct elf64_shdr));

        if (section.sh_type != SHT_RELA)
            continue;

        if (section.sh_entsize != sizeof(struct elf64_rela)) {
            print("elf: Unknown sh_entsize for RELA section!\n");
            return 1;
        }

        // This is a RELA header, get and apply all relocations
        for (uint64_t offset = 0; offset < section.sh_size; offset += section.sh_entsize) {
            struct elf64_rela relocation;
            memcpy(&relocation, elf + (section.sh_offset + offset), sizeof(relocation));

            // r_info holds the relocation type (little-endian split of the
            // standard 64-bit r_info field, see struct elf64_rela).
            switch (relocation.r_info) {
                case R_X86_64_RELATIVE: {
                    // Relocation is before buffer
                    if (relocation.r_addr < vaddr)
                        continue;

                    // Relocation is after buffer
                    // (the 8-byte relocated quantity must fit entirely inside)
                    if (vaddr + size < relocation.r_addr + 8)
                        continue;

                    // It's inside it, calculate where it is
                    uint64_t *ptr = (uint64_t *)((uint8_t *)buffer - vaddr + relocation.r_addr);

                    // Write the relocated value
                    *ptr = slide + relocation.r_addend;
                    break;
                }
                default:
                    print("elf: Unknown RELA type: %x\n", relocation.r_info);
                    return 1;
            }
        }
    }

    return 0;
}
|
|
|
|
|
2021-03-28 16:46:59 +03:00
|
|
|
// Copy the named section of a 64-bit ELF image into `buffer`, then apply
// relocations to the copied bytes.
// `limit` must equal the section's size exactly.
// Returns: 0 on success, 1 on an invalid/unsupported ELF, 2 if no section
// with that name exists, 3 if the section is larger than `limit`, 4 if it
// is smaller than `limit`; relocation failures propagate from
// elf64_apply_relocations.
int elf64_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit, uint64_t slide) {
    struct elf64_hdr hdr;
    memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));

    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
        printv("elf: Not a valid ELF file.\n");
        return 1;
    }

    if (hdr.ident[EI_DATA] != BITS_LE) {
        printv("elf: Not a Little-endian ELF file.\n");
        return 1;
    }

    if (hdr.machine != ARCH_X86_64) {
        printv("elf: Not an x86_64 ELF file.\n");
        return 1;
    }

    // Load the section name string table so section names can be compared.
    struct elf64_shdr shstrtab;
    memcpy(&shstrtab, elf + (hdr.shoff + hdr.shstrndx * sizeof(struct elf64_shdr)),
           sizeof(struct elf64_shdr));

    char *names = ext_mem_alloc(shstrtab.sh_size);
    memcpy(names, elf + (shstrtab.sh_offset), shstrtab.sh_size);

    int ret;

    for (uint16_t i = 0; i < hdr.sh_num; i++) {
        struct elf64_shdr section;
        memcpy(&section, elf + (hdr.shoff + i * sizeof(struct elf64_shdr)),
               sizeof(struct elf64_shdr));

        if (!strcmp(&names[section.sh_name], name)) {
            // The section must match the expected size exactly.
            if (section.sh_size > limit) {
                ret = 3;
                goto out;
            }
            if (section.sh_size < limit) {
                ret = 4;
                goto out;
            }
            memcpy(buffer, elf + (section.sh_offset), section.sh_size);
            // Fix up any relocations that land inside the copied section.
            ret = elf64_apply_relocations(elf, &hdr, buffer, section.sh_addr, section.sh_size, slide);
            goto out;
        }
    }

    ret = 2;

out:
    // goto-based cleanup: the name table is freed on every exit path.
    pmm_free(names, shstrtab.sh_size);

    return ret;
}
|
|
|
|
|
2021-09-09 11:40:07 +03:00
|
|
|
/// SAFETY: The caller must ensure that the provided `elf` is a valid 64-bit
|
|
|
|
/// ELF file.
|
2021-09-12 05:54:37 +03:00
|
|
|
struct elf_section_hdr_info* elf64_section_hdr_info(uint8_t *elf) {
|
2021-09-10 11:02:54 +03:00
|
|
|
struct elf_section_hdr_info* info = ext_mem_alloc(sizeof(struct elf_section_hdr_info));
|
|
|
|
|
2021-09-09 11:40:07 +03:00
|
|
|
struct elf64_hdr hdr;
|
2021-09-10 11:02:54 +03:00
|
|
|
memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
|
2021-09-09 11:40:07 +03:00
|
|
|
|
2021-09-10 11:02:54 +03:00
|
|
|
info->num = hdr.sh_num;
|
2021-09-09 11:40:07 +03:00
|
|
|
info->section_entry_size = hdr.shdr_size;
|
2021-09-10 11:02:54 +03:00
|
|
|
info->section_hdr_size = info->num * info->section_entry_size;
|
2021-09-09 11:40:07 +03:00
|
|
|
info->str_section_idx = hdr.shstrndx;
|
|
|
|
info->section_hdrs = ext_mem_alloc(info->section_hdr_size);
|
|
|
|
|
|
|
|
memcpy(info->section_hdrs, elf + (hdr.shoff), info->section_hdr_size);
|
2021-09-10 11:02:54 +03:00
|
|
|
|
2021-09-12 05:54:37 +03:00
|
|
|
return info;
|
2021-09-09 11:40:07 +03:00
|
|
|
}
|
|
|
|
|
2021-09-12 05:54:37 +03:00
|
|
|
/// SAFETY: The caller must ensure that the provided `elf` is a valid 32-bit
|
2021-09-09 11:40:07 +03:00
|
|
|
/// ELF file.
|
2021-09-12 05:54:37 +03:00
|
|
|
struct elf_section_hdr_info* elf32_section_hdr_info(uint8_t *elf) {
|
2021-09-10 11:02:54 +03:00
|
|
|
struct elf_section_hdr_info* info = ext_mem_alloc(sizeof(struct elf_section_hdr_info));
|
|
|
|
|
2021-09-09 11:40:07 +03:00
|
|
|
struct elf32_hdr hdr;
|
|
|
|
memcpy(&hdr, elf + (0), sizeof(struct elf32_hdr));
|
|
|
|
|
2021-09-10 11:02:54 +03:00
|
|
|
info->num = hdr.sh_num;
|
2021-09-09 11:40:07 +03:00
|
|
|
info->section_entry_size = hdr.shdr_size;
|
2021-09-10 11:02:54 +03:00
|
|
|
info->section_hdr_size = info->num * info->section_entry_size;
|
2021-09-09 11:40:07 +03:00
|
|
|
info->str_section_idx = hdr.shstrndx;
|
|
|
|
info->section_hdrs = ext_mem_alloc(info->section_hdr_size);
|
|
|
|
|
|
|
|
memcpy(info->section_hdrs, elf + (hdr.shoff), info->section_hdr_size);
|
2021-09-10 11:02:54 +03:00
|
|
|
|
2021-09-12 05:54:37 +03:00
|
|
|
return info;
|
2021-09-09 11:40:07 +03:00
|
|
|
}
|
|
|
|
|
2021-03-28 16:46:59 +03:00
|
|
|
// Copy the named section of a 32-bit ELF image into `buffer`.
// `limit` must equal the section's size exactly. Unlike the 64-bit variant,
// no relocations are applied.
// Returns: 0 on success, 1 on an invalid/unsupported ELF, 2 if no section
// with that name exists, 3 if the section is larger than `limit`, 4 if it
// is smaller than `limit`.
int elf32_load_section(uint8_t *elf, void *buffer, const char *name, size_t limit) {
    struct elf32_hdr hdr;
    memcpy(&hdr, elf + (0), sizeof(struct elf32_hdr));

    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
        printv("elf: Not a valid ELF file.\n");
        return 1;
    }

    if (hdr.ident[EI_DATA] != BITS_LE) {
        printv("elf: Not a Little-endian ELF file.\n");
        return 1;
    }

    if (hdr.machine != ARCH_X86_32) {
        printv("elf: Not an x86_32 ELF file.\n");
        return 1;
    }

    // Load the section name string table so section names can be compared.
    struct elf32_shdr shstrtab;
    memcpy(&shstrtab, elf + (hdr.shoff + hdr.shstrndx * sizeof(struct elf32_shdr)),
           sizeof(struct elf32_shdr));

    char *names = ext_mem_alloc(shstrtab.sh_size);
    memcpy(names, elf + (shstrtab.sh_offset), shstrtab.sh_size);

    int ret;

    for (uint16_t i = 0; i < hdr.sh_num; i++) {
        struct elf32_shdr section;
        memcpy(&section, elf + (hdr.shoff + i * sizeof(struct elf32_shdr)),
               sizeof(struct elf32_shdr));

        if (!strcmp(&names[section.sh_name], name)) {
            // The section must match the expected size exactly.
            if (section.sh_size > limit) {
                ret = 3;
                goto out;
            }
            if (section.sh_size < limit) {
                ret = 4;
                goto out;
            }
            memcpy(buffer, elf + (section.sh_offset), section.sh_size);
            ret = 0;
            goto out;
        }
    }

    ret = 2;

out:
    // goto-based cleanup: the name table is freed on every exit path.
    pmm_free(names, shstrtab.sh_size);

    return ret;
}
|
|
|
|
|
2021-07-25 02:18:36 +03:00
|
|
|
static uint64_t elf64_max_align(uint8_t *elf) {
|
2021-07-17 09:19:03 +03:00
|
|
|
uint64_t ret = 0;
|
2021-07-15 14:34:24 +03:00
|
|
|
|
|
|
|
struct elf64_hdr hdr;
|
|
|
|
memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));
|
|
|
|
|
|
|
|
for (uint16_t i = 0; i < hdr.ph_num; i++) {
|
|
|
|
struct elf64_phdr phdr;
|
|
|
|
memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf64_phdr)),
|
|
|
|
sizeof(struct elf64_phdr));
|
|
|
|
|
|
|
|
if (phdr.p_type != PT_LOAD)
|
|
|
|
continue;
|
|
|
|
|
2021-07-25 02:18:36 +03:00
|
|
|
if (phdr.p_align > ret) {
|
|
|
|
ret = phdr.p_align;
|
2021-07-17 09:19:03 +03:00
|
|
|
}
|
2021-07-15 14:34:24 +03:00
|
|
|
}
|
|
|
|
|
2021-07-17 09:19:03 +03:00
|
|
|
if (ret == 0) {
|
2021-12-11 21:58:00 +03:00
|
|
|
panic(true, "elf: Executable has no loadable segments");
|
2021-07-15 14:34:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2021-07-17 09:19:03 +03:00
|
|
|
// Build an array of aligned memory ranges (base/length/permissions) covering
// every PT_LOAD segment of the image, with `slide` applied.
// When `use_paddr` is false, lower-half segments (below
// FIXED_HIGHER_HALF_OFFSET_64) are skipped. Output is returned through
// `_ranges` (freshly allocated) and `_ranges_count`; panics if no segment
// qualifies.
static void elf64_get_ranges(uint8_t *elf, uint64_t slide, bool use_paddr, struct elf_range **_ranges, uint64_t *_ranges_count) {
    struct elf64_hdr hdr;
    memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));

    uint64_t ranges_count = 0;

    // First pass: count qualifying PT_LOAD segments so we can size the array.
    for (uint16_t i = 0; i < hdr.ph_num; i++) {
        struct elf64_phdr phdr;
        memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf64_phdr)),
               sizeof(struct elf64_phdr));

        if (phdr.p_type != PT_LOAD)
            continue;

        if (!use_paddr && phdr.p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
            continue;
        }

        ranges_count++;
    }

    if (ranges_count == 0) {
        panic(true, "elf: Attempted to use PMRs but no higher half PHDRs exist");
    }

    struct elf_range *ranges = ext_mem_alloc(ranges_count * sizeof(struct elf_range));

    // Second pass: fill in one range per qualifying segment (same filtering
    // as the counting pass, so `r` never exceeds `ranges_count`).
    size_t r = 0;
    for (uint16_t i = 0; i < hdr.ph_num; i++) {
        struct elf64_phdr phdr;
        memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf64_phdr)),
               sizeof(struct elf64_phdr));

        if (phdr.p_type != PT_LOAD)
            continue;

        uint64_t load_addr = 0;

        if (use_paddr) {
            load_addr = phdr.p_paddr;
        } else {
            load_addr = phdr.p_vaddr;

            if (phdr.p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
                continue;
            }
        }

        load_addr += slide;

        uint64_t this_top = load_addr + phdr.p_memsz;

        // Expand the range outward to the segment's alignment boundaries.
        ranges[r].base = load_addr & ~(phdr.p_align - 1);
        ranges[r].length = ALIGN_UP(this_top - ranges[r].base, phdr.p_align);
        // Low 3 bits of p_flags carry the R/W/X permission bits.
        ranges[r].permissions = phdr.p_flags & 0b111;

        r++;
    }

    *_ranges_count = ranges_count;
    *_ranges = ranges;
}
|
|
|
|
|
2021-10-29 02:15:11 +03:00
|
|
|
// Load a 64-bit ELF image into memory, optionally with KASLR and/or fully
// virtual (higher half) mappings.
//
// Outputs: *entry_point (slid entry), *top (highest loaded address, if
// non-NULL), *_slide (chosen slide, if non-NULL), *physical_base /
// *virtual_base (when fully_virtual), and PMR ranges via ranges/ranges_count
// (when both non-NULL).
//
// Uses a simulate-then-commit scheme: when the image is relocatable, the
// load loop first runs with `simulation` set so memmap_alloc_range can
// reject a candidate slide without committing; on failure a new slide is
// tried (random for KASLR, incremented by max_align otherwise), up to
// max_simulated_tries.
//
// Returns 0 on success, -1 on an invalid ELF magic; other failures panic.
int elf64_load(uint8_t *elf, uint64_t *entry_point, uint64_t *top, uint64_t *_slide, uint32_t alloc_type, bool kaslr, bool use_paddr, struct elf_range **ranges, uint64_t *ranges_count, bool fully_virtual, uint64_t *physical_base, uint64_t *virtual_base) {
    struct elf64_hdr hdr;
    memcpy(&hdr, elf + (0), sizeof(struct elf64_hdr));

    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
        printv("elf: Not a valid ELF file.\n");
        return -1;
    }

    if (hdr.ident[EI_DATA] != BITS_LE) {
        panic(true, "elf: Not a Little-endian ELF file.\n");
    }

    if (hdr.machine != ARCH_X86_64) {
        panic(true, "elf: Not an x86_64 ELF file.\n");
    }

    uint64_t slide = 0;
    bool simulation = true;
    size_t try_count = 0;
    size_t max_simulated_tries = 0x100000;

    uint64_t entry = hdr.entry;
    bool entry_adjusted = false;

    uint64_t max_align = elf64_max_align(elf);

    uint64_t image_size = 0;

    if (fully_virtual) {
        // Fully virtual mappings get one contiguous physical allocation, so
        // no per-segment simulation is needed.
        simulation = false;

        // Compute the virtual extent [min_vaddr, max_vaddr) of all higher
        // half PT_LOAD segments.
        uint64_t min_vaddr = (uint64_t)-1;
        uint64_t max_vaddr = 0;
        for (uint16_t i = 0; i < hdr.ph_num; i++) {
            struct elf64_phdr phdr;
            memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf64_phdr)),
                   sizeof(struct elf64_phdr));

            if (phdr.p_type != PT_LOAD) {
                continue;
            }

            // Drop entries not in the higher half
            if (phdr.p_vaddr < FIXED_HIGHER_HALF_OFFSET_64) {
                continue;
            }

            if (phdr.p_vaddr < min_vaddr) {
                min_vaddr = phdr.p_vaddr;
            }

            if (phdr.p_vaddr + phdr.p_memsz > max_vaddr) {
                max_vaddr = phdr.p_vaddr + phdr.p_memsz;
            }
        }

        if (max_vaddr == 0 || min_vaddr == (uint64_t)-1) {
            panic(true, "elf: Attempted to use fully virtual mappings but no higher half PHDRs exist");
        }

        image_size = max_vaddr - min_vaddr;

        *physical_base = (uintptr_t)ext_mem_alloc_type_aligned(image_size, alloc_type, max_align);
        *virtual_base = min_vaddr;
    }

    // Non-relocatable images cannot be slid: skip simulation entirely.
    if (!elf64_is_relocatable(elf, &hdr)) {
        simulation = false;
        goto final;
    }

again:
    if (kaslr) {
        // Pick a random slide aligned to the largest segment alignment.
        slide = rand32() & ~(max_align - 1);

        if (fully_virtual) {
            // Reject slides that would push the image past the 2 GiB limit
            // of the kernel's address model.
            if ((*virtual_base - FIXED_HIGHER_HALF_OFFSET_64) + slide + image_size >= 0x80000000) {
                if (++try_count == max_simulated_tries) {
                    panic(true, "elf: Image wants to load too high");
                }
                goto again;
            }
        }
    }

final:
    if (top)
        *top = 0;

    bool higher_half = false;

    for (uint16_t i = 0; i < hdr.ph_num; i++) {
        struct elf64_phdr phdr;
        memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf64_phdr)),
               sizeof(struct elf64_phdr));

        if (phdr.p_type != PT_LOAD)
            continue;

        // Decide the physical address this segment actually loads at.
        uint64_t load_addr = 0;

        if (use_paddr) {
            load_addr = phdr.p_paddr;
        } else {
            load_addr = phdr.p_vaddr;

            if (phdr.p_vaddr >= FIXED_HIGHER_HALF_OFFSET_64) {
                higher_half = true;

                if (fully_virtual) {
                    // Offset into the contiguous physical allocation.
                    load_addr = *physical_base + (phdr.p_vaddr - *virtual_base);
                } else {
                    load_addr = phdr.p_vaddr - FIXED_HIGHER_HALF_OFFSET_64;
                }
            } else if (ranges) {
                // Drop lower half
                continue;
            }
        }

        if (!fully_virtual) {
            load_addr += slide;
        }

        uint64_t this_top = load_addr + phdr.p_memsz;

        if (top) {
            if (this_top > *top) {
                *top = this_top;
            }
        }

        // The range claimed from the memory map: align-expanded when PMR
        // ranges are requested, the exact segment extent otherwise.
        uint64_t mem_base, mem_size;

        if (ranges) {
            mem_base = load_addr & ~(phdr.p_align - 1);
            mem_size = this_top - mem_base;
        } else {
            mem_base = load_addr;
            mem_size = phdr.p_memsz;
        }

        // Claim the range; on failure (or a higher-half segment exceeding
        // 2 GiB), retry with a new slide unless we are out of tries or
        // already committed.
        if (!fully_virtual &&
            ((higher_half == true && this_top > 0x80000000)
             || !memmap_alloc_range((size_t)mem_base, (size_t)mem_size, alloc_type, true, false, simulation, false))) {
            if (++try_count == max_simulated_tries || simulation == false) {
                panic(true, "elf: Failed to allocate necessary memory range (%X-%X)", mem_base, mem_base + mem_size);
            }
            if (!kaslr) {
                slide += max_align;
            }
            goto again;
        }

        // Copy the file-backed part of the segment, then zero the BSS tail.
        memcpy((void *)(uintptr_t)load_addr, elf + (phdr.p_offset), phdr.p_filesz);

        size_t to_zero = (size_t)(phdr.p_memsz - phdr.p_filesz);

        if (to_zero) {
            void *ptr = (void *)(uintptr_t)(load_addr + phdr.p_filesz);
            memset(ptr, 0, to_zero);
        }

        if (elf64_apply_relocations(elf, &hdr, (void *)(uintptr_t)load_addr, phdr.p_vaddr, phdr.p_memsz, slide)) {
            panic(true, "elf: Failed to apply relocations");
        }

        // When loading at physical addresses, translate the entry point from
        // its virtual address into the segment's physical address (once).
        if (use_paddr) {
            if (!entry_adjusted && entry >= phdr.p_vaddr && entry < (phdr.p_vaddr + phdr.p_memsz)) {
                entry -= phdr.p_vaddr;
                entry += phdr.p_paddr;
                entry_adjusted = true;
            }
        }
    }

    // First pass was a dry run; repeat the loop for real with the slide that
    // survived simulation.
    if (simulation) {
        simulation = false;
        goto final;
    }

    if (fully_virtual) {
        *virtual_base += slide;
    }

    *entry_point = entry + slide;
    if (_slide)
        *_slide = slide;

    if (ranges_count != NULL && ranges != NULL) {
        elf64_get_ranges(elf, slide, use_paddr, ranges, ranges_count);
    }

    return 0;
}
|
2020-04-18 19:01:29 +03:00
|
|
|
|
2021-07-03 00:58:22 +03:00
|
|
|
// Load a 32-bit ELF image into memory at its segments' physical addresses.
// On success writes the (physically translated) entry point to *entry_point
// and, if `top` is non-NULL, the highest physical address loaded to *top.
// Returns 0 on success, -1 on an invalid or unsupported ELF.
int elf32_load(uint8_t *elf, uint32_t *entry_point, uint32_t *top, uint32_t alloc_type) {
    struct elf32_hdr hdr;
    memcpy(&hdr, elf + (0), sizeof(struct elf32_hdr));

    if (strncmp((char *)hdr.ident, "\177ELF", 4)) {
        printv("elf: Not a valid ELF file.\n");
        return -1;
    }

    if (hdr.ident[EI_DATA] != BITS_LE) {
        printv("elf: Not a Little-endian ELF file.\n");
        return -1;
    }

    if (hdr.machine != ARCH_X86_32) {
        printv("elf: Not an x86_32 ELF file.\n");
        return -1;
    }

    uint32_t entry = hdr.entry;
    bool entry_adjusted = false;

    if (top)
        *top = 0;

    for (uint16_t i = 0; i < hdr.ph_num; i++) {
        struct elf32_phdr phdr;
        memcpy(&phdr, elf + (hdr.phoff + i * sizeof(struct elf32_phdr)),
               sizeof(struct elf32_phdr));

        if (phdr.p_type != PT_LOAD)
            continue;

        // Track the highest physical address any segment reaches.
        if (top) {
            uint32_t this_top = phdr.p_paddr + phdr.p_memsz;
            if (this_top > *top) {
                *top = this_top;
            }
        }

        // Claim the segment's physical range from the memory map.
        memmap_alloc_range((size_t)phdr.p_paddr, (size_t)phdr.p_memsz, alloc_type, true, true, false, false);

        // Copy the file-backed part of the segment, then zero the BSS tail.
        memcpy((void *)(uintptr_t)phdr.p_paddr, elf + (phdr.p_offset), phdr.p_filesz);

        size_t to_zero = (size_t)(phdr.p_memsz - phdr.p_filesz);

        if (to_zero) {
            void *ptr = (void *)(uintptr_t)(phdr.p_paddr + phdr.p_filesz);
            memset(ptr, 0, to_zero);
        }

        // Translate the entry point from its virtual address into the
        // containing segment's physical address (only once).
        if (!entry_adjusted && entry >= phdr.p_vaddr && entry < (phdr.p_vaddr + phdr.p_memsz)) {
            entry -= phdr.p_vaddr;
            entry += phdr.p_paddr;
            entry_adjusted = true;
        }
    }

    *entry_point = entry;

    return 0;
}
|