// limine: common/protos/limine.c

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <config.h>
#include <protos/stivale.h>
#include <protos/stivale2.h>
#include <lib/elf.h>
#include <lib/blib.h>
#include <lib/acpi.h>
#include <lib/config.h>
#include <lib/time.h>
#include <lib/print.h>
#include <lib/real.h>
#include <lib/libc.h>
#include <lib/gterm.h>
#include <lib/uri.h>
#include <sys/smp.h>
#include <sys/cpu.h>
#include <sys/gdt.h>
#include <lib/fb.h>
#include <lib/term.h>
#include <sys/pic.h>
#include <sys/lapic.h>
#include <fs/file.h>
#include <mm/pmm.h>
#include <stivale2.h>
#include <pxe/tftp.h>
#include <drivers/edid.h>
#include <drivers/vga_textmode.h>
#include <lib/rand.h>
#define LIMINE_NO_POINTERS
#include <protos/limine.h>
#include <limine.h>

#define MAX_REQUESTS 128
#define MAX_MEMMAP 256

static uint64_t physical_base, virtual_base, slide, direct_map_offset;
static size_t requests_count;
static void *requests[MAX_REQUESTS];
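
// Build a limine_file_location describing where a file was loaded from:
// either the PXE server (IP/port) or the backing volume (partition index,
// MBR disk ID, and the partition/disk GUIDs when available).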
static struct limine_file_location get_file_loc(struct file_handle *file) {
    struct limine_file_location ret = {0};

    if (file->pxe) {
        ret.pxe_ip = file->pxe_ip;
        ret.pxe_port = file->pxe_port;
        return ret;
    }

    struct volume *vol = file->vol;

    ret.partition_index = vol->partition;
    ret.mbr_disk_id = mbr_get_id(vol);

    if (vol->guid_valid) {
        memcpy(&ret.part_uuid, &vol->guid, sizeof(struct limine_uuid));
    }
    if (vol->part_guid_valid) {
        memcpy(&ret.gpt_part_uuid, &vol->part_guid, sizeof(struct limine_uuid));
    }

    struct guid gpt_disk_uuid;
    if (gpt_get_guid(&gpt_disk_uuid, vol->backing_dev ?: vol) == true) {
        memcpy(&ret.gpt_disk_uuid, &gpt_disk_uuid, sizeof(struct limine_uuid));
    }

    return ret;
}
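
// Translate a bootloader-side (physical) pointer into the higher-half
// address at which the kernel will see it once the direct map is in place.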
static uint64_t reported_addr(void *addr) {
    return (uint64_t)(uintptr_t)addr + direct_map_offset;
}

/*
static uintptr_t get_phys_addr(uint64_t addr) {
    return physical_base + (addr - virtual_base);
}
*/
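
// Look up a previously collected kernel request by the two request-specific
// words of its ID (the first two words are the common magic shared by all
// requests, so only p[2] and p[3] need to be compared).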
static void *_get_request(uint64_t id[4]) {
    for (size_t i = 0; i < requests_count; i++) {
        uint64_t *p = requests[i];

        if (p[2] != id[2]) {
            continue;
        }
        if (p[3] != id[3]) {
            continue;
        }

        return p;
    }

    return NULL;
}

#define get_request(REQ) _get_request((uint64_t[4])REQ)
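
// Each protocol feature below is wrapped in FEAT_START/FEAT_END, i.e. a
// do { ... } while (0) block, so that `break` skips straight to the next
// feature when its request is absent.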
#define FEAT_START do {
#define FEAT_END } while (0);

bool limine_load(char *config, char *cmdline) {
    uint32_t eax, ebx, ecx, edx;

    char *kernel_path = config_get_value(config, 0, "KERNEL_PATH");
    if (kernel_path == NULL)
        panic(true, "limine: KERNEL_PATH not specified");

    struct file_handle *kernel_file;
    if ((kernel_file = uri_open(kernel_path)) == NULL)
        panic(true, "limine: Failed to open kernel with path `%s`. Is the path correct?", kernel_path);

    uint8_t *kernel = freadall(kernel_file, MEMMAP_BOOTLOADER_RECLAIMABLE);
    size_t kernel_file_size = kernel_file->size;

    struct limine_file_location *kl = ext_mem_alloc(sizeof(struct limine_file_location));
    *kl = get_file_loc(kernel_file);

    fclose(kernel_file);

    char *kaslr_s = config_get_value(config, 0, "KASLR");
    bool kaslr = true;
    if (kaslr_s != NULL && strcmp(kaslr_s, "no") == 0)
        kaslr = false;

    int bits = elf_bits(kernel);
    if (bits == -1 || bits == 32) {
        printv("limine: Kernel in unrecognised format\n");
        return false;
    }

    // ELF loading
    uint64_t entry_point = 0;
    struct elf_range *ranges;
    uint64_t ranges_count;

    if (elf64_load(kernel, &entry_point, NULL, &slide,
                   MEMMAP_KERNEL_AND_MODULES, kaslr, false,
                   &ranges, &ranges_count,
                   true, &physical_base, &virtual_base)) {
        return false;
    }
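
    // The kernel advertises which protocol features it wants by embedding
    // request structures in its image; each request ID starts with the two
    // LIMINE_COMMON_MAGIC words followed by two request-specific words.
    // For illustration only (kernel-side, per the limine.h protocol header),
    // such a request looks roughly like:
    //
    //     static volatile struct limine_hhdm_request hhdm_request = {
    //         .id = LIMINE_HHDM_REQUEST,
    //         .revision = 0
    //     };
    //
    // The scan below walks the loaded image in 8-byte steps looking for that
    // magic and records a pointer to each request it finds.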
    // Load requests
    requests_count = 0;
    uint64_t common_magic[2] = { LIMINE_COMMON_MAGIC };
    for (size_t i = 0; i < ALIGN_DOWN(kernel_file_size, 8); i += 8) {
        uint64_t *p = (void *)(uintptr_t)physical_base + i;

        if (p[0] != common_magic[0]) {
            continue;
        }
        if (p[1] != common_magic[1]) {
            continue;
        }

        if (requests_count == MAX_REQUESTS) {
            panic(true, "limine: Maximum requests exceeded");
        }

        // Check for a conflict
        if (_get_request(p) != NULL) {
            panic(true, "limine: Conflict detected for request ID %X %X", p[2], p[3]);
        }

        requests[requests_count++] = p;
    }

    if (requests_count == 0) {
        return false;
    }

    // Check if 64 bit CPU
    if (!cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx) || !(edx & (1 << 29))) {
        panic(true, "limine: This CPU does not support 64-bit mode.");
    }

    print("limine: Loading kernel `%s`...\n", kernel_path);

    printv("limine: Physical base: %X\n", physical_base);
    printv("limine: Virtual base: %X\n", virtual_base);
    printv("limine: Slide: %X\n", slide);
    printv("limine: ELF entry point: %X\n", entry_point);
    printv("limine: Requests count: %u\n", requests_count);
    // 5 level paging feature & HHDM slide
    bool want_5lv;
    FEAT_START
        // Check if 5-level paging is available
        bool level5pg = false;
        if (cpuid(0x00000007, 0, &eax, &ebx, &ecx, &edx) && (ecx & (1 << 16))) {
            printv("limine: CPU has 5-level paging support\n");
            level5pg = true;
        }

        struct limine_5_level_paging_request *lv5pg_request = get_request(LIMINE_5_LEVEL_PAGING_REQUEST);
        want_5lv = lv5pg_request != NULL && level5pg;

        direct_map_offset = want_5lv ? 0xff00000000000000 : 0xffff800000000000;

        if (kaslr) {
            direct_map_offset += (rand64() & ~((uint64_t)0x40000000 - 1)) & 0xfffffffffff;
        }

        if (want_5lv) {
            void *lv5pg_response = ext_mem_alloc(sizeof(struct limine_5_level_paging_response));
            lv5pg_request->response = reported_addr(lv5pg_response);
        }
    FEAT_END
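
    // An entry point request overrides the ELF entry point; it still receives
    // an (otherwise empty) response structure like every other honoured request.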
    // Entry point feature
    FEAT_START
        struct limine_entry_point_request *entrypoint_request = get_request(LIMINE_ENTRY_POINT_REQUEST);
        if (entrypoint_request == NULL) {
            break;
        }

        entry_point = entrypoint_request->entry;

        print("limine: Entry point at %X\n", entry_point);

        struct limine_entry_point_response *entrypoint_response =
            ext_mem_alloc(sizeof(struct limine_entry_point_response));

        entrypoint_request->response = reported_addr(entrypoint_response);
    FEAT_END

    // Bootloader info feature
    FEAT_START
        struct limine_bootloader_info_request *bootloader_info_request = get_request(LIMINE_BOOTLOADER_INFO_REQUEST);
        if (bootloader_info_request == NULL) {
            break; // next feature
        }

        struct limine_bootloader_info_response *bootloader_info_response =
            ext_mem_alloc(sizeof(struct limine_bootloader_info_response));

        bootloader_info_response->name = reported_addr("Limine");
        bootloader_info_response->version = reported_addr(LIMINE_VERSION);

        bootloader_info_request->response = reported_addr(bootloader_info_response);
    FEAT_END

    // HHDM feature
    FEAT_START
        struct limine_hhdm_request *hhdm_request = get_request(LIMINE_HHDM_REQUEST);
        if (hhdm_request == NULL) {
            break; // next feature
        }

        struct limine_hhdm_response *hhdm_response =
            ext_mem_alloc(sizeof(struct limine_hhdm_response));

        hhdm_response->address = direct_map_offset;

        hhdm_request->response = reported_addr(hhdm_response);
    FEAT_END

    // RSDP feature
    FEAT_START
        struct limine_rsdp_request *rsdp_request = get_request(LIMINE_RSDP_REQUEST);
        if (rsdp_request == NULL) {
            break; // next feature
        }

        struct limine_rsdp_response *rsdp_response =
            ext_mem_alloc(sizeof(struct limine_rsdp_response));

        void *rsdp = acpi_get_rsdp();
        if (rsdp) {
            rsdp_response->address = reported_addr(rsdp);
        }

        rsdp_request->response = reported_addr(rsdp_response);
    FEAT_END

    // SMBIOS feature
    FEAT_START
        struct limine_smbios_request *smbios_request = get_request(LIMINE_SMBIOS_REQUEST);
        if (smbios_request == NULL) {
            break; // next feature
        }

        struct limine_smbios_response *smbios_response =
            ext_mem_alloc(sizeof(struct limine_smbios_response));

        void *smbios_entry_32 = NULL, *smbios_entry_64 = NULL;
        acpi_get_smbios(&smbios_entry_32, &smbios_entry_64);

        if (smbios_entry_32) {
            smbios_response->entry_32 = reported_addr(smbios_entry_32);
        }
        if (smbios_entry_64) {
            smbios_response->entry_64 = reported_addr(smbios_entry_64);
        }

        smbios_request->response = reported_addr(smbios_response);
    FEAT_END
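
    // Only built on UEFI: BIOS builds have no EFI system table to report.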
#if uefi == 1
    // EFI system table feature
    FEAT_START
        struct limine_efi_system_table_request *est_request = get_request(LIMINE_EFI_SYSTEM_TABLE_REQUEST);
        if (est_request == NULL) {
            break; // next feature
        }

        struct limine_efi_system_table_response *est_response =
            ext_mem_alloc(sizeof(struct limine_efi_system_table_response));

        est_response->address = reported_addr(gST);

        est_request->response = reported_addr(est_response);
    FEAT_END
#endif
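
    // Module feature: the response is a set of parallel arrays (base, length,
    // path, cmdline, file location), one entry per module, with entry 0
    // always describing the kernel image itself.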
    // Modules
    FEAT_START
        struct limine_module_request *module_request = get_request(LIMINE_MODULE_REQUEST);
        if (module_request == NULL) {
            break; // next feature
        }

        size_t module_count;
        for (module_count = 0; ; module_count++) {
            char *module_file = config_get_value(config, module_count, "MODULE_PATH");
            if (module_file == NULL)
                break;
        }

        // Module 0 is always the kernel
        module_count++;

        struct limine_module_response *module_response =
            ext_mem_alloc(sizeof(struct limine_module_response));

        uint64_t *module_base = ext_mem_alloc(sizeof(uint64_t) * module_count);
        uint64_t *module_length = ext_mem_alloc(sizeof(uint64_t) * module_count);
        uint64_t *module_path = ext_mem_alloc(sizeof(uint64_t) * module_count);
        uint64_t *module_cmdline = ext_mem_alloc(sizeof(uint64_t) * module_count);
        uint64_t *module_file_location = ext_mem_alloc(sizeof(uint64_t) * module_count);

        module_base[0] = reported_addr(kernel);
        module_length[0] = kernel_file_size;
        module_path[0] = reported_addr(kernel_path);
        module_cmdline[0] = reported_addr(cmdline);
        module_file_location[0] = reported_addr(kl);

        for (size_t i = 1; i < module_count; i++) {
            struct conf_tuple conf_tuple =
                config_get_tuple(config, i - 1,
                                 "MODULE_PATH", "MODULE_CMDLINE");

            char *m_path = conf_tuple.value1;
            char *m_cmdline = conf_tuple.value2;
            if (m_cmdline == NULL) {
                m_cmdline = "";
            }

            print("limine: Loading module `%s`...\n", m_path);

            struct file_handle *f;
            if ((f = uri_open(m_path)) == NULL)
                panic(true, "limine: Failed to open module with path `%s`. Is the path correct?", m_path);

            module_base[i] = reported_addr(freadall(f, MEMMAP_KERNEL_AND_MODULES));
            module_length[i] = f->size;
            module_path[i] = reported_addr(m_path);
            module_cmdline[i] = reported_addr(m_cmdline);

            struct limine_file_location *l = ext_mem_alloc(sizeof(struct limine_file_location));
            *l = get_file_loc(f);
            module_file_location[i] = reported_addr(l);

            fclose(f);
        }

        module_response->module_count = module_count;
        module_response->module_base = reported_addr(module_base);
        module_response->module_length = reported_addr(module_length);
        module_response->module_path = reported_addr(module_path);
        module_response->module_cmdline = reported_addr(module_cmdline);
        module_response->module_file_location = reported_addr(module_file_location);

        module_request->response = reported_addr(module_response);
    FEAT_END
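
    // Framebuffer feature: tear down the bootloader terminal, pick a video
    // mode (honouring the RESOLUTION config key if present) and report it.
    // Only a single framebuffer is currently reported, again as parallel
    // arrays indexed by framebuffer.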
    // Framebuffer feature
    FEAT_START
        term_deinit();

        size_t req_width = 0, req_height = 0, req_bpp = 0;

        char *resolution = config_get_value(config, 0, "RESOLUTION");
        if (resolution != NULL) {
            parse_resolution(&req_width, &req_height, &req_bpp, resolution);
        }

        struct fb_info fb;
        if (!fb_init(&fb, req_width, req_height, req_bpp)) {
            panic(true, "limine: Could not acquire framebuffer");
        }

        struct limine_framebuffer_request *framebuffer_request = get_request(LIMINE_FRAMEBUFFER_REQUEST);
        if (framebuffer_request == NULL) {
            break; // next feature
        }

        memmap_alloc_range(fb.framebuffer_addr,
                           (uint64_t)fb.framebuffer_pitch * fb.framebuffer_height,
                           MEMMAP_FRAMEBUFFER, false, false, false, true);

        struct limine_framebuffer_response *framebuffer_response =
            ext_mem_alloc(sizeof(struct limine_framebuffer_response));

        // For now we only support 1 framebuffer
        size_t fb_count = 1;

        uint64_t *fb_address = ext_mem_alloc(sizeof(uint64_t) * fb_count);
        uint16_t *fb_width = ext_mem_alloc(sizeof(uint16_t) * fb_count);
        uint16_t *fb_height = ext_mem_alloc(sizeof(uint16_t) * fb_count);
        uint16_t *fb_pitch = ext_mem_alloc(sizeof(uint16_t) * fb_count);
        uint16_t *fb_bpp = ext_mem_alloc(sizeof(uint16_t) * fb_count);
        uint8_t *fb_memory_model = ext_mem_alloc(sizeof(uint8_t) * fb_count);
        uint8_t *fb_red_mask_size = ext_mem_alloc(sizeof(uint8_t) * fb_count);
        uint8_t *fb_red_mask_shift = ext_mem_alloc(sizeof(uint8_t) * fb_count);
        uint8_t *fb_green_mask_size = ext_mem_alloc(sizeof(uint8_t) * fb_count);
        uint8_t *fb_green_mask_shift = ext_mem_alloc(sizeof(uint8_t) * fb_count);
        uint8_t *fb_blue_mask_size = ext_mem_alloc(sizeof(uint8_t) * fb_count);
        uint8_t *fb_blue_mask_shift = ext_mem_alloc(sizeof(uint8_t) * fb_count);
        uint64_t *fb_edid_size = ext_mem_alloc(sizeof(uint64_t) * fb_count);
        uint64_t *fb_edid = ext_mem_alloc(sizeof(uint64_t) * fb_count);

        framebuffer_response->fb_count = fb_count;
        framebuffer_response->fb_address = reported_addr(fb_address);
        framebuffer_response->fb_width = reported_addr(fb_width);
        framebuffer_response->fb_height = reported_addr(fb_height);
        framebuffer_response->fb_pitch = reported_addr(fb_pitch);
        framebuffer_response->fb_bpp = reported_addr(fb_bpp);
        framebuffer_response->fb_memory_model = reported_addr(fb_memory_model);
        framebuffer_response->fb_red_mask_size = reported_addr(fb_red_mask_size);
        framebuffer_response->fb_red_mask_shift = reported_addr(fb_red_mask_shift);
        framebuffer_response->fb_green_mask_size = reported_addr(fb_green_mask_size);
        framebuffer_response->fb_green_mask_shift = reported_addr(fb_green_mask_shift);
        framebuffer_response->fb_blue_mask_size = reported_addr(fb_blue_mask_size);
        framebuffer_response->fb_blue_mask_shift = reported_addr(fb_blue_mask_shift);
        framebuffer_response->fb_edid_size = reported_addr(fb_edid_size);
        framebuffer_response->fb_edid = reported_addr(fb_edid);

        struct edid_info_struct *edid_info = get_edid_info();
        if (edid_info != NULL) {
            *fb_edid_size = sizeof(struct edid_info_struct);
            *fb_edid = reported_addr(edid_info);
        }

        *fb_memory_model = LIMINE_FRAMEBUFFER_RGB;
        *fb_address = reported_addr((void *)(uintptr_t)fb.framebuffer_addr);
        *fb_width = fb.framebuffer_width;
        *fb_height = fb.framebuffer_height;
        *fb_bpp = fb.framebuffer_bpp;
        *fb_pitch = fb.framebuffer_pitch;
        *fb_red_mask_size = fb.red_mask_size;
        *fb_red_mask_shift = fb.red_mask_shift;
        *fb_green_mask_size = fb.green_mask_size;
        *fb_green_mask_shift = fb.green_mask_shift;
        *fb_blue_mask_size = fb.blue_mask_size;
        *fb_blue_mask_shift = fb.blue_mask_shift;

        framebuffer_request->response = reported_addr(framebuffer_response);
    FEAT_END
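
    // Wrap-up before the memory map is finalised: build a GDT descriptor that
    // is reachable through the higher-half direct map, allocate the kernel's
    // stack, and construct the target page tables. On UEFI, boot services are
    // exited before the final memory map is handed over.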
    // Wrap-up stuff before memmap close
    struct gdtr *local_gdt = ext_mem_alloc(sizeof(struct gdtr));
    local_gdt->limit = gdt.limit;
    uint64_t local_gdt_base = (uint64_t)gdt.ptr;
    local_gdt_base += direct_map_offset;
    local_gdt->ptr = local_gdt_base;
#if defined (__i386__)
    local_gdt->ptr_hi = local_gdt_base >> 32;
#endif

    void *stack = ext_mem_alloc(8192) + 8192;

    pagemap_t pagemap = {0};
    pagemap = stivale_build_pagemap(want_5lv, true, ranges, ranges_count, true,
                                    physical_base, virtual_base, direct_map_offset);

#if uefi == 1
    efi_exit_boot_services();
#endif
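
    // SMP feature: start the application processors now; each AP gets its own
    // stack, and the kernel later releases a CPU by writing its entry address
    // to the corresponding goto_address slot.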
    // SMP
    FEAT_START
        struct limine_smp_request *smp_request = get_request(LIMINE_SMP_REQUEST);
        if (smp_request == NULL) {
            break; // next feature
        }

        struct smp_information *smp_info;
        size_t cpu_count;
        uint32_t bsp_lapic_id;
        smp_info = init_smp(0, (void **)&smp_info,
                            &cpu_count, &bsp_lapic_id,
                            true, want_5lv,
                            pagemap, smp_request->flags & LIMINE_SMP_X2APIC, true,
                            direct_map_offset);

        if (smp_info == NULL) {
            break;
        }

        for (size_t i = 0; i < cpu_count; i++) {
            // ext_mem_alloc(8192) + 8192 already points at the top of the
            // 8 KiB stack, so report it as-is.
            void *cpu_stack = ext_mem_alloc(8192) + 8192;
            smp_info[i].stack_addr = reported_addr(cpu_stack);
        }

        struct limine_smp_response *smp_response =
            ext_mem_alloc(sizeof(struct limine_smp_response));

        smp_response->flags |= (smp_request->flags & LIMINE_SMP_X2APIC) && x2apic_check();
        smp_response->bsp_lapic_id = bsp_lapic_id;
        smp_response->cpu_count = cpu_count;

        uint32_t *cpu_processor_id = ext_mem_alloc(sizeof(uint32_t) * cpu_count);
        uint32_t *cpu_lapic_id = ext_mem_alloc(sizeof(uint32_t) * cpu_count);
        uint64_t *cpu_goto_address = ext_mem_alloc(sizeof(uint64_t) * cpu_count);
        uint64_t *cpu_extra_argument = ext_mem_alloc(sizeof(uint64_t) * cpu_count);

        for (size_t i = 0; i < cpu_count; i++) {
            cpu_processor_id[i] = smp_info[i].acpi_processor_uid;
            cpu_lapic_id[i] = smp_info[i].lapic_id;
            cpu_goto_address[i] = reported_addr(&smp_info[i].goto_address);
            cpu_extra_argument[i] = reported_addr(&smp_info[i].extra_argument);
        }

        smp_response->cpu_processor_id = reported_addr(cpu_processor_id);
        smp_response->cpu_lapic_id = reported_addr(cpu_lapic_id);
        smp_response->cpu_goto_address = reported_addr(cpu_goto_address);
        smp_response->cpu_extra_argument = reported_addr(cpu_extra_argument);

        smp_request->response = reported_addr(smp_response);
    FEAT_END
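
    // Memory map feature: the response buffers are allocated *before* fetching
    // the final memory map, presumably so that these allocations themselves
    // show up in the map handed to the kernel.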
    // Memmap
    FEAT_START
        struct limine_memmap_request *memmap_request = get_request(LIMINE_MEMMAP_REQUEST);
        struct limine_memmap_response *memmap_response;
        uint64_t *memmap_base, *memmap_length;
        uint32_t *memmap_type;

        if (memmap_request != NULL) {
            memmap_response = ext_mem_alloc(sizeof(struct limine_memmap_response));
            memmap_base = ext_mem_alloc(sizeof(uint64_t) * MAX_MEMMAP);
            memmap_length = ext_mem_alloc(sizeof(uint64_t) * MAX_MEMMAP);
            memmap_type = ext_mem_alloc(sizeof(uint32_t) * MAX_MEMMAP);
        }

        size_t mmap_entries;
        struct e820_entry_t *mmap = get_memmap(&mmap_entries);

        if (memmap_request == NULL) {
            break; // next feature
        }

        if (mmap_entries > MAX_MEMMAP) {
            panic(false, "limine: Too many memmap entries");
        }

        for (size_t i = 0; i < mmap_entries; i++) {
            memmap_base[i] = mmap[i].base;
            memmap_length[i] = mmap[i].length;

            switch (mmap[i].type) {
                case MEMMAP_USABLE:
                    memmap_type[i] = LIMINE_MEMMAP_USABLE;
                    break;
                case MEMMAP_ACPI_RECLAIMABLE:
                    memmap_type[i] = LIMINE_MEMMAP_ACPI_RECLAIMABLE;
                    break;
                case MEMMAP_ACPI_NVS:
                    memmap_type[i] = LIMINE_MEMMAP_ACPI_NVS;
                    break;
                case MEMMAP_BAD_MEMORY:
                    memmap_type[i] = LIMINE_MEMMAP_BAD_MEMORY;
                    break;
                case MEMMAP_BOOTLOADER_RECLAIMABLE:
                    memmap_type[i] = LIMINE_MEMMAP_BOOTLOADER_RECLAIMABLE;
                    break;
                case MEMMAP_KERNEL_AND_MODULES:
                    memmap_type[i] = LIMINE_MEMMAP_KERNEL_AND_MODULES;
                    break;
                case MEMMAP_FRAMEBUFFER:
                    memmap_type[i] = LIMINE_MEMMAP_FRAMEBUFFER;
                    break;
                default:
                case MEMMAP_RESERVED:
                    memmap_type[i] = LIMINE_MEMMAP_RESERVED;
                    break;
            }
        }

        memmap_response->entry_count = mmap_entries;
        memmap_response->entry_base = reported_addr(memmap_base);
        memmap_response->entry_length = reported_addr(memmap_length);
        memmap_response->entry_type = reported_addr(memmap_type);

        memmap_request->response = reported_addr(memmap_response);
    FEAT_END
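
    // Hand over control: switch to 64-bit mode with the chosen paging depth,
    // the new page tables, the reported stack and the higher-half GDT, then
    // jump to the kernel entry point.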
    stivale_spinup(64, want_5lv, &pagemap, entry_point, 0,
                   reported_addr(stack), true, (uintptr_t)local_gdt);

    __builtin_unreachable();
}