rulimine/src/lib/memmap.c

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <lib/memmap.h>
#include <lib/e820.h>
#include <lib/blib.h>
#include <lib/print.h>
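
// Memory map limits and entry type values. Types 1-5 follow the e820/ACPI
// encoding; the 0x1000+ values are bootloader-specific extensions.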
#define PAGE_SIZE 4096
#define MEMMAP_BASE ((size_t)0x100000)
#define MEMMAP_MAX_ENTRIES 256
#define MEMMAP_USABLE 1
#define MEMMAP_RESERVED 2
#define MEMMAP_ACPI_RECLAIMABLE 3
#define MEMMAP_ACPI_NVS 4
#define MEMMAP_BAD_MEMORY 5
#define MEMMAP_BOOTLOADER_RECLAIMABLE 0x1000
#define MEMMAP_KERNEL_AND_MODULES 0x1001
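
// The bootloader's private copy of the memory map, built from the e820 map.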
static struct e820_entry_t memmap[MEMMAP_MAX_ENTRIES];
static size_t memmap_entries = 0;
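
// Human-readable name for a memory map entry type, used by print_memmap().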
static const char *memmap_type(uint32_t type) {
    switch (type) {
        case MEMMAP_USABLE:
            return "Usable RAM";
        case MEMMAP_RESERVED:
            return "Reserved";
        case MEMMAP_ACPI_RECLAIMABLE:
            return "ACPI reclaimable";
        case MEMMAP_ACPI_NVS:
            return "ACPI NVS";
        case MEMMAP_BAD_MEMORY:
            return "Bad memory";
        case MEMMAP_BOOTLOADER_RECLAIMABLE:
            return "Bootloader reclaimable";
        case MEMMAP_KERNEL_AND_MODULES:
            return "Kernel/Modules";
        default:
            return "???";
    }
}
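
// Print each entry as "[base -> top] : length <type>".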
void print_memmap(struct e820_entry_t *mm, size_t size) {
    for (size_t i = 0; i < size; i++) {
        print("[%X -> %X] : %X <%s>\n",
              mm[i].base,
              mm[i].base + mm[i].length,
              mm[i].length,
              memmap_type(mm[i].type));
    }
}
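
// Page-align an entry in place: round its base up and its length down to
// PAGE_SIZE, and clamp it so it starts no lower than MEMMAP_BASE (1 MiB).
// Returns 0 on success, -1 if nothing usable remains.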
static int align_entry(uint64_t *base, uint64_t *length) {
    if (*length < PAGE_SIZE)
        return -1;

    uint64_t orig_base = *base;

    *base = ALIGN_UP(*base, PAGE_SIZE);

    *length -= (*base - orig_base);
    *length  = ALIGN_DOWN(*length, PAGE_SIZE);

    if (!*length)
        return -1;

    uint64_t top = *base + *length;

    if (*base < MEMMAP_BASE) {
        if (top > MEMMAP_BASE) {
            *length -= MEMMAP_BASE - *base;
            *base    = MEMMAP_BASE;
        } else {
            return -1;
        }
    }

    return 0;
}
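
// Clip usable entries against every other entry, page-align what survives,
// drop entries that end up empty, and sort the map by base address.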
static void sanitise_entries(void) {
    for (size_t i = 0; i < memmap_entries; i++) {
        if (memmap[i].type != MEMMAP_USABLE)
            continue;

        // Check if the entry overlaps other entries
        for (size_t j = 0; j < memmap_entries; j++) {
            if (j == i)
                continue;

            uint64_t base   = memmap[i].base;
            uint64_t length = memmap[i].length;
            uint64_t top    = base + length;

            uint64_t res_base   = memmap[j].base;
            uint64_t res_length = memmap[j].length;
            uint64_t res_top    = res_base + res_length;

            // TODO actually handle splitting off usable chunks
            if ( (res_base >= base && res_base < top)
              && (res_top  >= base && res_top  < top) ) {
                panic("A non-usable e820 entry is inside a usable section.");
            }

            if (res_base >= base && res_base < top) {
                top = res_base;
            }

            if (res_top >= base && res_top < top) {
                base = res_top;
            }

            memmap[i].base   = base;
            memmap[i].length = top - base;
        }

        if (!memmap[i].length || align_entry(&memmap[i].base, &memmap[i].length)) {
            // Eradicate from memmap
            for (size_t j = i; j < memmap_entries - 1; j++) {
                memmap[j] = memmap[j+1];
            }
            memmap_entries--;
            i--;
        }
    }

    // Sort the entries
    for (size_t p = 0; p < memmap_entries - 1; p++) {
        uint64_t min       = memmap[p].base;
        size_t   min_index = p;
        for (size_t i = p; i < memmap_entries; i++) {
            if (memmap[i].base < min) {
                min       = memmap[i].base;
                min_index = i;
            }
        }
        struct e820_entry_t min_e = memmap[min_index];
        memmap[min_index] = memmap[p];
        memmap[p]         = min_e;
    }
}
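
// Return the sanitised memory map and its entry count, printing it for debugging.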
struct e820_entry_t *get_memmap(size_t *entries) {
    sanitise_entries();

    *entries = memmap_entries;

    print("Memory map requested. Current layout:\n");
    print_memmap(memmap, memmap_entries);

    return memmap;
}
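
// Build the initial memory map from the firmware-provided e820 entries.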
void init_memmap(void) {
    for (size_t i = 0; i < e820_entries; i++) {
        if (memmap_entries == MEMMAP_MAX_ENTRIES) {
            panic("Memory map exhausted.");
        }

        memmap[memmap_entries++] = e820_map[i];
    }

    sanitise_entries();
}
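
// Carve the range [base, base + length) out of a usable entry and give it the
// requested type, splitting the containing entry as needed. Panics if no
// usable entry fully contains the range.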
void memmap_alloc_range(uint64_t base, uint64_t length, uint32_t type) {
    uint64_t top = base + length;

    for (size_t i = 0; i < memmap_entries; i++) {
        if (memmap[i].type != MEMMAP_USABLE)
            continue;

        uint64_t entry_base = memmap[i].base;
        uint64_t entry_top  = memmap[i].base + memmap[i].length;

        if (base >= entry_base && base < entry_top &&
            top  >= entry_base && top  < entry_top) {
            // Truncate the containing entry to the part below the range
            memmap[i].length = base - entry_base;

            if (memmap[i].length == 0) {
                // Eradicate from memmap
                for (size_t j = i; j < memmap_entries - 1; j++) {
                    memmap[j] = memmap[j+1];
                }
                memmap_entries--;
            }

            if (memmap_entries >= MEMMAP_MAX_ENTRIES) {
                panic("Memory map exhausted.");
            }

            // Whatever is left above the range stays usable
            struct e820_entry_t *target = &memmap[memmap_entries];

            target->length = entry_top - top;
            if (target->length != 0) {
                target->base = top;
                target->type = MEMMAP_USABLE;

                memmap_entries++;
            }

            if (memmap_entries >= MEMMAP_MAX_ENTRIES) {
                panic("Memory map exhausted.");
            }

            // The carved range itself becomes a new entry of the requested type
            target = &memmap[memmap_entries++];

            target->type   = type;
            target->base   = base;
            target->length = length;

            return;
        }
    }

    panic("Out of memory");
}
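
// Typical call sequence elsewhere in the bootloader (illustrative sketch, not
// taken from this file): init_memmap() once at startup, memmap_alloc_range()
// with MEMMAP_KERNEL_AND_MODULES for each loaded kernel/module range, and
// get_memmap() to hand the final map to the booted kernel.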