Rework memmap

mintsuki 2020-06-06 16:52:21 +02:00
parent dddfe43874
commit d8b9a2bb3b
2 changed files with 48 additions and 62 deletions

Binary file not shown.


@@ -42,23 +42,16 @@ void print_memmap(struct e820_entry_t *mm, size_t size) {
}
}
static int align_entry_down(uint64_t *base, uint64_t *length) {
static int align_entry(uint64_t *base, uint64_t *length) {
if (*length < PAGE_SIZE)
return -1;
uint64_t orig_base = *base;
*base = ALIGN_UP(*base, PAGE_SIZE);
if ((*base - orig_base) > *length)
return -1;
*length = *length - (*base - orig_base);
*length = ALIGN_DOWN(*length, PAGE_SIZE);
return 0;
}
static int align_entry_with_base(uint64_t *base, uint64_t *length) {
if (align_entry_down(base, length))
return -1;
*length -= (*base - orig_base);
*length = ALIGN_DOWN(*length, PAGE_SIZE);
if (!length)
return -1;
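
For context on the hunk above: the commit folds the old align_entry_down()/align_entry_with_base() pair into a single align_entry() that page-aligns a usable entry in place. A minimal standalone sketch of that kind of helper, assuming a 4 KiB PAGE_SIZE and ALIGN_UP/ALIGN_DOWN macros along the lines this file already uses (not the commit's exact code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define ALIGN_UP(x, a)   (((x) + ((a) - 1)) / (a) * (a))
    #define ALIGN_DOWN(x, a) ((x) / (a) * (a))

    /* Round an entry's base up to a page boundary and its length down,
       rejecting entries that end up smaller than one page.
       Returns 0 on success, -1 if the entry should be dropped. */
    static int align_entry(uint64_t *base, uint64_t *length) {
        if (*length < PAGE_SIZE)
            return -1;

        uint64_t orig_base = *base;
        *base = ALIGN_UP(*base, PAGE_SIZE);

        *length -= *base - orig_base;          /* drop the bytes below the new base */
        *length  = ALIGN_DOWN(*length, PAGE_SIZE);

        if (!*length)                          /* check the value, not the pointer */
            return -1;

        return 0;
    }

    int main(void) {
        /* Hypothetical entry: base 0x52341, length 0x8000 -> 0x53000..0x5A000 */
        uint64_t base = 0x52341, length = 0x8000;
        if (align_entry(&base, &length) == 0)
            printf("aligned: base=%#llx length=%#llx\n",
                   (unsigned long long)base, (unsigned long long)length);
        return 0;
    }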
@@ -77,24 +70,51 @@ static int align_entry_with_base(uint64_t *base, uint64_t *length) {
return 0;
}
static void memmap_align_free_entries(void) {
static void sanitise_entries(void) {
for (size_t i = 0; i < memmap_entries; i++) {
if (memmap[i].type != 1)
continue;
if (align_entry_down(&memmap[i].base, &memmap[i].length)
|| !memmap[i].length) {
// Check if the entry overlaps other entries
for (size_t j = 0; j < memmap_entries; j++) {
if (j == i)
continue;
uint64_t base = memmap[i].base;
uint64_t length = memmap[i].length;
uint64_t top = base + length;
uint64_t res_base = memmap[j].base;
uint64_t res_length = memmap[j].length;
uint64_t res_top = res_base + res_length;
// TODO actually handle splitting off usable chunks
if ( (res_base >= base && res_base < top)
&& (res_top >= base && res_top < top) ) {
panic("A non-usable e820 entry is inside a usable section.");
}
if (res_base >= base && res_base < top) {
top = res_base;
}
if (res_top >= base && res_top < top) {
base = res_top;
}
memmap[i].base = base;
memmap[i].length = top - base;
}
if (!memmap[i].length || align_entry(&memmap[i].base, &memmap[i].length)) {
// Eradicate from memmap
for (size_t j = i; j < memmap_entries - 1; j++) {
memmap[j] = memmap[j+1];
}
memmap_entries--;
i--;
}
}
}
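
The rewritten sanitise_entries() above now does the overlap handling that previously lived in init_memmap(): each usable (type 1) entry is clipped against every other entry, then page-aligned, and entries that shrink to nothing are shifted out of the array. A small self-contained illustration of the clipping rule on a single pair of ranges, with hypothetical addresses chosen only for the example:

    #include <stdint.h>
    #include <stdio.h>

    /* Clip a usable range [base, top) against a non-usable range [res_base, res_top),
       mirroring the rules of the loop above: an overlap at the top lowers top, an
       overlap at the bottom raises base, and a non-usable range strictly inside the
       usable one is reported as unhandled (the real code panics there). */
    static int clip_usable(uint64_t *base, uint64_t *top,
                           uint64_t res_base, uint64_t res_top) {
        if (res_base >= *base && res_base < *top
         && res_top  >= *base && res_top  < *top)
            return -1;                  /* would need to split the usable range */

        if (res_base >= *base && res_base < *top)
            *top = res_base;            /* reserved range hangs over the top */

        if (res_top >= *base && res_top < *top)
            *base = res_top;            /* reserved range hangs under the bottom */

        return 0;
    }

    int main(void) {
        /* Hypothetical entries: usable 0x00000-0x9FC00, reserved 0x9F000-0xA0000. */
        uint64_t base = 0x00000, top = 0x9FC00;
        if (clip_usable(&base, &top, 0x9F000, 0xA0000) == 0)
            printf("usable after clipping: %#llx-%#llx\n",
                   (unsigned long long)base, (unsigned long long)top);
        return 0;
    }

The strictly-inside case still panics in the commit (the TODO about splitting off usable chunks), which the sketch mirrors by returning -1.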
struct e820_entry_t *get_memmap(size_t *entries) {
memmap_align_free_entries();
// Sort the entries
for (size_t p = 0; p < memmap_entries - 1; p++) {
@@ -110,6 +130,10 @@ struct e820_entry_t *get_memmap(size_t *entries) {
memmap[min_index] = memmap[p];
memmap[p] = min_e;
}
}
struct e820_entry_t *get_memmap(size_t *entries) {
sanitise_entries();
*entries = memmap_entries;
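
The selection sort that orders the map by ascending base address is split across the two hunks above; reconstructed here as a standalone sketch, where the struct layout and the MEMMAP_MAX capacity are assumptions rather than values taken from the commit:

    #include <stdint.h>
    #include <stddef.h>

    struct e820_entry_t {          /* assumed layout, matching the fields the diff touches */
        uint64_t base;
        uint64_t length;
        uint32_t type;
    };

    #define MEMMAP_MAX 256         /* hypothetical capacity */
    static struct e820_entry_t memmap[MEMMAP_MAX];
    static size_t memmap_entries;

    /* Selection sort of the memory map by ascending base address. */
    static void sort_memmap_by_base(void) {
        for (size_t p = 0; p + 1 < memmap_entries; p++) {
            uint64_t min       = memmap[p].base;
            size_t   min_index = p;
            for (size_t i = p + 1; i < memmap_entries; i++) {
                if (memmap[i].base < min) {
                    min       = memmap[i].base;
                    min_index = i;
                }
            }
            struct e820_entry_t min_e = memmap[min_index];
            memmap[min_index] = memmap[p];
            memmap[p]         = min_e;
        }
    }

An O(n^2) selection sort is adequate here because an e820 map holds at most a few dozen entries.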
@@ -125,48 +149,10 @@ void init_memmap(void) {
panic("Memory map exhausted.");
}
if (e820_map[i].type != 1) {
memmap[memmap_entries++] = e820_map[i];
continue;
}
uint64_t base = e820_map[i].base;
uint64_t length = e820_map[i].length;
uint64_t top = base + length;
// Check if the entry overlaps non-usable entries
for (size_t j = 0; j < e820_entries; j++) {
if (e820_map[j].type == 1)
continue;
size_t res_base = e820_map[j].base;
size_t res_length = e820_map[j].length;
size_t res_top = res_base + res_length;
// TODO actually handle splitting off usable chunks
if ( (res_base >= base && res_base < top)
&& (res_top >= base && res_top < top) ) {
panic("A non-usable e820 entry is inside a usable section.");
}
if (res_base >= base && res_base < top) {
top = res_base;
}
if (res_top >= base && res_top < top) {
base = res_top;
}
}
if (align_entry_with_base(&base, &length))
continue;
memmap[memmap_entries].type = 1;
memmap[memmap_entries].base = base;
memmap[memmap_entries].length = length;
memmap_entries++;
memmap[memmap_entries++] = e820_map[i];
}
sanitise_entries();
}
void memmap_alloc_range(uint64_t base, uint64_t length) {
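
The last hunk strips the per-entry overlap handling out of init_memmap(): every e820 entry is now copied verbatim and all trimming is deferred to sanitise_entries(). A condensed sketch of the resulting flow, with the capacity, the e820_map/e820_entries externals, and the struct layout filled in as assumptions since they sit outside the hunk:

    #include <stdint.h>
    #include <stddef.h>

    struct e820_entry_t { uint64_t base, length; uint32_t type; };  /* assumed layout */

    #define MEMMAP_MAX 256                      /* hypothetical capacity */

    extern struct e820_entry_t e820_map[];      /* filled by the E820 probe elsewhere */
    extern size_t e820_entries;
    extern void panic(const char *msg);
    extern void sanitise_entries(void);         /* the routine reworked by this commit */

    static struct e820_entry_t memmap[MEMMAP_MAX];
    static size_t memmap_entries;

    void init_memmap(void) {
        for (size_t i = 0; i < e820_entries; i++) {
            if (memmap_entries == MEMMAP_MAX)
                panic("Memory map exhausted.");
            /* Copy the raw entry; alignment and overlap clipping now happen later,
               in sanitise_entries(), rather than here. */
            memmap[memmap_entries++] = e820_map[i];
        }
        sanitise_entries();
    }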