Merge pull request #151 from Andy-Python-Programmer/trunk

Use 1GiB pages wherever possible
mint 2022-01-27 05:44:16 +01:00 committed by GitHub
commit 10b585a9f1
3 changed files with 69 additions and 13 deletions


@@ -3,6 +3,7 @@
#include <mm/vmm.h>
#include <mm/pmm.h>
#include <lib/blib.h>
#include <lib/print.h>
#include <sys/cpu.h>
#define PT_SIZE ((uint64_t)0x1000)
@@ -39,7 +40,25 @@ pagemap_t new_pagemap(int lv) {
return pagemap;
}
void map_page(pagemap_t pagemap, uint64_t virt_addr, uint64_t phys_addr, uint64_t flags, bool hugepages) {
static bool is_1gib_page_supported(void) {
// Cache the cpuid result :^)
static bool CACHE_INIT = false;
static bool CACHE = false;
if (!CACHE_INIT) {
// Check if 1GiB pages are supported:
uint32_t eax, ebx, ecx, edx;
CACHE = cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx) && ((edx & 1 << 26) == 1 << 26);
CACHE_INIT = true;
printv("paging: 1GiB pages are %s!\n", CACHE ? "supported" : "not supported");
}
return CACHE;
}
void map_page(pagemap_t pagemap, uint64_t virt_addr, uint64_t phys_addr, uint64_t flags, enum page_size pg_size) {
// Calculate the indices in the various tables using the virtual address
size_t pml5_entry = (virt_addr & ((uint64_t)0x1ff << 48)) >> 48;
size_t pml4_entry = (virt_addr & ((uint64_t)0x1ff << 39)) >> 39;
@@ -65,9 +84,24 @@ level5:
pml4 = get_next_level(pml5, pml5_entry);
level4:
pml3 = get_next_level(pml4, pml4_entry);
if (pg_size == Size1GiB) {
// Check if 1GiB pages are available.
if (is_1gib_page_supported()) {
pml3[pml3_entry] = (pt_entry_t)(phys_addr | flags | (1 << 7));
return;
} else {
// If 1GiB pages are not supported, emulate the 1GiB mapping by
// splitting it into 512 2MiB pages.
for (uint64_t i = 0; i < 0x40000000; i += 0x200000) {
map_page(pagemap, virt_addr + i, phys_addr + i, flags, Size2MiB);
}
}
}
pml2 = get_next_level(pml3, pml3_entry);
if (hugepages) {
if (pg_size == Size2MiB) {
pml2[pml2_entry] = (pt_entry_t)(phys_addr | flags | (1 << 7));
return;
}
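
The new is_1gib_page_supported() helper reduces to a single CPUID feature test: bit 26 of EDX in extended leaf 0x80000001 (the "pdpe1gb" flag). Below is a minimal standalone sketch of the same check for a hosted x86 build; it assumes GCC/Clang's <cpuid.h> intrinsic rather than the bootloader's own cpuid() wrapper, so it is illustrative only.

#include <cpuid.h>    // GCC/Clang intrinsic header (assumption: hosted x86 build)
#include <stdbool.h>
#include <stdio.h>

static bool cpu_has_1gib_pages(void) {
    unsigned int eax, ebx, ecx, edx;
    // __get_cpuid() returns 0 when the requested leaf is unavailable.
    if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
        return false;
    // EDX bit 26 ("pdpe1gb") advertises 1GiB page support.
    return (edx & (1u << 26)) != 0;
}

int main(void) {
    printf("paging: 1GiB pages are %s!\n",
           cpu_has_1gib_pages() ? "supported" : "not supported");
    return 0;
}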


@@ -13,8 +13,14 @@ typedef struct {
void *top_level;
} pagemap_t;
enum page_size {
Size4KiB,
Size2MiB,
Size1GiB
};
void vmm_assert_nx(void);
pagemap_t new_pagemap(int lv);
void map_page(pagemap_t pagemap, uint64_t virt_addr, uint64_t phys_addr, uint64_t flags, bool hugepages);
void map_page(pagemap_t pagemap, uint64_t virt_addr, uint64_t phys_addr, uint64_t flags, enum page_size page_size);
#endif
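
The header now selects the mapping granularity through an explicit enum rather than a bool hugepages flag. A hedged usage sketch follows, assuming only the declarations above and the 0x03 (present | writable) flag value used elsewhere in this patch; the helper name is hypothetical.

// Hypothetical caller: identity-map the first 4GiB one 1GiB page per call.
// map_page() itself falls back to 512 x 2MiB pages when the CPU does not
// support 1GiB pages.
static void identity_map_low_4gib(pagemap_t pagemap) {
    for (uint64_t gib = 0; gib < 0x100000000; gib += 0x40000000) {
        map_page(pagemap, gib, gib, 0x03, Size1GiB);
    }
}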


@@ -385,8 +385,8 @@ pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range
if (ranges_count == 0) {
// Map 0 to 2GiB at 0xffffffff80000000
for (uint64_t i = 0; i < 0x80000000; i += 0x200000) {
map_page(pagemap, 0xffffffff80000000 + i, i, 0x03, true);
for (uint64_t i = 0; i < 0x80000000; i += 0x40000000) {
map_page(pagemap, 0xffffffff80000000 + i, i, 0x03, Size1GiB);
}
} else {
for (size_t i = 0; i < ranges_count; i++) {
@@ -408,7 +408,7 @@ pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range
(ranges[i].permissions & ELF_PF_W ? VMM_FLAG_WRITE : 0);
for (uint64_t j = 0; j < ranges[i].length; j += 0x1000) {
map_page(pagemap, virt + j, phys + j, pf, false);
map_page(pagemap, virt + j, phys + j, pf, Size4KiB);
}
}
}
@@ -416,14 +416,30 @@ pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range
// Sub 2MiB mappings
for (uint64_t i = 0; i < 0x200000; i += 0x1000) {
if (!(i == 0 && unmap_null))
map_page(pagemap, i, i, 0x03, false);
map_page(pagemap, direct_map_offset + i, i, 0x03, false);
map_page(pagemap, i, i, 0x03, Size4KiB);
map_page(pagemap, direct_map_offset + i, i, 0x03, Size4KiB);
}
// Map 2MiB to 4GiB at higher half base and 0
for (uint64_t i = 0x200000; i < 0x100000000; i += 0x200000) {
map_page(pagemap, i, i, 0x03, true);
map_page(pagemap, direct_map_offset + i, i, 0x03, true);
//
// NOTE: We cannot map the whole 2MiB to 4GiB range with 1GiB
// pages, because the start of the range is not 1GiB-aligned
// and the first stretch does not even cover a whole 1GiB page:
//
// start = 0x200000   (2MiB)
// end   = 0x40000000 (1GiB)
//
// pages_required = (end - start) / (4096 * 512 * 512) < 1
//
// So we map 2MiB to 1GiB with 2MiB pages and then map the rest
// with 1GiB pages :^)
for (uint64_t i = 0x200000; i < 0x40000000; i += 0x200000) {
map_page(pagemap, i, i, 0x03, Size2MiB);
map_page(pagemap, direct_map_offset + i, i, 0x03, Size2MiB);
}
for (uint64_t i = 0x40000000; i < 0x100000000; i += 0x40000000) {
map_page(pagemap, i, i, 0x03, Size1GiB);
map_page(pagemap, direct_map_offset + i, i, 0x03, Size1GiB);
}
size_t _memmap_entries = memmap_entries;
@@ -450,8 +466,8 @@ pagemap_t stivale_build_pagemap(bool level5pg, bool unmap_null, struct elf_range
for (uint64_t j = 0; j < aligned_length; j += 0x200000) {
uint64_t page = aligned_base + j;
map_page(pagemap, page, page, 0x03, true);
map_page(pagemap, direct_map_offset + page, page, 0x03, true);
map_page(pagemap, page, page, 0x03, Size2MiB);
map_page(pagemap, direct_map_offset + page, page, 0x03, Size2MiB);
}
}
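
A quick standalone check of the arithmetic in the NOTE above (illustrative only, plain hosted C): the 2MiB to 1GiB stretch is neither 1GiB-aligned nor large enough for a single 1GiB page, but it splits exactly into 511 2MiB pages.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    const uint64_t start = 0x200000;              // 2MiB
    const uint64_t end   = 0x40000000;            // 1GiB
    const uint64_t gib   = 4096ULL * 512 * 512;   // 0x40000000

    printf("start %% 1GiB         = %#llx (not 1GiB-aligned)\n",
           (unsigned long long)(start % gib));
    printf("(end - start) / 1GiB = %llu whole 1GiB pages\n",
           (unsigned long long)((end - start) / gib));
    printf("(end - start) / 2MiB = %llu whole 2MiB pages\n",
           (unsigned long long)((end - start) / 0x200000));
    return 0;
}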