misc: Backports from trunk

mintsuki 2023-09-16 15:12:14 -05:00
parent c5c43cb0fd
commit 042a6e9554
4 changed files with 47 additions and 28 deletions

@@ -119,6 +119,12 @@ level4:
     pml1 = get_next_level(pagemap, pml2, virt_addr, pg_size, 1, pml2_entry);
 
+    // PML1 wants PAT bit at 7 instead of 12
+    if (flags & ((uint64_t)1 << 12)) {
+        flags &= ~((uint64_t)1 << 12);
+        flags |= ((uint64_t)1 << 7);
+    }
+
     pml1[pml1_entry] = (pt_entry_t)(phys_addr | flags);
 }
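
For context: on x86-64 the PAT selector sits at bit 7 in a 4 KiB PTE, but at bit 12 in 2 MiB and 1 GiB entries, where bit 7 is already taken by the PS (page size) bit; hence the fix-up above. A minimal standalone sketch of the same remap (names are illustrative, not Limine's):

    #include <stdint.h>

    #define PTE_PAT_LARGE ((uint64_t)1 << 12) // PAT bit in 2MiB/1GiB entries
    #define PTE_PAT_4K    ((uint64_t)1 << 7)  // PAT bit in 4KiB PTEs

    // Move the PAT selector to the position a 4KiB PTE expects.
    static inline uint64_t pat_fixup_4k(uint64_t flags) {
        if (flags & PTE_PAT_LARGE) {
            flags &= ~PTE_PAT_LARGE;
            flags |= PTE_PAT_4K;
        }
        return flags;
    }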

@@ -8,7 +8,7 @@
 #define VMM_FLAG_WRITE ((uint64_t)1 << 1)
 #define VMM_FLAG_NOEXEC ((uint64_t)1 << 63)
-#define VMM_FLAG_FB ((uint64_t)0)
+#define VMM_FLAG_FB (((uint64_t)1 << 3) | ((uint64_t)1 << 12))
 
 #define VMM_MAX_LEVEL 3
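
This redefinition makes VMM_FLAG_FB request a PAT memory type instead of the default cacheable one: bit 3 is PWT and bit 12 is the PAT selector in its large-page position (map_page moves it to bit 7 for 4 KiB pages, per the hunk above). The PAT index of an entry is PAT<<2 | PCD<<1 | PWT, so this selects index 0b101 = 5, which the MSR programming later in this commit makes write-combining. A sketch of the index computation, assuming the standard bit layout:

    #include <stdint.h>

    // x86 page attribute bits, large-page positions.
    #define PG_PWT ((uint64_t)1 << 3)
    #define PG_PCD ((uint64_t)1 << 4)
    #define PG_PAT ((uint64_t)1 << 12)

    // PAT index = PAT<<2 | PCD<<1 | PWT.
    static inline unsigned pat_index(uint64_t flags) {
        return ((flags & PG_PAT) ? 4 : 0) |
               ((flags & PG_PCD) ? 2 : 0) |
               ((flags & PG_PWT) ? 1 : 0);
    }
    // pat_index(VMM_FLAG_FB) == 5, programmed as WC below.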

@@ -41,28 +41,25 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
     pagemap_t pagemap = new_pagemap(paging_mode);
 
     if (ranges_count == 0) {
-        // Map 0 to 2GiB at 0xffffffff80000000
-        for (uint64_t i = 0; i < 0x80000000; i += 0x40000000) {
-            map_page(pagemap, 0xffffffff80000000 + i, i, VMM_FLAG_WRITE, Size1GiB);
-        }
-    } else {
-        for (size_t i = 0; i < ranges_count; i++) {
-            uint64_t virt = ranges[i].base;
-            uint64_t phys;
+        panic(true, "limine: ranges_count == 0");
+    }
+
+    for (size_t i = 0; i < ranges_count; i++) {
+        uint64_t virt = ranges[i].base;
+        uint64_t phys;
 
-            if (virt & ((uint64_t)1 << 63)) {
-                phys = physical_base + (virt - virtual_base);
-            } else {
-                panic(false, "limine: Protected memory ranges are only supported for higher half kernels");
-            }
+        if (virt & ((uint64_t)1 << 63)) {
+            phys = physical_base + (virt - virtual_base);
+        } else {
+            panic(false, "limine: Virtual address of a PHDR in lower half");
+        }
 
-            uint64_t pf =
-                (ranges[i].permissions & ELF_PF_X ? 0 : (nx ? VMM_FLAG_NOEXEC : 0)) |
-                (ranges[i].permissions & ELF_PF_W ? VMM_FLAG_WRITE : 0);
+        uint64_t pf =
+            (ranges[i].permissions & ELF_PF_X ? 0 : (nx ? VMM_FLAG_NOEXEC : 0)) |
+            (ranges[i].permissions & ELF_PF_W ? VMM_FLAG_WRITE : 0);
 
-            for (uint64_t j = 0; j < ranges[i].length; j += 0x1000) {
-                map_page(pagemap, virt + j, phys + j, pf, Size4KiB);
-            }
-        }
+        for (uint64_t j = 0; j < ranges[i].length; j += 0x1000) {
+            map_page(pagemap, virt + j, phys + j, pf, Size4KiB);
+        }
     }
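
The old ranges_count == 0 fallback (a blanket 2 GiB higher-half mapping) is gone; the loader now requires explicit PHDR ranges and panics otherwise. The bit-63 test identifies higher-half virtual addresses, for which the load address is physical_base + (virt - virtual_base). A worked example with made-up bases (values are illustrative only):

    #include <stdint.h>

    // Illustrative: a kernel linked at the canonical higher-half base,
    // loaded at an arbitrary physical address.
    static const uint64_t virtual_base  = 0xffffffff80000000;
    static const uint64_t physical_base = 0x0000000007e00000;

    static uint64_t virt_to_phys(uint64_t virt) {
        // Callers have already checked bit 63 (higher half).
        return physical_base + (virt - virtual_base);
    }
    // virt_to_phys(0xffffffff80001000) == 0x7e01000
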
@@ -108,11 +105,13 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
         uint64_t length = _memmap[i].length;
         uint64_t top = base + length;
 
-        if (base < 0x100000000)
+        if (base < 0x100000000) {
             base = 0x100000000;
+        }
 
-        if (base >= top)
+        if (base >= top) {
             continue;
+        }
 
         uint64_t aligned_base = ALIGN_DOWN(base, 0x40000000);
         uint64_t aligned_top = ALIGN_UP(top, 0x40000000);
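
ALIGN_DOWN/ALIGN_UP expand the range to enclosing 1 GiB boundaries (0x40000000) so it can be covered with large pages. Assuming the usual power-of-two definitions of those macros, a sketch:

    #include <stdint.h>

    // Typical power-of-two alignment helpers (assumed, not Limine's exact macros).
    #define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
    #define ALIGN_UP(x, a)   ALIGN_DOWN((x) + ((a) - 1), (a))

    // E.g. [0x123400000, 0x156700000) expands to the enclosing
    // 1GiB-aligned range [0x100000000, 0x180000000) before mapping.
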
@@ -125,16 +124,16 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
         }
     }
 
-    // Map the framebuffer as uncacheable
-#if defined (__aarch64__)
+    // Map the framebuffer with appropriate permissions
     for (size_t i = 0; i < _memmap_entries; i++) {
+        if (_memmap[i].type != MEMMAP_FRAMEBUFFER) {
+            continue;
+        }
+
         uint64_t base = _memmap[i].base;
         uint64_t length = _memmap[i].length;
         uint64_t top = base + length;
 
-        if (_memmap[i].type != MEMMAP_FRAMEBUFFER)
-            continue;
-
         uint64_t aligned_base = ALIGN_DOWN(base, 0x1000);
         uint64_t aligned_top = ALIGN_UP(top, 0x1000);
         uint64_t aligned_length = aligned_top - aligned_base;
@@ -145,7 +144,6 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
             map_page(pagemap, direct_map_offset + page, page, VMM_FLAG_WRITE | VMM_FLAG_FB, Size4KiB);
         }
     }
-#endif
 
     return pagemap;
 }
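
With the #if defined (__aarch64__) guard removed, the framebuffer is mapped with VMM_FLAG_FB on every architecture; on x86-64 this only became meaningful with this commit's PAT changes. Tracing a 4 KiB framebuffer PTE end to end, as a self-contained check reusing the diff's bit values:

    #include <stdint.h>
    #include <assert.h>

    #define VMM_FLAG_WRITE ((uint64_t)1 << 1)
    #define VMM_FLAG_FB    (((uint64_t)1 << 3) | ((uint64_t)1 << 12))

    int main(void) {
        uint64_t flags = VMM_FLAG_WRITE | VMM_FLAG_FB;
        // 4KiB fix-up from the first hunk: PAT bit 12 -> bit 7.
        if (flags & ((uint64_t)1 << 12)) {
            flags &= ~((uint64_t)1 << 12);
            flags |= ((uint64_t)1 << 7);
        }
        // PAT index = PAT(bit 7)<<2 | PCD(bit 4)<<1 | PWT(bit 3).
        unsigned idx = (!!(flags & ((uint64_t)1 << 7)) << 2) |
                       (!!(flags & ((uint64_t)1 << 4)) << 1) |
                        !!(flags & ((uint64_t)1 << 3));
        assert(idx == 5); // PAT5 is programmed as WC below
        return 0;
    }
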
@@ -1079,6 +1077,16 @@ FEAT_END
     rm_int(0x15, &r, &r);
 #endif
 
+    // Set PAT as:
+    // PAT0 -> WB  (06)
+    // PAT1 -> WT  (04)
+    // PAT2 -> UC- (07)
+    // PAT3 -> UC  (00)
+    // PAT4 -> WP  (05)
+    // PAT5 -> WC  (01)
+    uint64_t pat = (uint64_t)0x010500070406;
+    wrmsr(0x277, pat);
+
     pic_mask_all();
     io_apic_mask_all();
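
wrmsr(0x277, pat) programs IA32_PAT; PAT6 and PAT7 stay 00 (UC) because the value's top two bytes are zero. The wrmsr instruction takes the MSR index in ECX and the value split across EDX:EAX, so a typical wrapper looks like this (a sketch; Limine's actual helper may differ):

    #include <stdint.h>

    static inline void wrmsr(uint32_t msr, uint64_t value) {
        uint32_t lo = (uint32_t)value;         // -> EAX
        uint32_t hi = (uint32_t)(value >> 32); // -> EDX
        asm volatile ("wrmsr" : : "c"(msr), "a"(lo), "d"(hi) : "memory");
    }
    // wrmsr(0x277, 0x010500070406) loads EDX=0x00000105, EAX=0x00070406.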

@@ -39,6 +39,11 @@ smp_trampoline_start:
     xor eax, eax
     mov cr4, eax
 
+    mov ecx, 0x277
+    mov eax, 0x00070406
+    mov edx, 0x00000105
+    wrmsr
+
     test dword [ebx + (passed_info.target_mode - smp_trampoline_start)], (1 << 2)
     jz .nox2apic
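
IA32_PAT is per logical processor, so the SMP trampoline repeats the write on each AP before it enters long mode: ECX = 0x277, and EDX:EAX = 0x00000105:0x00070406 reassembles to the same 0x010500070406 the BSP wrote above. A compile-time check of that split (illustrative):

    #include <stdint.h>

    _Static_assert((((uint64_t)0x00000105 << 32) | 0x00070406) == 0x010500070406,
                   "trampoline EDX:EAX must match the BSP's IA32_PAT value");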