mirror of https://github.com/limine-bootloader/limine, synced 2024-11-28 11:23:09 +03:00

commit f9682543fd (parent d9f8ce2b6f)

    misc: Converge with 5.x
@@ -21,10 +21,6 @@ way to modify its own EFI executable to bake in the BLAKE2B checksum of the conf
 a key added to the firmware's keychain. This prevents modifications to the config file (and in turn the checksums contained there)
 from going unnoticed.
 
-### What about ext2/3/4? Why is that supported then?
-
-Simply put, legacy. And because a lot of Linux users expect it to "work that way". ext2/3/4 support has been dropped as of Limine 6.x.
-
 ### But I don't want to have a separate FAT boot partition! I don't want it!!!
 
 Well tough luck. It is `$year_following_2012` now and most PCs are equipped with UEFI and simply won't boot without a FAT EFI system partition
PROTOCOL.md (22 changed lines)
@@ -87,7 +87,7 @@ The protocol mandates kernels to load themselves at or above
 `0xffffffff80000000`. Lower half kernels are *not supported*.
 
 At handoff, the kernel will be properly loaded and mapped with appropriate
-MMU permissions at the requested virtual memory address (provided it is at
+MMU permissions, as supervisor, at the requested virtual memory address (provided it is at
 or above `0xffffffff80000000`).
 
 No specific physical memory placement is guaranteed, except that the kernel
@@ -95,12 +95,14 @@ is guaranteed to be physically contiguous. In order to determine
 where the kernel is loaded in physical memory, see the Kernel Address feature
 below.
 
-Alongside the loaded kernel, the bootloader will set up memory mappings such
-that every usable, bootloader reclaimable, framebuffer, or kernel/modules
-memory map region is mapped at HHDM offset + its physical address.
-Additionally, the whole 0->4GiB physical memory region will also be mapped
-at HHDM offset + physical address, regardless of the contents of the
-memory map. These mappings are supervisor, read, write, execute (-rwx).
+Alongside the loaded kernel, the bootloader will set up memory mappings as such:
+```
+ Base Physical Address |                        Length                         | Base Virtual Address
+  0x0000000000001000   | (4 GiB - 0x1000) and any additional memory map region |  0x0000000000001000
+  0x0000000000000000   |      4 GiB and any additional memory map region       |      HHDM start
+```
+Where "HHDM start" is returned by the Higher Half Direct Map feature (see below).
+These mappings are supervisor, read, write, execute (-rwx).
 
 The bootloader page tables are in bootloader-reclaimable memory (see Memory Map
 feature below), and their specific layout is undefined as long as they provide
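As an illustration of consuming this mapping from the kernel side, here is a minimal sketch, assuming the `limine.h` header that ships with the Limine project:

```c
#include <stdint.h>
#include <limine.h>

// Ask the bootloader for the HHDM offset ("HHDM start" in the table above).
static volatile struct limine_hhdm_request hhdm_request = {
    .id = LIMINE_HHDM_REQUEST,
    .revision = 0,
};

// Any physical address covered by the table above (memory map entries,
// plus the whole 0 -> 4 GiB range) is reachable at HHDM offset + phys.
static inline void *phys_to_virt(uint64_t phys) {
    return (void *)(uintptr_t)(hhdm_request.response->offset + phys);
}
```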
@@ -117,7 +119,7 @@ config).
 The kernel executable, loaded at or above `0xffffffff80000000`, sees all of its
 segments mapped using write-back (WB) caching at the page tables level.
 
-All HHDM memory regions are mapped using write-back (WB) caching at the page
+All HHDM and identity map memory regions are mapped using write-back (WB) caching at the page
 tables level, except framebuffer regions which are mapped using write-combining
 (WC) caching at the page tables level.
 
@@ -140,7 +142,7 @@ The MTRRs are left as the firmware set them up.
 The kernel executable, loaded at or above `0xffffffff80000000`, sees all of its
 segments mapped using Normal Write-Back RW-Allocate non-transient caching mode.
 
-All HHDM memory regions are mapped using the Normal Write-Back RW-Allocate
+All HHDM and identity map memory regions are mapped using the Normal Write-Back RW-Allocate
 non-transient caching mode, except for the framebuffer regions, which are
 mapped in using an unspecified caching mode, correct for use with the
 framebuffer on the platform.
@@ -155,7 +157,7 @@ is used on its own.
 
 If the `Svpbmt` extension is available, all framebuffer memory regions are mapped
 with `PBMT=NC` to enable write-combining optimizations. The kernel executable,
-loaded at or above `0xffffffff80000000`, and all HHDM memory regions are mapped
+loaded at or above `0xffffffff80000000`, and all HHDM and identity map memory regions are mapped
 with the default `PBMT=PMA`.
 
 If the `Svpbmt` extension is not available, no PMAs can be overridden (effectively,
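For reference, the `PBMT` values named here are the RISC-V Svpbmt page-table-entry encodings (PTE bits 62:61); a sketch of those constants, with macro names of my choosing:

```c
// Svpbmt: PTE bits 62:61 select the page-based memory type.
#define PTE_PBMT_SHIFT 61
#define PTE_PBMT_PMA   (0ULL << PTE_PBMT_SHIFT) // default, PMA-governed (kernel, HHDM, identity map)
#define PTE_PBMT_NC    (1ULL << PTE_PBMT_SHIFT) // non-cacheable, idempotent (framebuffers)
#define PTE_PBMT_IO    (2ULL << PTE_PBMT_SHIFT) // strongly-ordered I/O
```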
@@ -99,7 +99,7 @@ noreturn void enter_in_el1(uint64_t entry, uint64_t sp, uint64_t sctlr,
                            uint64_t mair, uint64_t tcr, uint64_t ttbr0,
                            uint64_t ttbr1, uint64_t target_x0);
 #elif defined (__riscv64)
-noreturn void riscv_spinup(uint64_t entry, uint64_t sp, uint64_t satp, uint64_t direct_map_offset);
+noreturn void riscv_spinup(uint64_t entry, uint64_t sp, uint64_t satp);
 #if defined (UEFI)
 RISCV_EFI_BOOT_PROTOCOL *get_riscv_boot_protocol(void);
 #endif
@@ -4,7 +4,7 @@
 
 // noreturn void enter_in_el1(uint64_t entry, uint64_t sp, uint64_t sctlr,
 //                            uint64_t mair, uint64_t tcr, uint64_t ttbr0,
-//                            uint64_t ttbr1, uint64_t direct_map_offset)
+//                            uint64_t ttbr1, uint64_t target_x0)
 // Potentially drop to EL1 from EL2 (and also disable trapping to EL2), then
 // configure EL1 state and jump to kernel.
 
@@ -13,50 +13,6 @@ enter_in_el1:
     msr spsel, #0
     mov sp, x1
 
-    PICK_EL x8, 0f, 2f
-0:
-    // Switch to the new page tables
-
-    // Point the EL1t handler to the continuation, such that after we page fault,
-    // execution continues and the kernel is entered.
-    adrp x8, 1f
-    add x8, x8, #:lo12:1f
-    add x8, x8, x7
-    msr vbar_el1, x8
-    isb
-    dsb sy
-    isb
-
-    // Switch the page table registers
-    msr mair_el1, x3
-    msr tcr_el1, x4
-    msr ttbr0_el1, x5
-    msr ttbr1_el1, x6
-    msr sctlr_el1, x2
-    isb
-    dsb sy
-    isb
-
-    // Jump to the higher half mapping in case we didn't immediately crash
-    br x8
-
-    // Alignment required by VBAR register
-.align 11
-1:
-    // Zero out VBAR to avoid confusion
-    msr vbar_el1, xzr
-
-    // Enter kernel in EL1
-    mov x8, #0x3c4
-    msr spsr_el1, x8
-    msr elr_el1, x0
-
-    mov x0, xzr
-    ZERO_REGS_EXCEPT_X0
-
-    eret
-
-2:
     // Configure EL1 state
     msr mair_el1, x3
     msr tcr_el1, x4
@@ -66,6 +22,19 @@ enter_in_el1:
     dsb sy
     isb
 
+    PICK_EL x8, 0f, 1f
+0:
+    // Enter kernel in EL1
+    mov x8, #0x3c4
+    msr spsr_el1, x8
+    msr elr_el1, x0
+
+    mov x0, x7
+    ZERO_REGS_EXCEPT_X0
+
+    eret
+
+1:
     // Configure EL2-specific state for EL1
 
     // Don't trap counters to EL2
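A side note on the `0x3c4` written to `spsr_el1` here (and to `spsr_el2` below): per the Armv8 architecture manual it encodes "DAIF interrupts masked, return to EL1t". A small standalone check of that arithmetic:

```c
// SPSR fields (Armv8): D/A/I/F mask bits 9:6, M[3:0] = 0b0100 selects EL1t.
#define SPSR_D      (1u << 9)
#define SPSR_A      (1u << 8)
#define SPSR_I      (1u << 7)
#define SPSR_F      (1u << 6)
#define SPSR_M_EL1T 0x4u

_Static_assert((SPSR_D | SPSR_A | SPSR_I | SPSR_F | SPSR_M_EL1T) == 0x3c4,
               "0x3c4 == DAIF masked + EL1t");
```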
@@ -88,7 +57,7 @@ enter_in_el1:
     msr spsr_el2, x8
     msr elr_el2, x0
 
-    mov x0, xzr
+    mov x0, x7
     ZERO_REGS_EXCEPT_X0
 
     eret
@@ -6,19 +6,11 @@ riscv_spinup:
 .option norelax
     csrci sstatus, 0x2
     csrw sie, zero
 
-    lla t0, 0f
-    add t0, t0, a3
-    csrw stvec, t0
-    csrw satp, a2
-    sfence.vma
-    unimp
-.align 4
-0:
-    csrw stvec, zero
-
     mv t0, a0
     mv sp, a1
+    csrw satp, a2
 
     mv a0, zero
     mv a1, zero
@@ -63,8 +63,33 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
         }
     }
 
-    // Map 0->4GiB range to HHDM
-    for (uint64_t i = 0; i < 0x100000000; i += 0x40000000) {
+    // Sub 2MiB mappings
+    for (uint64_t i = 0; i < 0x200000; i += 0x1000) {
+        if (i != 0) {
+            map_page(pagemap, i, i, VMM_FLAG_WRITE, Size4KiB);
+        }
+        map_page(pagemap, direct_map_offset + i, i, VMM_FLAG_WRITE, Size4KiB);
+    }
+
+    // Map 2MiB to 4GiB at higher half base and 0
+    //
+    // NOTE: We cannot just directly map from 2MiB to 4GiB with 1GiB
+    // pages, because, if you do the math:
+    //
+    //     start = 0x200000
+    //     end   = 0x40000000
+    //
+    //     pages_required = (end - start) / (4096 * 512 * 512)
+    //
+    // it does not come out whole. So we map 2MiB to 1GiB with 2MiB pages
+    // and then map the rest with 1GiB pages :^)
+    for (uint64_t i = 0x200000; i < 0x40000000; i += 0x200000) {
+        map_page(pagemap, i, i, VMM_FLAG_WRITE, Size2MiB);
+        map_page(pagemap, direct_map_offset + i, i, VMM_FLAG_WRITE, Size2MiB);
+    }
+
+    for (uint64_t i = 0x40000000; i < 0x100000000; i += 0x40000000) {
+        map_page(pagemap, i, i, VMM_FLAG_WRITE, Size1GiB);
         map_page(pagemap, direct_map_offset + i, i, VMM_FLAG_WRITE, Size1GiB);
     }
 
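Spelling out the math in that comment, as a standalone check (not part of the Limine sources):

```c
#define START 0x200000ULL           // 2 MiB
#define END   0x40000000ULL         // 1 GiB
#define GIB   (4096ULL * 512 * 512) // one 1 GiB page

// A 1 GiB mapping must start on a 1 GiB boundary, and the span is less
// than one whole page, so 1 GiB pages cannot cover [START, END):
_Static_assert(START % GIB != 0, "2 MiB is not 1 GiB-aligned");
_Static_assert((END - START) % GIB != 0, "span is not a whole number of 1 GiB pages");
// Hence [2 MiB, 1 GiB) is mapped with 2 MiB pages, then [1 GiB, 4 GiB)
// with 1 GiB pages.
```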
@@ -74,14 +99,8 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
     for (size_t i = 0; i < _memmap_entries; i++)
         _memmap[i] = memmap[i];
 
-    // Map all free memory regions to the higher half direct map offset
+    // Map any other region of memory from the memmap
     for (size_t i = 0; i < _memmap_entries; i++) {
-        if (_memmap[i].type != MEMMAP_USABLE
-         && _memmap[i].type != MEMMAP_BOOTLOADER_RECLAIMABLE
-         && _memmap[i].type != MEMMAP_KERNEL_AND_MODULES) {
-            continue;
-        }
-
         uint64_t base = _memmap[i].base;
         uint64_t length = _memmap[i].length;
         uint64_t top = base + length;
@@ -100,6 +119,7 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
 
         for (uint64_t j = 0; j < aligned_length; j += 0x40000000) {
             uint64_t page = aligned_base + j;
+            map_page(pagemap, page, page, VMM_FLAG_WRITE, Size1GiB);
             map_page(pagemap, direct_map_offset + page, page, VMM_FLAG_WRITE, Size1GiB);
         }
     }
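The `aligned_base` and `aligned_length` used by this loop are computed above the visible hunk; a plausible reconstruction of that arithmetic for the 1 GiB case, with hypothetical helper names (the real code may use project macros instead):

```c
#include <stdint.h>

// Round down/up to a power-of-two boundary (hypothetical helpers).
static inline uint64_t align_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static inline uint64_t align_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

// aligned_base   = align_down(base, 0x40000000);
// aligned_length = align_up(top, 0x40000000) - aligned_base;
// i.e. every memmap entry is expanded to whole 1 GiB pages before mapping.
```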
@@ -120,17 +140,11 @@ static pagemap_t build_pagemap(int paging_mode, bool nx, struct elf_range *range
 
         for (uint64_t j = 0; j < aligned_length; j += 0x1000) {
             uint64_t page = aligned_base + j;
+            map_page(pagemap, page, page, VMM_FLAG_WRITE | VMM_FLAG_FB, Size4KiB);
             map_page(pagemap, direct_map_offset + page, page, VMM_FLAG_WRITE | VMM_FLAG_FB, Size4KiB);
         }
     }
 
-    // XXX we do this as a quick and dirty way to switch to the higher half
-#if defined (__x86_64__) || defined (__i386__)
-    for (uint64_t i = 0; i < 0x100000000; i += 0x40000000) {
-        map_page(pagemap, i, i, VMM_FLAG_WRITE, Size1GiB);
-    }
-#endif
-
     return pagemap;
 }
 
@@ -944,10 +958,9 @@ FEAT_START
     uint64_t bsp_mpidr;
 
     smp_info = init_smp(&cpu_count, &bsp_mpidr,
-                        pagemap, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa), LIMINE_SCTLR,
-                        direct_map_offset);
+                        pagemap, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa), LIMINE_SCTLR);
 #elif defined (__riscv64)
-    smp_info = init_smp(&cpu_count, pagemap, direct_map_offset);
+    smp_info = init_smp(&cpu_count, pagemap);
 #else
 #error Unknown architecture
 #endif
@@ -1081,12 +1094,11 @@ FEAT_END
 
     uint64_t reported_stack = reported_addr(stack);
 
-    common_spinup(limine_spinup_32, 10,
+    common_spinup(limine_spinup_32, 8,
                   paging_mode, (uint32_t)(uintptr_t)pagemap.top_level,
                   (uint32_t)entry_point, (uint32_t)(entry_point >> 32),
                   (uint32_t)reported_stack, (uint32_t)(reported_stack >> 32),
-                  (uint32_t)(uintptr_t)local_gdt, nx_available,
-                  (uint32_t)direct_map_offset, (uint32_t)(direct_map_offset >> 32));
+                  (uint32_t)(uintptr_t)local_gdt, nx_available);
 #elif defined (__aarch64__)
     vmm_assert_4k_pages();
 
@@ -1094,13 +1106,12 @@ FEAT_END
 
     enter_in_el1(entry_point, reported_stack, LIMINE_SCTLR, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa),
                  (uint64_t)pagemap.top_level[0],
-                 (uint64_t)pagemap.top_level[1],
-                 direct_map_offset);
+                 (uint64_t)pagemap.top_level[1], 0);
 #elif defined (__riscv64)
     uint64_t reported_stack = reported_addr(stack);
     uint64_t satp = make_satp(pagemap.paging_mode, pagemap.top_level);
 
-    riscv_spinup(entry_point, reported_stack, satp, direct_map_offset);
+    riscv_spinup(entry_point, reported_stack, satp);
 #else
 #error Unknown architecture
 #endif
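`make_satp` itself is outside this diff; per the RISC-V privileged specification, `satp` packs the translation mode into bits 63:60 and the root-table physical page number into bits 43:0, so a plausible sketch (shape assumed, not the project's actual code) is:

```c
#include <stdint.h>

// satp layout (RV64): MODE[63:60] | ASID[59:44] | PPN[43:0].
static uint64_t make_satp_sketch(uint64_t mode, void *top_level) {
    return (mode << 60) | ((uint64_t)(uintptr_t)top_level >> 12);
}
```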
@@ -67,24 +67,6 @@ bits 64
     mov eax, [rsp+28] ; local_gdt
     lgdt [rax]
 
-    ; Jump to higher half
-    mov rax, qword [rsp+36]
-    add rsp, rax
-    call .p2
-  .p2:
-    add qword [rsp], .hh - .p2
-    add qword [rsp], rax
-    retq
-  .hh:
-
-    ; Unmap lower half entirely
-    mov rsi, cr3
-    lea rdi, [rsi + rax]
-    mov rcx, 256
-    xor rax, rax
-    rep stosq
-    mov cr3, rsi
-
     ; Push fake return address
     mov rsi, [rsp+20] ; stack
     sub rsi, 8
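The `mov rcx, 256` in the removed block is the count of lower-half PML4 entries: a 4-level root table has 512 entries covering 512 GiB each, and indices 0 through 255 span exactly the lower half of the canonical address space. A standalone check (helper name mine, not from the sources):

```c
#include <stdint.h>

// Bits 47:39 of a virtual address select the PML4 entry.
static inline unsigned pml4_index(uint64_t virt) {
    return (unsigned)((virt >> 39) & 0x1ff);
}

// pml4_index(0x0000000000000000) == 0
// pml4_index(0x00007fffffffffff) == 255  (top of the lower half)
// pml4_index(0xffff800000000000) == 256  (bottom of the higher half)
// Hence "mov rcx, 256 / rep stosq" zeroed entries [0, 256), unmapping the lower half.
```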
@@ -259,8 +259,6 @@ struct limine_smp_info *init_smp(size_t *cpu_count,
 struct trampoline_passed_info {
     uint64_t smp_tpl_booted_flag;
 
-    uint64_t smp_tpl_hhdm_offset;
-
     uint64_t smp_tpl_ttbr0;
     uint64_t smp_tpl_ttbr1;
 
@@ -283,8 +281,7 @@ static uint32_t psci_cpu_on = 0xC4000003;
 static bool try_start_ap(int boot_method, uint64_t method_ptr,
                          struct limine_smp_info *info_struct,
                          uint64_t ttbr0, uint64_t ttbr1, uint64_t mair,
-                         uint64_t tcr, uint64_t sctlr,
-                         uint64_t hhdm_offset) {
+                         uint64_t tcr, uint64_t sctlr) {
     // Prepare the trampoline
     static void *trampoline = NULL;
     if (trampoline == NULL) {
@@ -306,7 +303,6 @@ static bool try_start_ap(int boot_method, uint64_t method_ptr,
     passed_info->smp_tpl_mair = mair;
     passed_info->smp_tpl_tcr = tcr;
     passed_info->smp_tpl_sctlr = sctlr;
-    passed_info->smp_tpl_hhdm_offset = hhdm_offset;
 
     // Cache coherency between the I-Cache and D-Cache is not guaranteed by the
     // architecture and as such we must perform I-Cache invalidation.
@@ -390,8 +386,7 @@ static struct limine_smp_info *try_acpi_smp(size_t *cpu_count,
                                             pagemap_t pagemap,
                                             uint64_t mair,
                                             uint64_t tcr,
-                                            uint64_t sctlr,
-                                            uint64_t hhdm_offset) {
+                                            uint64_t sctlr) {
     int boot_method = BOOT_WITH_ACPI_PARK;
 
     // Search for FADT table
@@ -479,7 +474,7 @@ static struct limine_smp_info *try_acpi_smp(size_t *cpu_count,
         if (!try_start_ap(boot_method, gicc->parking_addr, info_struct,
                           (uint64_t)(uintptr_t)pagemap.top_level[0],
                           (uint64_t)(uintptr_t)pagemap.top_level[1],
-                          mair, tcr, sctlr, hhdm_offset)) {
+                          mair, tcr, sctlr)) {
             print("smp: FAILED to bring-up AP\n");
             continue;
         }
@@ -500,18 +495,16 @@ struct limine_smp_info *init_smp(size_t *cpu_count,
                                  pagemap_t pagemap,
                                  uint64_t mair,
                                  uint64_t tcr,
-                                 uint64_t sctlr,
-                                 uint64_t hhdm_offset) {
+                                 uint64_t sctlr) {
     struct limine_smp_info *info = NULL;
 
     //if (dtb_is_present() && (info = try_dtb_smp(cpu_count,
-    //        _bsp_iface_no, pagemap, mair, tcr, sctlr, hhdm_offset)))
+    //        _bsp_iface_no, pagemap, mair, tcr, sctlr)))
     //    return info;
 
     // No RSDP means no ACPI
-    if (acpi_get_rsdp() && (info = try_acpi_smp(
-            cpu_count, bsp_mpidr, pagemap,
-            mair, tcr, sctlr, hhdm_offset)))
+    if (acpi_get_rsdp() && (info = try_acpi_smp(cpu_count,
+            bsp_mpidr, pagemap, mair, tcr, sctlr)))
         return info;
 
     printv("Failed to figure out how to start APs.");
@@ -525,17 +518,14 @@ struct trampoline_passed_info {
     uint64_t smp_tpl_booted_flag;
     uint64_t smp_tpl_satp;
     uint64_t smp_tpl_info_struct;
-    uint64_t smp_tpl_hhdm_offset;
 };
 
-static bool smp_start_ap(size_t hartid, size_t satp, struct limine_smp_info *info_struct,
-                         uint64_t hhdm_offset) {
+static bool smp_start_ap(size_t hartid, size_t satp, struct limine_smp_info *info_struct) {
     static struct trampoline_passed_info passed_info;
 
     passed_info.smp_tpl_booted_flag = 0;
     passed_info.smp_tpl_satp = satp;
     passed_info.smp_tpl_info_struct = (uint64_t)info_struct;
-    passed_info.smp_tpl_hhdm_offset = hhdm_offset;
 
     asm volatile ("" ::: "memory");
 
@@ -551,7 +541,7 @@ static bool smp_start_ap(size_t hartid, size_t satp, struct limine_smp_info *inf
     return false;
 }
 
-struct limine_smp_info *init_smp(size_t *cpu_count, pagemap_t pagemap, uint64_t hhdm_offset) {
+struct limine_smp_info *init_smp(size_t *cpu_count, pagemap_t pagemap) {
     size_t num_cpus = 0;
     for (struct riscv_hart *hart = hart_list; hart != NULL; hart = hart->next) {
         if (!(hart->flags & RISCV_HART_COPROC)) {
@@ -584,7 +574,7 @@ struct limine_smp_info *init_smp(size_t *cpu_count, pagemap_t pagemap, uint64_t
 
         // Try to start the AP.
         size_t satp = make_satp(pagemap.paging_mode, pagemap.top_level);
-        if (!smp_start_ap(hart->hartid, satp, info_struct, hhdm_offset)) {
+        if (!smp_start_ap(hart->hartid, satp, info_struct)) {
             print("smp: FAILED to bring-up AP\n");
             continue;
         }
@@ -27,14 +27,12 @@ struct limine_smp_info *init_smp(size_t *cpu_count,
                                  pagemap_t pagemap,
                                  uint64_t mair,
                                  uint64_t tcr,
-                                 uint64_t sctlr,
-                                 uint64_t hhdm_offset);
+                                 uint64_t sctlr);
 
 #elif defined (__riscv64)
 
 struct limine_smp_info *init_smp(size_t *cpu_count,
-                                 pagemap_t pagemap,
-                                 uint64_t hhdm_offset);
+                                 pagemap_t pagemap);
 
 #else
 #error Unknown architecture
@@ -1,7 +1,6 @@
 #include <lib/macros.aarch64_asm.h>
 
-.set tpl_booted_flag, -64
-.set tpl_hhdm_offset, -56
+.set tpl_booted_flag, -56
 .set tpl_ttbr0, -48
 .set tpl_ttbr1, -40
 .set tpl_mair, -32
@@ -27,22 +26,20 @@ smp_trampoline_start:
     ldr x4, [x1, tpl_tcr]
     ldr x5, [x1, tpl_ttbr0]
     ldr x6, [x1, tpl_ttbr1]
-    ldr x7, [x1, tpl_hhdm_offset]
 
-    PICK_EL x8, 1f, 0f
-0:
-    // Configure EL2-specific state for EL1
-
-    // Configure EL1 page tables
+    // Configure EL1 state
     msr mair_el1, x3
     msr tcr_el1, x4
     msr ttbr0_el1, x5
     msr ttbr1_el1, x6
     msr sctlr_el1, x2
     isb
     dsb sy
     isb
 
+    PICK_EL x8, 1f, 0f
+0:
+    // Configure EL2-specific state for EL1
+
     // Don't trap counters to EL2
     mrs x8, cnthctl_el2
     orr x8, x8, #3
@@ -63,67 +60,26 @@ smp_trampoline_start:
     // Run rest of trampoline in EL1
     mov x8, #0x3c4
     msr spsr_el2, x8
-    adrp x8, 3f
-    add x8, x8, :lo12:2f
-    add x8, x8, x7 // Add HHDM offset
+    adr x8, 1f
     msr elr_el2, x8
 
     eret
 
 1:
-    msr spsel, #0
-
-    // Switch to the new page tables
-
-    // Point the EL1t handler to the continuation, such that after we page fault,
-    // execution continues as expected.
-    adrp x8, 2f
-    add x8, x8, #:lo12:2f
-    add x8, x8, x7
-    msr vbar_el1, x8
-    isb
-    dsb sy
-    isb
-
-    // Switch the page table registers
-    msr mair_el1, x3
-    msr tcr_el1, x4
-    msr ttbr0_el1, x5
-    msr ttbr1_el1, x6
-    msr sctlr_el1, x2
-    isb
-    dsb sy
-    isb
-
-    // Jump to the higher half mapping in case we didn't immediately crash
-    br x8
-
-    // Alignment required by VBAR register
-.align 11
-2:
-    // Zero out VBAR to avoid confusion
-    msr vbar_el1, xzr
-
-3:
-    // Add HHDM offset to data pointer
-    add x1, x1, x7
-
     // Notify BSP we are alive
     mov x8, #1
     add x9, x1, tpl_booted_flag
     stlr x8, [x9]
 
     // Wait for BSP to tell us where to go
-    // Add HHDM offset to our info struct pointer
-    add x0, x0, x7
     add x9, x0, #24
-4:
+2:
     ldar x8, [x9]
-    cbnz x8, 5f
+    cbnz x8, 3f
     yield
-    b 4b
+    b 2b
 
-5:
+3:
     msr elr_el1, x8
 
     msr spsel, #0
@@ -11,26 +11,9 @@ smp_trampoline_start:
 //
 // All other registers are undefined.
 
-#define smp_tpl_booted_flag 0
-#define smp_tpl_satp 8
-#define smp_tpl_info_struct 16
-#define smp_tpl_hhdm_offset 24
-
-    ld a0, smp_tpl_info_struct(a1)
-    ld t1, smp_tpl_hhdm_offset(a1)
-
-    // Set `stvec` so we page fault into the higher half after loading `satp`.
-    lla t0, 0f
-    add t0, t1, t0
-    csrw stvec, t0
-    ld t0, smp_tpl_satp(a1)
+    ld a0, 16(a1)
+    ld t0, 8(a1)
     csrw satp, t0
     sfence.vma
-    unimp
-0:
-    // Relocate the smp_info and passed_info pointers to the higher half.
-    add a0, t1, a0
-    add a1, t1, a1
 
     // Tell the BSP we've started.
     li t0, 1
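The raw offsets `8(a1)` and `16(a1)` that replace the `#define`s correspond to the `trampoline_passed_info` layout shown earlier in this diff:

```c
#include <stdint.h>

struct trampoline_passed_info {
    uint64_t smp_tpl_booted_flag;  // offset 0
    uint64_t smp_tpl_satp;         // offset 8  -> "ld t0, 8(a1)"
    uint64_t smp_tpl_info_struct;  // offset 16 -> "ld a0, 16(a1)"
};

_Static_assert(sizeof(struct trampoline_passed_info) == 24, "three u64 fields");
```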
@@ -168,10 +168,6 @@ parking64:
     jmp .loop
 
   .out:
-    ; Clear TLB
-    mov rbx, cr3
-    mov cr3, rbx
-
     mov rsp, qword [rdi + 8]
     push 0
     push rax