limine: Remove dependency on identity map in AArch64 code

This commit is contained in:
Kacper Słomiński 2023-07-26 22:09:05 +02:00
parent eee638c583
commit 1a03213601
6 changed files with 121 additions and 86 deletions

View File

@@ -95,9 +95,6 @@ noreturn void stage3_common(void);
#if defined (__x86_64__) || defined (__i386__)
noreturn void common_spinup(void *fnptr, int args, ...);
#elif defined (__aarch64__)
noreturn void enter_in_current_el(uint64_t entry, uint64_t sp, uint64_t sctlr,
uint64_t target_x0);
noreturn void enter_in_el1(uint64_t entry, uint64_t sp, uint64_t sctlr,
uint64_t mair, uint64_t tcr, uint64_t ttbr0,
uint64_t ttbr1, uint64_t target_x0);

View File

@@ -2,56 +2,9 @@
.section .text
// noreturn void enter_in_current_el(uint64_t entry, uint64_t sp, uint64_t sctlr,
// uint64_t target_x0)
// Configure current EL state and jump to kernel. Used for Linux hence
// no paging register configuration (which requires SCTLR.M = 0).
.global enter_in_current_el
enter_in_current_el:
msr sp_el0, x1
// Sanity check that SCTLR.M = 0
and x8, x2, #0b1
cbnz x8, 99f
99:
wfi
b 99b
PICK_EL x8, 0f, 1f
0:
msr sctlr_el1, x2
dsb sy
isb
// Enter kernel in EL1
mov x8, #0x3c4
msr spsr_el1, x8
msr elr_el1, x0
mov x0, x3
ZERO_REGS_EXCEPT_X0
eret
1:
msr sctlr_el2, x2
dsb sy
isb
// Enter kernel in EL2
mov x8, #0x3c8
msr spsr_el2, x8
msr elr_el2, x0
mov x0, x3
ZERO_REGS_EXCEPT_X0
eret
// noreturn void enter_in_el1(uint64_t entry, uint64_t sp, uint64_t sctlr,
// uint64_t mair, uint64_t tcr, uint64_t ttbr0,
// uint64_t ttbr1, uint64_t target_x0)
// uint64_t ttbr1, uint64_t direct_map_offset)
// Potentially drop to EL1 from EL2 (and also disable trapping to EL2), then
// configure EL1 state and jump to kernel.
@@ -60,6 +13,50 @@ enter_in_el1:
msr spsel, #0
mov sp, x1
PICK_EL x8, 0f, 2f
0:
// Switch to the new page tables
// Point the EL1t handler to the continuation, such that after we page fault,
// execution continues and the kernel is entered.
adrp x8, 1f
add x8, x8, #:lo12:1f
add x8, x8, x7
msr vbar_el1, x8
isb
dsb sy
isb
// Switch the page table registers
msr mair_el1, x3
msr tcr_el1, x4
msr ttbr0_el1, x5
msr ttbr1_el1, x6
msr sctlr_el1, x2
isb
dsb sy
isb
// Jump to the higher half mapping in case we didn't immediately crash
br x8
// Alignment required by VBAR register
.align 11
1:
// Zero out VBAR to avoid confusion
msr vbar_el1, xzr
// Enter kernel in EL1
mov x8, #0x3c4
msr spsr_el1, x8
msr elr_el1, x0
mov x0, xzr
ZERO_REGS_EXCEPT_X0
eret
2:
// Configure EL1 state
msr mair_el1, x3
msr tcr_el1, x4
@@ -69,19 +66,6 @@ enter_in_el1:
dsb sy
isb
PICK_EL x8, 0f, 1f
0:
// Enter kernel in EL1
mov x8, #0x3c4
msr spsr_el1, x8
msr elr_el1, x0
mov x0, x7
ZERO_REGS_EXCEPT_X0
eret
1:
// Configure EL2-specific state for EL1
// Don't trap counters to EL2
@@ -104,7 +88,7 @@ enter_in_el1:
msr spsr_el2, x8
msr elr_el2, x0
mov x0, x7
mov x0, xzr
ZERO_REGS_EXCEPT_X0
eret

View File

@@ -955,7 +955,8 @@ FEAT_START
uint64_t bsp_mpidr;
smp_info = init_smp(&cpu_count, &bsp_mpidr,
pagemap, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa), LIMINE_SCTLR);
pagemap, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa), LIMINE_SCTLR,
direct_map_offset);
#elif defined (__riscv64)
if (!have_bsp_hartid) {
printv("smp: failed to get bsp's hart id\n");
@@ -1099,7 +1100,8 @@ FEAT_END
enter_in_el1(entry_point, reported_stack, LIMINE_SCTLR, LIMINE_MAIR(fb_attr), LIMINE_TCR(tsz, pa),
(uint64_t)pagemap.top_level[0],
(uint64_t)pagemap.top_level[1], 0);
(uint64_t)pagemap.top_level[1],
direct_map_offset);
#elif defined (__riscv64)
uint64_t reported_stack = reported_addr(stack);
uint64_t satp = make_satp(pagemap.paging_mode, pagemap.top_level);

View File

@@ -316,6 +316,8 @@ struct limine_smp_info *init_smp(size_t *cpu_count,
struct trampoline_passed_info {
uint64_t smp_tpl_booted_flag;
uint64_t smp_tpl_hhdm_offset;
uint64_t smp_tpl_ttbr0;
uint64_t smp_tpl_ttbr1;
@@ -338,7 +340,8 @@ static uint32_t psci_cpu_on = 0xC4000003;
static bool try_start_ap(int boot_method, uint64_t method_ptr,
struct limine_smp_info *info_struct,
uint64_t ttbr0, uint64_t ttbr1, uint64_t mair,
uint64_t tcr, uint64_t sctlr) {
uint64_t tcr, uint64_t sctlr,
uint64_t hhdm_offset) {
// Prepare the trampoline
static void *trampoline = NULL;
if (trampoline == NULL) {
@@ -360,6 +363,7 @@ static bool try_start_ap(int boot_method, uint64_t method_ptr,
passed_info->smp_tpl_mair = mair;
passed_info->smp_tpl_tcr = tcr;
passed_info->smp_tpl_sctlr = sctlr;
passed_info->smp_tpl_hhdm_offset = hhdm_offset;
// Cache coherency between the I-Cache and D-Cache is not guaranteed by the
// architecture and as such we must perform I-Cache invalidation.
@@ -443,7 +447,8 @@ static struct limine_smp_info *try_acpi_smp(size_t *cpu_count,
pagemap_t pagemap,
uint64_t mair,
uint64_t tcr,
uint64_t sctlr) {
uint64_t sctlr,
uint64_t hhdm_offset) {
int boot_method = BOOT_WITH_ACPI_PARK;
// Search for FADT table
@@ -531,7 +536,7 @@ static struct limine_smp_info *try_acpi_smp(size_t *cpu_count,
if (!try_start_ap(boot_method, gicc->parking_addr, info_struct,
(uint64_t)(uintptr_t)pagemap.top_level[0],
(uint64_t)(uintptr_t)pagemap.top_level[1],
mair, tcr, sctlr)) {
mair, tcr, sctlr, hhdm_offset)) {
print("smp: FAILED to bring-up AP\n");
continue;
}
@@ -552,16 +557,18 @@ struct limine_smp_info *init_smp(size_t *cpu_count,
pagemap_t pagemap,
uint64_t mair,
uint64_t tcr,
uint64_t sctlr) {
uint64_t sctlr,
uint64_t hhdm_offset) {
struct limine_smp_info *info = NULL;
//if (dtb_is_present() && (info = try_dtb_smp(cpu_count,
// _bsp_iface_no, pagemap, mair, tcr, sctlr)))
// _bsp_iface_no, pagemap, mair, tcr, sctlr, hhdm_offset)))
// return info;
// No RSDP means no ACPI
if (acpi_get_rsdp() && (info = try_acpi_smp(cpu_count,
bsp_mpidr, pagemap, mair, tcr, sctlr)))
if (acpi_get_rsdp() && (info = try_acpi_smp(
cpu_count, bsp_mpidr, pagemap,
mair, tcr, sctlr, hhdm_offset)))
return info;
printv("Failed to figure out how to start APs.");

View File

@@ -27,7 +27,8 @@ struct limine_smp_info *init_smp(size_t *cpu_count,
pagemap_t pagemap,
uint64_t mair,
uint64_t tcr,
uint64_t sctlr);
uint64_t sctlr,
uint64_t hhdm_offset);
#elif defined (__riscv64)

View File

@@ -1,6 +1,7 @@
#include <lib/macros.aarch64_asm.h>
.set tpl_booted_flag, -56
.set tpl_booted_flag, -64
.set tpl_hhdm_offset, -56
.set tpl_ttbr0, -48
.set tpl_ttbr1, -40
.set tpl_mair, -32
@@ -26,20 +27,22 @@ smp_trampoline_start:
ldr x4, [x1, tpl_tcr]
ldr x5, [x1, tpl_ttbr0]
ldr x6, [x1, tpl_ttbr1]
ldr x7, [x1, tpl_hhdm_offset]
// Configure EL1 state
PICK_EL x8, 1f, 0f
0:
// Configure EL2-specific state for EL1
// Configure EL1 page tables
msr mair_el1, x3
msr tcr_el1, x4
msr ttbr0_el1, x5
msr ttbr1_el1, x6
msr sctlr_el1, x2
isb
dsb sy
isb
PICK_EL x8, 1f, 0f
0:
// Configure EL2-specific state for EL1
// Don't trap counters to EL2
mrs x8, cnthctl_el2
orr x8, x8, #3
@@ -60,26 +63,67 @@ smp_trampoline_start:
// Run rest of trampoline in EL1
mov x8, #0x3c4
msr spsr_el2, x8
adr x8, 1f
adrp x8, 3f
add x8, x8, :lo12:2f
add x8, x8, x7 // Add HHDM offset
msr elr_el2, x8
eret
1:
msr spsel, #0
// Switch to the new page tables
// Point the EL1t handler to the continuation, such that after we page fault,
// execution continues as expected.
adrp x8, 2f
add x8, x8, #:lo12:2f
add x8, x8, x7
msr vbar_el1, x8
isb
dsb sy
isb
// Switch the page table registers
msr mair_el1, x3
msr tcr_el1, x4
msr ttbr0_el1, x5
msr ttbr1_el1, x6
msr sctlr_el1, x2
isb
dsb sy
isb
// Jump to the higher half mapping in case we didn't immediately crash
br x8
// Alignment required by VBAR register
.align 11
2:
// Zero out VBAR to avoid confusion
msr vbar_el1, xzr
3:
// Add HHDM offset to data pointer
add x1, x1, x7
// Notify BSP we are alive
mov x8, #1
add x9, x1, tpl_booted_flag
stlr x8, [x9]
// Wait for BSP to tell us where to go
// Add HHDM offset to our info struct pointer
add x0, x0, x7
add x9, x0, #24
2:
4:
ldar x8, [x9]
cbnz x8, 3f
cbnz x8, 5f
yield
b 2b
b 4b
3:
5:
msr elr_el1, x8
msr spsel, #0