boot: ARM64 EFI port

* MMU mapping
* EL2 to EL1 transition (FreeBSD/Jaroslaw Pelczar)
* Initial implementation of cache cleaning and TLB invalidation (ARM)
* Processor helper functions
* Additional logging in the boot process

Change-Id: Idcee93583418a3c3528c5d9586d3add487f9d5ca
Reviewed-on: https://review.haiku-os.org/c/haiku/+/4888
Reviewed-by: Adrien Destugues <pulkomandy@gmail.com>
Reviewed-by: Alex von Gluck IV <kallisti5@unixzen.com>
Tested-by: Commit checker robot <no-reply+buildbot@haiku-os.org>
Author: urnenfeld, 2022-01-26 13:27:04 +01:00 (committed by Alex von Gluck IV)
Parent: 5e5299336b
Commit: f9412d9f8a
12 changed files with 1644 additions and 6 deletions
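
For orientation, the EL2-to-EL1 handling added to arch_start.cpp (see the diff below) boils down to the following simplified sketch. The names are taken from this change; the ExitBootServices() retry loop and error handling are omitted, and stackTop stands for the cpu_kstack computation done in the real code.

// Simplified sketch of the new flow in arch_start_kernel():
if (arch_exception_level() == 2) {
    // Booted by UEFI at EL2: log the current EL2 translation regime and
    // remember to drop to EL1 after ExitBootServices().
    arch_mmu_dump_present_tables();
    el2toel1 = true;
}

// ... fetch the memory map and call ExitBootServices() ...

if (el2toel1) {
    arch_mmu_setup_EL1();        // program TCR_EL1 for the kernel address space
    arch_cache_disable();        // turn off MMU + caches, clean to PoC, invalidate TLBs
    _arch_transition_EL2_EL1();  // copy TTBR0/MAIR/VBAR/SP to EL1, set HCR_EL2.RW, eret
    arch_cache_enable();         // re-enable MMU + caches, now at EL1
}
arch_enter_kernel(&gKernelArgs, kernelEntry, stackTop);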

@@ -0,0 +1,85 @@
/*-
* Copyright (c) 2013, 2014 Andrew Turner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_HYPERVISOR_H_
#define _MACHINE_HYPERVISOR_H_
/*
* These registers are only useful when in hypervisor context,
* e.g. specific to EL2, or controlling the hypervisor.
*/
/*
* Architecture feature trap register
*/
#define CPTR_RES0 0x7fefc800
#define CPTR_RES1 0x000033ff
#define CPTR_TFP 0x00000400
#define CPTR_TTA 0x00100000
#define CPTR_TCPAC 0x80000000
/*
* Hypervisor Config Register
*/
#define HCR_VM 0x0000000000000001
#define HCR_SWIO 0x0000000000000002
#define HCR_PTW 0x0000000000000004
#define HCR_FMO 0x0000000000000008
#define HCR_IMO 0x0000000000000010
#define HCR_AMO 0x0000000000000020
#define HCR_VF 0x0000000000000040
#define HCR_VI 0x0000000000000080
#define HCR_VSE 0x0000000000000100
#define HCR_FB 0x0000000000000200
#define HCR_BSU_MASK 0x0000000000000c00
#define HCR_DC 0x0000000000001000
#define HCR_TWI 0x0000000000002000
#define HCR_TWE 0x0000000000004000
#define HCR_TID0 0x0000000000008000
#define HCR_TID1 0x0000000000010000
#define HCR_TID2 0x0000000000020000
#define HCR_TID3 0x0000000000040000
#define HCR_TSC 0x0000000000080000
#define HCR_TIDCP 0x0000000000100000
#define HCR_TACR 0x0000000000200000
#define HCR_TSW 0x0000000000400000
#define HCR_TPC 0x0000000000800000
#define HCR_TPU 0x0000000001000000
#define HCR_TTLB 0x0000000002000000
#define HCR_TVM 0x0000000004000000
#define HCR_TGE 0x0000000008000000
#define HCR_TDZ 0x0000000010000000
#define HCR_HCD 0x0000000020000000
#define HCR_TRVM 0x0000000040000000
#define HCR_RW 0x0000000080000000
#define HCR_CD 0x0000000100000000
#define HCR_ID 0x0000000200000000
#endif
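
For context, the HCR and CPTR values above are exactly what the new transition.S programs before dropping to EL1. A minimal C-level sketch of the same register writes, assuming the WRITE_SPECIALREG() macro from arm_registers.h (used elsewhere in this change), would be:

// Sketch only; the real work is done in assembly by drop_to_el1 later in this commit.
WRITE_SPECIALREG(HCR_EL2, HCR_RW);      // EL1 runs in AArch64 state
WRITE_SPECIALREG(CPTR_EL2, CPTR_RES1);  // don't trap FP/SIMD or trace accesses to EL2
WRITE_SPECIALREG(HSTR_EL2, 0);          // don't trap CP15 accesses to EL2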

@@ -23,6 +23,11 @@ typedef struct {
// TODO: Deal with this later in the port
// FixedWidthPointer<void> fdt;
// uart_info uart;
uint64 phys_pgdir;
uint64 vir_pgdir;
uint64 next_pagetable;
} _PACKED arch_kernel_args;
#endif /* KERNEL_ARCH_ARM64_KERNEL_ARGS_H */

@@ -0,0 +1,127 @@
/*-
* Copyright (c) 2014 Andrew Turner
* Copyright (c) 2014-2015 The FreeBSD Foundation
* All rights reserved.
*
* This software was developed by Andrew Turner under
* sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
#ifndef LOCORE
typedef uint64_t pd_entry_t; /* page directory entry */
typedef uint64_t pt_entry_t; /* page table entry */
#endif
/* Block and Page attributes */
/* TODO: Add the upper attributes */
#define ATTR_MASK_H UINT64_C(0xfff0000000000000)
#define ATTR_MASK_L UINT64_C(0x0000000000000fff)
#define ATTR_MASK (ATTR_MASK_H | ATTR_MASK_L)
/* Bits 58:55 are reserved for software */
#define ATTR_SW_DIRTY (1UL << 56)
#define ATTR_UXN (1UL << 54)
#define ATTR_PXN (1UL << 53)
#define ATTR_XN (ATTR_PXN | ATTR_UXN)
#define ATTR_CONTIGUOUS (1UL << 52)
#define ATTR_DBM (1UL << 51)
#define ATTR_nG (1 << 11)
#define ATTR_AF (1 << 10)
#define ATTR_SH(x) ((x) << 8)
#define ATTR_SH_MASK ATTR_SH(3)
#define ATTR_SH_NS 0 /* Non-shareable */
#define ATTR_SH_OS 2 /* Outer-shareable */
#define ATTR_SH_IS 3 /* Inner-shareable */
#define ATTR_AP_RW_BIT (1 << 7)
#define ATTR_AP(x) ((x) << 6)
#define ATTR_AP_MASK ATTR_AP(3)
#define ATTR_AP_RW (0 << 1)
#define ATTR_AP_RO (1 << 1)
#define ATTR_AP_USER (1 << 0)
#define ATTR_NS (1 << 5)
#define ATTR_IDX(x) ((x) << 2)
#define ATTR_IDX_MASK (7 << 2)
#define ATTR_DEFAULT (ATTR_AF | ATTR_SH(ATTR_SH_IS))
#define ATTR_DESCR_MASK 3
/* Level 0 table, 512GiB per entry */
#define L0_SHIFT 39
#define L0_SIZE (1ul << L0_SHIFT)
#define L0_OFFSET (L0_SIZE - 1ul)
#define L0_INVAL 0x0 /* An invalid address */
/* 0x1 Level 0 doesn't support block translation */
/* 0x2 also marks an invalid address */
#define L0_TABLE 0x3 /* A next-level table */
/* Level 1 table, 1GiB per entry */
#define L1_SHIFT 30
#define L1_SIZE (1 << L1_SHIFT)
#define L1_OFFSET (L1_SIZE - 1)
#define L1_INVAL L0_INVAL
#define L1_BLOCK 0x1
#define L1_TABLE L0_TABLE
/* Level 2 table, 2MiB per entry */
#define L2_SHIFT 21
#define L2_SIZE (1 << L2_SHIFT)
#define L2_OFFSET (L2_SIZE - 1)
#define L2_INVAL L1_INVAL
#define L2_BLOCK L1_BLOCK
#define L2_TABLE L1_TABLE
#define L2_BLOCK_MASK UINT64_C(0xffffffe00000)
/* Level 3 table, 4KiB per entry */
#define L3_SHIFT 12
#define L3_SIZE (1 << L3_SHIFT)
#define L3_OFFSET (L3_SIZE - 1)
#define L3_INVAL 0x0
/* 0x1 is reserved */
/* 0x2 also marks an invalid address */
#define L3_PAGE 0x3
#define L0_ENTRIES_SHIFT 9
#define L0_ENTRIES (1 << L0_ENTRIES_SHIFT)
#define L0_ADDR_MASK (L0_ENTRIES - 1)
#define Ln_ENTRIES_SHIFT 9
#define Ln_ENTRIES (1 << Ln_ENTRIES_SHIFT)
#define Ln_ADDR_MASK (Ln_ENTRIES - 1)
#define Ln_TABLE_MASK ((1 << 12) - 1)
#define pmap_l0_index(va) (((va) >> L0_SHIFT) & L0_ADDR_MASK)
#define pmap_l1_index(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
#define pmap_l2_index(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
#define pmap_l3_index(va) (((va) >> L3_SHIFT) & Ln_ADDR_MASK)
#endif /* !_MACHINE_PTE_H_ */
/* End of pte.h */
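
As a quick sanity check of the constants above: with a 4 KiB granule each level indexes 9 bits of the virtual address, on top of a 12-bit page offset. A hypothetical 48-bit address shows how the pmap_lN_index() macros split it:

// Hypothetical VA under the 4KiB granule regime used by the loader:
uint64_t va = 0x0000000080604567ULL;
uint64_t l0 = pmap_l0_index(va);   // (va >> 39) & 0x1ff == 0
uint64_t l1 = pmap_l1_index(va);   // (va >> 30) & 0x1ff == 2
uint64_t l2 = pmap_l2_index(va);   // (va >> 21) & 0x1ff == 3
uint64_t l3 = pmap_l3_index(va);   // (va >> 12) & 0x1ff == 4
uint64_t off = va & L3_OFFSET;     // 0x567, offset inside the 4KiB page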

@@ -18,11 +18,15 @@ for platform in [ MultiBootSubDirSetup efi ] {
local arch_src =
crt0-efi-$(TARGET_ARCH).S
entry.S
transition.S
exceptions.S
cache.S
relocation_func.cpp
arch_mmu.cpp
arch_smp.cpp
arch_start.cpp
arch_timer.cpp
arch_cache.cpp
;
BootMergeObject boot_platform_efi_arm64.o :

@@ -0,0 +1,245 @@
/*
* Copyright 2021-2022, Oliver Ruiz Dorantes. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#include <efi/types.h>
#include <kernel/arch/arm64/arm_registers.h>
#include <kernel/arch/arm64/arch_pte.h>
#include <arch_kernel.h>
extern "C" void _arch_exception_loop(void);
extern "C" void _arch_exception_panic(const char* someString, uint64 someValue);
extern "C" uint64 _arch_transition_EL2_EL1(void);
extern "C" void arch_cache_disable(void);
extern "C" void arch_cache_enable(void);
extern "C" void _arch_cache_flush_invalidate_all(void);
extern "C" void _arch_mmu_invalidate_tlb_all(uint8 el);
extern "C" void _arch_cache_clean_poc(void);
static const uint8 kInvalidExceptionLevel = 0xFFu;
#define AARCH64_CHECK_ACCESS(operand, address) \
__asm __volatile("at " #operand ", %0" : : "r"((uint64_t)address))
#define AARCH64_BREAK(id) \
__asm __volatile("brk " #id)
static inline uint64 arch_exception_level()
{
return (READ_SPECIALREG(CurrentEL) >> 2);
}
// Check the arch_cpu.h macro ADDRESS_TRANSLATE_FUNC(stage) for an alternative implementation
static inline bool arch_mmu_read_access(addr_t address) {
switch (arch_exception_level())
{
case 0:
AARCH64_CHECK_ACCESS(S1E0R, address);
break;
case 1:
AARCH64_CHECK_ACCESS(S1E1R, address);
break;
case 2:
AARCH64_CHECK_ACCESS(S1E2R, address);
break;
case 3:
AARCH64_CHECK_ACCESS(S1E3R, address);
break;
default:
return false;
}
return !(READ_SPECIALREG(PAR_EL1) & PAR_F);
}
static inline bool arch_mmu_write_access(addr_t address) {
switch (arch_exception_level())
{
case 0:
AARCH64_CHECK_ACCESS(S1E0W, address);
break;
case 1:
AARCH64_CHECK_ACCESS(S1E1W, address);
break;
case 2:
AARCH64_CHECK_ACCESS(S1E2W, address);
break;
case 3:
AARCH64_CHECK_ACCESS(S1E3W, address);
break;
default:
return false;
}
return !(READ_SPECIALREG(PAR_EL1) & PAR_F);
}
static inline uint64 arch_mmu_base_register(bool kernel = false)
{
switch (arch_exception_level())
{
case 1:
if (kernel) {
return READ_SPECIALREG(TTBR1_EL1);
} else {
return READ_SPECIALREG(TTBR0_EL1);
}
case 2:
if (kernel) {
/* This register is present only when
* FEAT_VHE is implemented. Otherwise,
* direct accesses to TTBR1_EL2 are UNDEFINED.
*/
return READ_SPECIALREG(TTBR0_EL2); // TTBR1_EL2
} else {
return READ_SPECIALREG(TTBR0_EL2);
}
case 3:
return READ_SPECIALREG(TTBR0_EL3);
default:
return 0;
}
}
static inline uint64 _arch_mmu_get_sctlr()
{
switch (arch_exception_level())
{
case 1:
return READ_SPECIALREG(SCTLR_EL1);
case 2:
return READ_SPECIALREG(SCTLR_EL2);
case 3:
return READ_SPECIALREG(SCTLR_EL3);
default:
return 0;
}
}
static inline void _arch_mmu_set_sctlr(uint64 sctlr)
{
switch (arch_exception_level())
{
case 1:
WRITE_SPECIALREG(SCTLR_EL1, sctlr);
break;
case 2:
WRITE_SPECIALREG(SCTLR_EL2, sctlr);
break;
case 3:
WRITE_SPECIALREG(SCTLR_EL3, sctlr);
break;
}
}
static inline bool arch_mmu_enabled()
{
return _arch_mmu_get_sctlr() & SCTLR_M;
}
static inline bool arch_mmu_cache_enabled()
{
return _arch_mmu_get_sctlr() & SCTLR_C;
}
static inline uint64 _arch_mmu_get_tcr(int el = kInvalidExceptionLevel) {
if (el == kInvalidExceptionLevel)
el = arch_exception_level();
switch (el)
{
case 1:
return READ_SPECIALREG(TCR_EL1);
case 2:
return READ_SPECIALREG(TCR_EL2);
case 3:
return READ_SPECIALREG(TCR_EL3);
default:
return 0;
}
}
// TODO: move to arm_registers.h
static constexpr uint64 TG_MASK = 0x3u;
static constexpr uint64 TG_4KB = 0x0u;
static constexpr uint64 TG_16KB = 0x2u;
static constexpr uint64 TG_64KB = 0x1u;
static constexpr uint64 TxSZ_MASK = (1 << 6) - 1;
static constexpr uint64 T0SZ_MASK = TxSZ_MASK;
static constexpr uint64 T1SZ_MASK = TxSZ_MASK << TCR_T1SZ_SHIFT;
static constexpr uint64 TCR_EPD1_DISABLE = (1 << 23);
static inline uint32 arch_mmu_user_address_bits()
{
uint64 reg = _arch_mmu_get_tcr();
return 64 - (reg & T0SZ_MASK);
}
static inline uint32 arch_mmu_user_granule()
{
static constexpr uint64 TCR_TG0_SHIFT = 14u;
uint64 reg = _arch_mmu_get_tcr();
return ((reg >> TCR_TG0_SHIFT) & TG_MASK);
}
/*
* Given that "EL2 and EL3 have a TTBR0, but no TTBR1. This means
* that if either EL2 or EL3 is using AArch64, they can only use
* virtual addresses in the range 0x0 to 0x0000FFFF_FFFFFFFF."
*
* The following calls only make sense under EL1.
*/
static inline uint32 arch_mmu_kernel_address_bits()
{
uint64 reg = _arch_mmu_get_tcr();
return 64 - ((reg & T1SZ_MASK) >> TCR_T1SZ_SHIFT);
}
static inline uint32 arch_mmu_kernel_granule()
{
uint64 reg = _arch_mmu_get_tcr();
return ((reg >> TCR_TG1_SHIFT) & TG_MASK);
}
/*
* Distinguish between kernel(TTBR1) and user(TTBR0) addressing
*/
static inline bool arch_mmu_is_kernel_address(uint64 address)
{
return address > KERNEL_BASE;
}
static inline constexpr uint32 arch_mmu_entries_per_granularity(uint32 granularity)
{
return (granularity / 8);
}
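
A short usage sketch of these helpers, mirroring what the new arch_start.cpp does before jumping to the kernel; kernelEntry and gKernelArgs come from the loader, and granule_type_str() is defined in arch_mmu.cpp later in this commit:

// Probe the active translation regime and check that the kernel entry point
// and its stack are readable through the current tables (AT + PAR_EL1.F).
if (arch_mmu_enabled()) {
    dprintf("MMU on: TTBR %lx, %s granule, %d VA bits\n",
        arch_mmu_base_register(),
        granule_type_str(arch_mmu_user_granule()),
        arch_mmu_user_address_bits());
}
bool reachable = arch_mmu_read_access(kernelEntry)
    && arch_mmu_read_access(gKernelArgs.cpu_kstack[0].start);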

@@ -0,0 +1,34 @@
/*
* Copyright 2021-2022, Oliver Ruiz Dorantes. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#include <boot/platform.h>
#include <boot/stage2.h>
#include "aarch64.h"
void
arch_cache_disable()
{
if (arch_mmu_cache_enabled()) {
uint64 sctlr = _arch_mmu_get_sctlr();
sctlr &= ~(SCTLR_M | SCTLR_C);
_arch_mmu_set_sctlr(sctlr);
// _arch_cache_flush_invalidate_all();
_arch_cache_clean_poc();
_arch_mmu_invalidate_tlb_all(arch_exception_level());
}
}
void
arch_cache_enable()
{
if (!arch_mmu_cache_enabled()) {
uint64 sctlr = _arch_mmu_get_sctlr();
sctlr |= (SCTLR_M | SCTLR_C);
_arch_mmu_set_sctlr(sctlr);
}
}

@@ -1,10 +1,512 @@
/*
* Copyright 2019-2022 Haiku, Inc. All rights reserved.
* Released under the terms of the MIT License.
*/
#include <boot/platform.h>
#include <boot/stage2.h>
#include "mmu.h"
#include "efi_platform.h"
#include "aarch64.h"
#include "arch_mmu.h"
// #define TRACE_MMU
#ifdef TRACE_MMU
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
ARMv8TranslationRegime::TranslationDescriptor translation4Kb48bits = {
{L0_SHIFT, L0_ADDR_MASK, false, true, false },
{L1_SHIFT, Ln_ADDR_MASK, true, true, false },
{L2_SHIFT, Ln_ADDR_MASK, true, true, false },
{L3_SHIFT, Ln_ADDR_MASK, false, false, true }
};
ARMv8TranslationRegime CurrentRegime(translation4Kb48bits);
/* ARM port */
static uint64_t* sPageDirectory = NULL;
// static uint64_t* sFirstPageTable = NULL;
static uint64_t* sNextPageTable = NULL;
// static uint64_t* sLastPageTable = NULL;
const char*
granule_type_str(int tg)
{
switch (tg) {
case TG_4KB:
return "4KB";
case TG_16KB:
return "16KB";
case TG_64KB:
return "64KB";
default:
return "Invalid Granule";
}
}
void
arch_mmu_dump_table(uint64* table, uint8 currentLevel)
{
ARMv8TranslationTableDescriptor ttd(table);
if (currentLevel >= CurrentRegime.MaxLevels()) {
// This should not happen
panic("Too many levels ...");
return;
}
uint64 EntriesPerLevel = arch_mmu_entries_per_granularity(CurrentRegime.Granularity());
for (uint i = 0 ; i < EntriesPerLevel; i++) {
if (!ttd.IsInvalid()) {
TRACE(("Level %d, @%0lx: TTD %016lx\t", currentLevel, ttd.Location(), ttd.Value()));
if (ttd.IsTable() && currentLevel < 3) {
TRACE(("Table! Next Level:\n"));
arch_mmu_dump_table(ttd.Dereference(), currentLevel + 1);
}
if (ttd.IsBlock() || (ttd.IsPage() && currentLevel == 3)) {
TRACE(("Block/Page"));
if (i & 1) { // 2 entries per row
TRACE(("\n"));
} else {
TRACE(("\t"));
}
}
}
ttd.Next();
}
}
void
arch_mmu_dump_present_tables()
{
#ifdef TRACE_MMU
if (arch_mmu_enabled()) {
uint64 address = arch_mmu_base_register();
TRACE(("Under TTBR0: %lx\n", address));
arch_mmu_dump_table(reinterpret_cast<uint64*>(address), 0);
/* We intend to transition, but we are still in EL2: the current MMU
* configuration for the user side lives in TTBR0_EL2. The kernel side is
* not active yet, but is already allocated under sPageDirectory and will
* be installed in TTBR1_EL1.
*/
if (address != 0ul) {
TRACE(("Under allocated TTBR1_EL1:\n"));
arch_mmu_dump_table(sPageDirectory, 0);
}
}
#endif
}
void
arch_mmu_setup_EL1()
{
// Inherit TCR from EL2
uint64 tcr = READ_SPECIALREG(TCR_EL2);
// Enable TTBR1
tcr &= ~TCR_EPD1_DISABLE;
// Set space for kernel space
tcr &= ~T1SZ_MASK; // Clear
// TODO: Compiler dependency?
tcr |= TCR_T1SZ(__builtin_popcountl(KERNEL_BASE));
WRITE_SPECIALREG(TCR_EL1, tcr);
}
uint64
map_region(addr_t virt_addr, addr_t phys_addr, size_t size,
uint32_t level, uint64_t flags, uint64* descriptor)
{
ARMv8TranslationTableDescriptor ttd(descriptor);
if (level >= CurrentRegime.MaxLevels()) {
panic("Too many levels at mapping\n");
}
uint64 currentLevelSize = CurrentRegime.EntrySize(level);
ttd.JumpTo(CurrentRegime.DescriptorIndex(virt_addr, level));
uint64 remainingSizeInTable = CurrentRegime.TableSize(level)
- currentLevelSize * CurrentRegime.DescriptorIndex(virt_addr, level);
TRACE(("Level %x, Processing desc %lx indexing %lx\n",
level, reinterpret_cast<uint64>(descriptor), ttd.Location()));
if (ttd.IsInvalid()) {
// If the physical address has the same alignment we can make a block here
// instead of using a complete next-level table
if (size >= currentLevelSize && CurrentRegime.Aligned(phys_addr, level)) {
// Set it as block or page
if (CurrentRegime.BlocksAllowed(level)) {
ttd.SetAsBlock(reinterpret_cast<uint64*>(phys_addr), flags);
} else {
// Most likely in Level 3...
ttd.SetAsPage(reinterpret_cast<uint64*>(phys_addr), flags);
}
// Expand!
int64 expandedSize = (size > remainingSizeInTable)?remainingSizeInTable:size;
do {
phys_addr += currentLevelSize;
expandedSize -= currentLevelSize;
if (expandedSize > 0) {
ttd.Next();
if (CurrentRegime.BlocksAllowed(level)) {
ttd.SetAsBlock(reinterpret_cast<uint64*>(phys_addr), flags);
} else {
// Most likely in Level 3...
ttd.SetAsPage(reinterpret_cast<uint64*>(phys_addr), flags);
}
}
} while (expandedSize > 0);
return (size > remainingSizeInTable)?(size - remainingSizeInTable):0;
} else {
// Set it to next level
uint64 offset = 0;
uint64 remainingSize = size;
do {
uint64* page = NULL;
if (ttd.IsInvalid()) {
// Region too small (or misaligned) for a block at this level; allocate a next-level table
page = CurrentRegime.AllocatePage();
ttd.SetToTable(page, flags);
} else if (ttd.IsTable()) {
// Next table is allocated, follow it
page = ttd.Dereference();
} else {
panic("Required contiguous descriptor in use by Block/Page for %lx\n", ttd.Location());
}
uint64 unprocessedSize = map_region(virt_addr + offset,
phys_addr + offset, remainingSize, level + 1, flags, page);
offset = remainingSize - unprocessedSize;
remainingSize = unprocessedSize;
ttd.Next();
} while (remainingSize > 0);
return 0;
}
} else {
if ((ttd.IsBlock() && CurrentRegime.BlocksAllowed(level))
|| (ttd.IsPage() && CurrentRegime.PagesAllowed(level))
) {
// TODO: Review, overlap? expand?
panic("Re-setting a Block/Page descriptor for %lx\n", ttd.Location());
return 0;
} else if (ttd.IsTable() && CurrentRegime.TablesAllowed(level)) {
// Next Level
map_region(virt_addr, phys_addr, size, level + 1, flags, ttd.Dereference());
return 0;
} else {
panic("All descriptor types processed for %lx\n", ttd.Location());
return 0;
}
}
}
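
To illustrate the recursion above, consider a hypothetical request under the 4 KiB/48-bit regime; the virtual address, flags and root table below are placeholders for what map_range() passes in:

// Map 4MiB of a kernel-side VA to physical 0x40000000 (2MiB aligned):
uint64 root = READ_SPECIALREG(TTBR1_EL1);
map_region(0xffffff8000000000ull, 0x40000000, 4 * 1024 * 1024, 0,
    ARMv8TranslationTableDescriptor::DefaultCodeAttribute,
    reinterpret_cast<uint64*>(root));
// Level 0 (index 511) and level 1 (index 0) are invalid, so next-level tables
// are allocated; at level 2 the request is >= 2MiB and 2MiB aligned, so two
// L2 block descriptors are written (0x40000000 and 0x40200000), no L3 needed.
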
static void
map_range(addr_t virt_addr, phys_addr_t phys_addr, size_t size, uint64_t flags)
{
TRACE(("map 0x%0lx --> 0x%0lx, len=0x%0lx, flags=0x%0lx\n",
(uint64_t)virt_addr, (uint64_t)phys_addr, (uint64_t)size, flags));
// TODO: Review why we get ranges with 0 size ...
if (size == 0) {
TRACE(("Requesing 0 size map\n"));
return;
}
// TODO: Review this case
if (phys_addr == READ_SPECIALREG(TTBR1_EL1)) {
TRACE(("Trying to map the TTBR itself?!\n"));
return;
}
if (arch_mmu_read_access(virt_addr) && arch_mmu_read_access(virt_addr + size)) {
TRACE(("Range already covered in current MMU\n"));
return;
}
uint64 address;
if (arch_mmu_is_kernel_address(virt_addr)) {
// Use TTBR1
address = READ_SPECIALREG(TTBR1_EL1);
} else {
// User address: use TTBR0 instead
address = READ_SPECIALREG(TTBR0_EL1);
}
map_region(virt_addr, phys_addr, size, 0, flags, reinterpret_cast<uint64*>(address));
// for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
// map_page(virt_addr + offset, phys_addr + offset, flags);
// }
ASSERT_ALWAYS(insert_virtual_allocated_range(virt_addr, size) >= B_OK);
}
static void
build_physical_memory_list(size_t memory_map_size,
efi_memory_descriptor* memory_map, size_t descriptor_size,
uint32_t descriptor_version)
{
addr_t addr = (addr_t)memory_map;
gKernelArgs.num_physical_memory_ranges = 0;
// First scan: Add all usable ranges
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor*)(addr + i * descriptor_size);
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
entry->VirtualStart = entry->PhysicalStart;
break;
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory: {
// Usable memory.
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
insert_physical_memory_range(base, size);
break;
}
case EfiACPIReclaimMemory:
// ACPI reclaim -- physical memory we could actually use later
break;
case EfiRuntimeServicesCode:
case EfiRuntimeServicesData:
entry->VirtualStart = entry->PhysicalStart;
break;
case EfiMemoryMappedIO:
entry->VirtualStart = entry->PhysicalStart;
break;
}
}
uint64_t initialPhysicalMemory = total_physical_memory();
// Second scan: Remove everything reserved that may overlap
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor*)(addr + i * descriptor_size);
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
case EfiBootServicesCode:
case EfiBootServicesData:
case EfiConventionalMemory:
break;
default:
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
remove_physical_memory_range(base, size);
}
}
gKernelArgs.ignored_physical_memory
+= initialPhysicalMemory - total_physical_memory();
sort_address_ranges(gKernelArgs.physical_memory_range,
gKernelArgs.num_physical_memory_ranges);
}
static void
build_physical_allocated_list(size_t memory_map_size,
efi_memory_descriptor* memory_map, size_t descriptor_size,
uint32_t descriptor_version)
{
addr_t addr = (addr_t)memory_map;
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor*)(addr + i * descriptor_size);
switch (entry->Type) {
case EfiLoaderData: {
uint64_t base = entry->PhysicalStart;
uint64_t size = entry->NumberOfPages * B_PAGE_SIZE;
insert_physical_allocated_range(base, size);
break;
}
default:
;
}
}
sort_address_ranges(gKernelArgs.physical_allocated_range,
gKernelArgs.num_physical_allocated_ranges);
}
void
arch_mmu_init()
{
// Stub
}
void
arch_mmu_post_efi_setup(size_t memory_map_size,
efi_memory_descriptor* memory_map, size_t descriptor_size,
uint32_t descriptor_version)
{
build_physical_allocated_list(memory_map_size, memory_map,
descriptor_size, descriptor_version);
// Switch EFI to virtual mode, using the kernel pmap.
// Something involving ConvertPointer might need to be done after this?
// http://wiki.phoenix.com/wiki/index.php/EFI_RUNTIME_SERVICES
kRuntimeServices->SetVirtualAddressMap(memory_map_size, descriptor_size,
descriptor_version, memory_map);
#ifdef DUMP_RANGES_AFTER_EXIT_SERIVCES
TRACE(("phys memory ranges:\n"));
for (uint32_t i = 0; i < gKernelArgs.num_physical_memory_ranges; i++) {
uint32_t start = (uint32_t)gKernelArgs.physical_memory_range[i].start;
uint32_t size = (uint32_t)gKernelArgs.physical_memory_range[i].size;
TRACE((" 0x%08x-0x%08x, length 0x%08x\n",
start, start + size, size));
}
TRACE(("allocated phys memory ranges:\n"));
for (uint32_t i = 0; i < gKernelArgs.num_physical_allocated_ranges; i++) {
uint32_t start = (uint32_t)gKernelArgs.physical_allocated_range[i].start;
uint32_t size = (uint32_t)gKernelArgs.physical_allocated_range[i].size;
TRACE((" 0x%08x-0x%08x, length 0x%08x\n",
start, start + size, size));
}
TRACE(("allocated virt memory ranges:\n"));
for (uint32_t i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
uint32_t start = (uint32_t)gKernelArgs.virtual_allocated_range[i].start;
uint32_t size = (uint32_t)gKernelArgs.virtual_allocated_range[i].size;
TRACE((" 0x%08x-0x%08x, length 0x%08x\n",
start, start + size, size));
}
#endif
}
void
arch_mmu_allocate_kernel_page_tables(void)
{
uint64* page = CurrentRegime.AllocatePage();
if (page != NULL) {
WRITE_SPECIALREG(TTBR1_EL1, page);
sPageDirectory = page;
} else {
panic("Not enough memory for kernel initial page\n");
}
}
uint32_t
arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
efi_memory_descriptor* memory_map, size_t descriptor_size,
uint32_t descriptor_version)
{
addr_t memory_map_addr = (addr_t)memory_map;
Mair currentMair;
// arch_mmu_allocate_page_tables();
arch_mmu_allocate_kernel_page_tables();
build_physical_memory_list(memory_map_size, memory_map,
descriptor_size, descriptor_version);
TRACE(("Mapping Code & Data\n"));
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor*)(memory_map_addr + i * descriptor_size);
switch (entry->Type) {
case EfiLoaderCode:
case EfiLoaderData:
map_range(entry->VirtualStart, entry->PhysicalStart,
entry->NumberOfPages * B_PAGE_SIZE,
ARMv8TranslationTableDescriptor::DefaultCodeAttribute
| currentMair.MaskOf(MAIR_NORMAL_WB));
break;
default:
;
}
}
TRACE(("Mapping EFI_MEMORY_RUNTIME\n"));
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor* entry = (efi_memory_descriptor*)(memory_map_addr + i * descriptor_size);
if ((entry->Attribute & EFI_MEMORY_RUNTIME) != 0)
map_range(entry->VirtualStart, entry->PhysicalStart,
entry->NumberOfPages * B_PAGE_SIZE,
ARMv8TranslationTableDescriptor::DefaultCodeAttribute | currentMair.MaskOf(MAIR_NORMAL_WB));
}
TRACE(("Mapping \"next\" regions\n"));
void* cookie = NULL;
addr_t vaddr;
phys_addr_t paddr;
size_t size;
while (mmu_next_region(&cookie, &vaddr, &paddr, &size)) {
map_range(vaddr, paddr, size,
ARMv8TranslationTableDescriptor::DefaultCodeAttribute
| currentMair.MaskOf(MAIR_NORMAL_WB));
}
/* TODO: Not an UART here... inspect dtb?
// identity mapping for the debug uart
map_range(0x09000000, 0x09000000, B_PAGE_SIZE,
ARMv8TranslationTableDescriptor::DefaultPeripheralAttribute
| currentMair.MaskOf(MAIR_DEVICE_nGnRnE));
*/
/* TODO: Whole physical map already covered ...
// identity mapping for page table area
uint64_t page_table_area = (uint64_t)sFirstPageTable;
map_range(page_table_area, page_table_area, PAGE_TABLE_AREA_SIZE,
ARMv8TranslationTableDescriptor::DefaultCodeAttribute
| currentMair.MaskOf(MAIR_NORMAL_WB));
*/
sort_address_ranges(gKernelArgs.virtual_allocated_range,
gKernelArgs.num_virtual_allocated_ranges);
addr_t vir_pgdir;
platform_bootloader_address_to_kernel_address((void*)sPageDirectory, &vir_pgdir);
gKernelArgs.arch_args.phys_pgdir = (uint64)sPageDirectory;
gKernelArgs.arch_args.vir_pgdir = (uint64)vir_pgdir;
gKernelArgs.arch_args.next_pagetable = (uint64)(sNextPageTable) - (uint64)sPageDirectory;
TRACE(("gKernelArgs.arch_args.phys_pgdir = 0x%08x\n",
(uint32_t)gKernelArgs.arch_args.phys_pgdir));
TRACE(("gKernelArgs.arch_args.vir_pgdir = 0x%08x\n",
(uint32_t)gKernelArgs.arch_args.vir_pgdir));
TRACE(("gKernelArgs.arch_args.next_pagetable = 0x%08x\n",
(uint32_t)gKernelArgs.arch_args.next_pagetable));
return (uint64_t)sPageDirectory;
}

@@ -0,0 +1,273 @@
/*
* Copyright 2021-2022 Haiku, Inc. All rights reserved.
* Released under the terms of the MIT License.
*/
#ifndef _ARM64_ARCH_MMU_H
#define _ARM64_ARCH_MMU_H
/*
* Quotes taken from:
* Arm(C) Architecture Reference Manual
* Armv8, for Armv8-A architecture profile
* Chapter: D5.3 VMSAv8-64 translation table format descriptors
*/
class ARMv8TranslationTableDescriptor {
/* Descriptor bit[0] identifies whether the descriptor is valid,
* and is 1 for a valid descriptor. If a lookup returns an invalid
* descriptor, the associated input address is unmapped, and any
* attempt to access it generates a Translation fault.
*
* Descriptor bit[1] identifies the descriptor type, and is encoded as:
* 0, Block The descriptor gives the base address of a block of memory,
* and the attributes for that memory region.
* 1, Table The descriptor gives the address of the next level of
* translation table, and for a stage 1 translation, some attributes for
* that translation.
*/
static constexpr uint64_t kTypeMask = 0x3u;
static constexpr uint64_t kTypeInvalid = 0x0u;
static constexpr uint64_t kTypeBlock = 0x1u;
static constexpr uint64_t kTypeTable = 0x3u;
static constexpr uint64_t kTypePage = 0x3u;
// TODO: Place TABLE PAGE BLOCK prefixes accordingly
struct UpperAttributes {
static constexpr uint64_t TABLE_PXN = (1UL << 59);
static constexpr uint64_t TABLE_XN = (1UL << 60);
static constexpr uint64_t TABLE_AP = (1UL << 61);
static constexpr uint64_t TABLE_NS = (1UL << 63);
static constexpr uint64_t BLOCK_PXN = (1UL << 53);
static constexpr uint64_t BLOCK_UXN = (1UL << 54);
};
struct LowerAttributes {
static constexpr uint64_t BLOCK_NS = (1 << 5);
static constexpr uint64_t BLOCK_NON_SHARE = (0 << 8);
static constexpr uint64_t BLOCK_OUTER_SHARE = (2 << 8);
static constexpr uint64_t BLOCK_INNER_SHARE = (3 << 8);
static constexpr uint64_t BLOCK_AF = (1UL << 10);
static constexpr uint64_t BLOCK_NG = (1UL << 11);
};
public:
static constexpr uint64 DefaultPeripheralAttribute = LowerAttributes::BLOCK_AF
| LowerAttributes::BLOCK_NON_SHARE
| UpperAttributes::BLOCK_PXN
| UpperAttributes::BLOCK_UXN;
static constexpr uint64 DefaultCodeAttribute = LowerAttributes::BLOCK_AF
| LowerAttributes::BLOCK_OUTER_SHARE;
ARMv8TranslationTableDescriptor(uint64_t* descriptor)
: fDescriptor(descriptor)
{}
ARMv8TranslationTableDescriptor(uint64_t descriptor)
: fDescriptor(reinterpret_cast<uint64_t*>(descriptor))
{}
bool IsInvalid() {
return (*fDescriptor & kTypeMask) == kTypeInvalid;
}
bool IsBlock() {
return (*fDescriptor & kTypeMask) == kTypeBlock;
}
bool IsPage() {
return (*fDescriptor & kTypeMask) == kTypePage;
}
bool IsTable() {
return (*fDescriptor & kTypeMask) == kTypeTable;
}
uint64_t* Dereference() {
if (IsTable())
// TODO: Use ATTR_MASK
return reinterpret_cast<uint64_t*>((*fDescriptor) & 0x0000fffffffff000ULL);
else
return NULL;
}
void SetToTable(uint64* descriptor, uint64_t attributes) {
*fDescriptor = reinterpret_cast<uint64_t>(descriptor) | kTypeTable;
}
void SetAsPage(uint64_t* physical, uint64_t attributes) {
*fDescriptor = CleanAttributes(reinterpret_cast<uint64_t>(physical)) | attributes | kTypePage;
}
void SetAsBlock(uint64_t* physical, uint64_t attributes) {
*fDescriptor = CleanAttributes(reinterpret_cast<uint64_t>(physical)) | attributes | kTypeBlock;
}
void Next() {
fDescriptor++;
}
void JumpTo(uint16 slot) {
fDescriptor += slot;
}
uint64 Value() {
return *fDescriptor;
}
uint64 Location() {
return reinterpret_cast<uint64_t>(fDescriptor);
}
private:
static uint64 CleanAttributes(uint64 address) {
return address & ~ATTR_MASK;
}
uint64_t* fDescriptor;
};
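
A small usage example of the descriptor class, with hypothetical addresses, to make the bit[1:0] encoding described above concrete:

uint64 entry = 0;
ARMv8TranslationTableDescriptor ttd(&entry);

// 2MiB block at physical 0x40200000 with the loader's default code attributes
// (BLOCK_AF | BLOCK_OUTER_SHARE):
ttd.SetAsBlock(reinterpret_cast<uint64*>(0x40200000),
    ARMv8TranslationTableDescriptor::DefaultCodeAttribute);
// entry == 0x40200000 | (1 << 10) | (2 << 8) | kTypeBlock == 0x40200601

// Pointer to a next-level table at physical 0x40081000:
ttd.SetToTable(reinterpret_cast<uint64*>(0x40081000), 0);
// entry == 0x40081000 | kTypeTable == 0x40081003
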
class Mair {
public:
Mair(uint8 el = kInvalidExceptionLevel)
{
if (el == kInvalidExceptionLevel) {
el = arch_exception_level();
}
switch(el)
{
case 1:
fMair = READ_SPECIALREG(MAIR_EL1);
break;
case 2:
fMair = READ_SPECIALREG(MAIR_EL2);
break;
case 3:
fMair = READ_SPECIALREG(MAIR_EL3);
break;
default:
fMair = 0x00u;
break;
}
}
uint8 IndexOf(uint8 requirement) {
uint64 processedMair = fMair;
uint8 index = 0;
while (((processedMair & 0xFF) != requirement) && (index < 8)) {
index++;
processedMair = (processedMair >> 8);
}
return (index < 8)?index:0xff;
}
uint64 MaskOf(uint8 requirement) {
return IndexOf(requirement) << 2;
}
private:
uint64 fMair;
};
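
Mair is the bridge between a memory-attribute requirement (a MAIR byte such as MAIR_NORMAL_WB from arm_registers.h) and the AttrIndx field of a descriptor: IndexOf() finds the matching byte in MAIR_ELx and MaskOf() shifts that index into bits [4:2], matching ATTR_IDX() in arch_pte.h. Roughly as used by arch_mmu.cpp later in this commit:

Mair currentMair;   // reads MAIR_ELx for the current exception level
uint64 flags = ARMv8TranslationTableDescriptor::DefaultCodeAttribute
    | currentMair.MaskOf(MAIR_NORMAL_WB);   // AttrIndx of the write-back entry
// 'flags' is then passed down to map_range()/map_region() for each mapping.
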
class ARMv8TranslationRegime {
static const uint8 skTranslationLevels = 4;
public:
struct TranslationLevel {
uint8 shift;
uint64 mask;
bool blocks;
bool tables;
bool pages;
};
typedef struct TranslationLevel TranslationDescriptor[skTranslationLevels];
ARMv8TranslationRegime(TranslationDescriptor& regime)
: fRegime(regime)
{}
uint16 DescriptorIndex(addr_t virt_addr, uint8 level) {
return (virt_addr >> fRegime[level].shift) & fRegime[level].mask;
}
bool BlocksAllowed(uint8 level) {
return fRegime[level].blocks;
}
bool TablesAllowed(uint8 level) {
return fRegime[level].tables;
}
bool PagesAllowed(uint8 level) {
return fRegime[level].pages;
}
uint64 Mask(uint8 level) {
return EntrySize(level) - 1;
}
bool Aligned(addr_t address, uint8 level) {
return (address & Mask(level)) == 0;
}
uint64 EntrySize(uint8 level) {
return 1ul << fRegime[level].shift;
}
uint64 TableSize(uint8 level) {
return EntrySize(level) * arch_mmu_entries_per_granularity(Granularity());
}
uint64* AllocatePage(void) {
uint64 size = Granularity();
uint64* page = NULL;
#if 0
// BUG: allocation here overlaps assigned memory ...
if (platform_allocate_region((void **)&page, size, 0, false) == B_OK) {
#else
// TODO: luckily size == B_PAGE_SIZE == 4KB ...
page = reinterpret_cast<uint64*>(mmu_allocate_page());
if (page != NULL) {
#endif
memset(page, 0, size);
if ((reinterpret_cast<uint64>(page) & (size - 1)) != 0) {
panic("Memory requested not %lx aligned\n", size - 1);
}
return page;
} else {
panic("Unavalable memory for descriptors\n");
return NULL;
}
}
uint8 MaxLevels() {
return skTranslationLevels;
}
uint64 Granularity() {
// Size of the last level ...
return EntrySize(skTranslationLevels - 1);
}
private:
TranslationDescriptor& fRegime;
};
#endif /* _ARM64_ARCH_MMU_H */

@@ -10,10 +10,64 @@
#include "efi_platform.h"
#include "aarch64.h"
extern "C" void arch_enter_kernel(struct kernel_args *kernelArgs,
addr_t kernelEntry, addr_t kernelStackTop);
extern void arch_mmu_dump_present_tables();
extern const char* granule_type_str(int tg);
extern uint32_t arch_mmu_generate_post_efi_page_tables(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version);
extern void arch_mmu_post_efi_setup(size_t memory_map_size,
efi_memory_descriptor *memory_map, size_t descriptor_size,
uint32_t descriptor_version);
extern void arch_mmu_setup_EL1();
static const char*
memory_region_type_str(int type)
{
switch (type) {
case EfiReservedMemoryType:
return "ReservedMemoryType";
case EfiLoaderCode:
return "LoaderCode";
case EfiLoaderData:
return "LoaderData";
case EfiBootServicesCode:
return "BootServicesCode";
case EfiBootServicesData:
return "BootServicesData";
case EfiRuntimeServicesCode:
return "RuntimeServicesCode";
case EfiRuntimeServicesData:
return "RuntimeServicesData";
case EfiConventionalMemory:
return "ConventionalMemory";
case EfiUnusableMemory:
return "UnusableMemory";
case EfiACPIReclaimMemory:
return "ACPIReclaimMemory";
case EfiACPIMemoryNVS:
return "ACPIMemoryNVS";
case EfiMemoryMappedIO:
return "MMIO";
case EfiMemoryMappedIOPortSpace:
return "MMIOPortSpace";
case EfiPalCode:
return "PalCode";
case EfiPersistentMemory:
return "PersistentMemory";
default:
return "unknown";
}
}
void
arch_convert_kernel_args(void)
@@ -56,14 +110,72 @@ arch_start_kernel(addr_t kernelEntry)
}
addr_t addr = (addr_t)memory_map;
efi_physical_addr loaderCode = 0LL;
dprintf("System provided memory map:\n");
for (size_t i = 0; i < memory_map_size / descriptor_size; ++i) {
efi_memory_descriptor *entry
= (efi_memory_descriptor *)(addr + i * descriptor_size);
dprintf(" %#lx-%#lx %#lx %#x %#lx\n", entry->PhysicalStart,
dprintf(" phys: 0x%0lx-0x%0lx, virt: 0x%0lx-0x%0lx, size = 0x%0lx, type: %s (%#x), attr: %#lx\n",
entry->PhysicalStart,
entry->PhysicalStart + entry->NumberOfPages * B_PAGE_SIZE,
entry->VirtualStart,
entry->VirtualStart + entry->NumberOfPages * B_PAGE_SIZE,
entry->NumberOfPages * B_PAGE_SIZE,
memory_region_type_str(entry->Type), entry->Type,
entry->Attribute);
if (entry->Type == EfiLoaderCode)
loaderCode = entry->PhysicalStart;
}
// This is where our EFI loader was relocated to; we need this offset
// to properly resolve symbol addresses
dprintf("EFI loader symbols offset: 0x%0lx\n", loaderCode);
// Generate page tables for use after ExitBootServices.
arch_mmu_generate_post_efi_page_tables(
memory_map_size, memory_map, descriptor_size, descriptor_version);
bool el2toel1 = false;
/*
* "The AArch64 exception model is made up of a number of exception levels
* (EL0 - EL3), with EL0 and EL1 having a secure and a non-secure
* counterpart. EL2 is the hypervisor level and exists only in non-secure
* mode. EL3 is the highest priority level and exists only in secure mode."
*
* "2.3 UEFI System Environment and Configuration
* The resident UEFI boot-time environment shall use the highest non-secure
* privilege level available. The exact meaning of this is architecture
* dependent, as detailed below."
* "2.3.1 AArch64 Exception Levels
* On AArch64 UEFI shall execute as 64-bit code at either EL1 or EL2,
* depending on whether or not virtualization is available at OS load time."
*/
if (arch_exception_level() != 1) {
dprintf("Current Exception Level EL%1ld\n", arch_exception_level());
if (arch_exception_level() == 2) {
/* Transitioning from EL2 we lose the present MMU configuration,
* which we would like to preserve (e.g. peripheral mappings) */
if (arch_mmu_enabled()) {
dprintf("MMU Enabled, Translation Table @ %lx Granularity %s, bits %d\n",
arch_mmu_base_register(),
granule_type_str(arch_mmu_user_granule()),
arch_mmu_user_address_bits());
dprintf("Kernel entry accessibility W: %x R: %x\n",
arch_mmu_write_access(kernelEntry),
arch_mmu_read_access(kernelEntry));
arch_mmu_dump_present_tables();
el2toel1 = true; // we want to print before exit services
}
} else {
// Not ready: any transition other than EL2 -> EL1 is unexpected
panic("Unexpected Exception Level\n");
}
}
// Attempt to fetch the memory map and exit boot services.
// This needs to be done in a loop, as ExitBootServices can change the
@@ -97,9 +209,22 @@ arch_start_kernel(addr_t kernelEntry)
//arch_mmu_post_efi_setup(memory_map_size, memory_map,
// descriptor_size, descriptor_version);
if (el2toel1) {
arch_mmu_setup_EL1();
arch_cache_disable();
_arch_transition_EL2_EL1();
arch_cache_enable();
}
//smp_boot_other_cpus(final_pml4, kernelEntry);
if (arch_mmu_read_access(kernelEntry) && arch_mmu_read_access(gKernelArgs.cpu_kstack[0].start)) {
// Enter the kernel!
arch_enter_kernel(&gKernelArgs, kernelEntry,
gKernelArgs.cpu_kstack[0].start + gKernelArgs.cpu_kstack[0].size - 8);
} else {
_arch_exception_panic("Kernel or Stack memory not accessible\n", __LINE__);
}
}

@@ -0,0 +1,85 @@
/*
* Copyright 2021-2022 Haiku, Inc. All rights reserved.
* Released under the terms of the MIT License.
*/
#include <asm_defs.h>
#include <kernel/arch/arm64/arm_registers.h>
.text
/* Based on 12.1 The Translation Lookaside Buffer
* ARM DEN0024A (ID050815)
*/
FUNCTION(_arch_mmu_invalidate_tlb_all):
dsb st /* ensure write has completed */
// cmp x0, 3
// b.eq el3
cmp x0, 2
b.eq el2
cmp x0, 1
b.eq el1
el3:
tlbi alle3
dsb sy
isb
ret
el2:
tlbi alle2
dsb sy
isb
ret
el1:
tlbi vmalle1
dsb sy
isb
ret
FUNCTION_END(_arch_mmu_invalidate_tlb_all)
/* Based on Example 11-3 Cleaning to Point of Coherency
* ARM DEN0024A (ID050815)
*/
FUNCTION(_arch_cache_clean_poc):
MRS X0, CLIDR_EL1
AND W3, W0, #0x07000000 // Get 2 x Level of Coherence
LSR W3, W3, #23
CBZ W3, Finished
MOV W10, #0 // W10 = 2 x cache level
MOV W8, #1 // W8 = constant 0b1
Loop1:
ADD W2, W10, W10, LSR #1 // Calculate 3 x cache level
LSR W1, W0, W2 // extract 3-bit cache type for this level
AND W1, W1, #0x7
CMP W1, #2
B.LT Skip // No data or unified cache at this level
MSR CSSELR_EL1, X10 // Select this cache level
ISB // Synchronize change of CSSELR
MRS X1, CCSIDR_EL1 // Read CCSIDR
AND W2, W1, #7 // W2 = log2(linelen)-4
ADD W2, W2, #4 // W2 = log2(linelen)
UBFX W4, W1, #3, #10 // W4 = max way number, right aligned
CLZ W5, W4 /* W5 = 32-log2(ways), bit position of way in DC operand */
LSL W9, W4, W5 /* W9 = max way number, aligned to position in DC operand */
LSL W16, W8, W5 // W16 = amount to decrement way number per iteration
Loop2:
UBFX W7, W1, #13, #15 // W7 = max set number, right aligned
LSL W7, W7, W2 /* W7 = max set number, aligned to position in DC operand */
LSL W17, W8, W2 // W17 = amount to decrement set number per iteration
Loop3:
ORR W11, W10, W9 // W11 = combine way number and cache number...
ORR W11, W11, W7 // ... and set number for DC operand
DC CSW, X11 // Do data cache clean by set and way
SUBS W7, W7, W17 // Decrement set number
B.GE Loop3
SUBS X9, X9, X16 // Decrement way number
B.GE Loop2
Skip:
ADD W10, W10, #2 // Increment 2 x cache level
CMP W3, W10
DSB sy /* Ensure completion of previous cache maintenance operation */
B.GT Loop1
Finished:
ret
FUNCTION_END(_arch_cache_clean_poc)
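
The set/way arithmetic above is easier to follow with a concrete, hypothetical cache geometry. The sketch below computes the DC CSW operand fields the same way the assembly does, for a 4-way, 256-set, 64-byte-line level 1 data cache:

// Hypothetical geometry; in the real code these values come from CCSIDR_EL1.
uint32_t log2LineLen = 6;                   // 64-byte lines (CCSIDR LineSize + 4)
uint32_t maxWay = 4 - 1;                    // ways - 1
uint32_t wayShift = __builtin_clz(maxWay);  // CLZ -> 30: way lives in bits [31:30]
uint32_t level = 0;                         // cache level - 1, goes in bits [3:1]

for (int32_t way = maxWay; way >= 0; way--) {
    for (int32_t set = 256 - 1; set >= 0; set--) {
        uint64_t operand = ((uint64_t)way << wayShift)
            | ((uint64_t)set << log2LineLen)
            | (level << 1);
        // asm volatile("dc csw, %0" : : "r"(operand));  // clean one line by set/way
    }
}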

@@ -0,0 +1,17 @@
#include <asm_defs.h>
.text
FUNCTION(_arch_exception_loop):
arch_loop:
b arch_loop
FUNCTION_END(_arch_exception_loop)
/* In contrast with the previous function, this one takes parameters, so
* the compiler places them in x0 and x1; when attaching a debugger we can
* see where the panic came from and with which value */
FUNCTION(_arch_exception_panic):
arch_panic_loop:
b arch_panic_loop
FUNCTION_END(_arch_exception_panic)

@@ -0,0 +1,136 @@
/*
* Copyright 2021-2022, Oliver Ruiz Dorantes. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*-
* Copyright (c) 2012-2014 Andrew Turner
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <asm_defs.h>
#include <kernel/arch/arm64/arm_registers.h>
#include <kernel/arch/arm64/arch_hypervisor.h>
.text
.macro mreg origin, destination, temporal
mrs \temporal, \origin
msr \destination, \temporal
.endm
FUNCTION(_arch_transition_EL2_EL1):
// Translation Table Base Register
mreg TTBR0_EL2, TTBR0_EL1, x10
// Memory Attribute Indirection Register
mreg MAIR_EL2, MAIR_EL1, x10
// Vector Base Address Register
mreg vbar_el2, vbar_el1, x10
// Migrate SP
mov x10, sp
msr sp_el1, x10
b drop_to_el1
// eret will return to caller
FUNCTION_END(_arch_transition_EL2_EL1)
/*
* If we are started in EL2, configure the required hypervisor
* registers and drop to EL1.
*/
FUNCTION(drop_to_el1):
mrs x1, CurrentEL
lsr x1, x1, #2
cmp x1, #0x2
b.eq 1f
ret
1:
/* Configure the Hypervisor */
mov x2, #(HCR_RW)
msr hcr_el2, x2
/* Load the Virtualization Process ID Register */
mrs x2, midr_el1
msr vpidr_el2, x2
/* Load the Virtualization Multiprocess ID Register */
mrs x2, mpidr_el1
msr vmpidr_el2, x2
/* Set the bits that need to be 1 in sctlr_el1 */
ldr x2, .Lsctlr_res1
msr sctlr_el1, x2
/* Don't trap to EL2 for exceptions */
mov x2, #CPTR_RES1
msr cptr_el2, x2
/* Don't trap to EL2 for CP15 traps */
msr hstr_el2, xzr
/* Enable access to the physical timers at EL1 */
mrs x2, cnthctl_el2
orr x2, x2, #(CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN)
msr cnthctl_el2, x2
/* Set the counter offset to a known value */
msr cntvoff_el2, xzr
/* Hypervisor trap functions */
// adr x2, hyp_vectors
// msr vbar_el2, x2
mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
msr spsr_el2, x2
/* Configure GICv3 CPU interface */
mrs x2, id_aa64pfr0_el1
/* Extract GIC bits from the register */
ubfx x2, x2, #ID_AA64PFR0_GIC_SHIFT, #ID_AA64PFR0_GIC_BITS
/* GIC[3:0] == 0001 - GIC CPU interface via special regs. supported */
cmp x2, #(ID_AA64PFR0_GIC_CPUIF_EN >> ID_AA64PFR0_GIC_SHIFT)
b.ne 2f
mrs x2, S3_4_C12_C9_5
orr x2, x2, #ICC_SRE_EL2_EN /* Enable access from insecure EL1 */
orr x2, x2, #ICC_SRE_EL2_SRE /* Enable system registers */
msr S3_4_C12_C9_5, x2
2:
/* Set the address to return to our return address */
msr elr_el2, x30
isb
eret
FUNCTION_END(drop_to_el1)
/* Data definitions */
.align 3
.Lsctlr_res1:
.quad SCTLR_RES1