#ifndef __SYS__CPU_H__
#define __SYS__CPU_H__

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#if defined(__x86_64__) || defined(__i386__)
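
// Execute CPUID for the given leaf/subleaf. The maximum supported leaf for
// the requested range (basic or extended) is queried first; returns false
// if the leaf is out of range. Illustrative usage:
//   uint32_t a, b, c, d;
//   if (cpuid(1, 0, &a, &b, &c, &d)) { /* feature flags in c and d */ }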
inline bool cpuid(uint32_t leaf, uint32_t subleaf,
                  uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx) {
    uint32_t cpuid_max;
    asm volatile ("cpuid"
                  : "=a" (cpuid_max)
                  : "a" (leaf & 0x80000000)
                  : "ebx", "ecx", "edx");
    if (leaf > cpuid_max)
        return false;
    asm volatile ("cpuid"
                  : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                  : "a" (leaf), "c" (subleaf));
    return true;
}
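
// Port I/O helpers. As elsewhere in this header, the b/w/d suffixes denote
// 8-, 16-, and 32-bit (double word) accesses respectively.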
inline void outb(uint16_t port, uint8_t value) {
    asm volatile ("outb %%al, %1" : : "a" (value), "Nd" (port) : "memory");
}

inline void outw(uint16_t port, uint16_t value) {
    asm volatile ("outw %%ax, %1" : : "a" (value), "Nd" (port) : "memory");
}

inline void outd(uint16_t port, uint32_t value) {
    asm volatile ("outl %%eax, %1" : : "a" (value), "Nd" (port) : "memory");
}

inline uint8_t inb(uint16_t port) {
    uint8_t value;
    asm volatile ("inb %1, %%al" : "=a" (value) : "Nd" (port) : "memory");
    return value;
}

inline uint16_t inw(uint16_t port) {
    uint16_t value;
    asm volatile ("inw %1, %%ax" : "=a" (value) : "Nd" (port) : "memory");
    return value;
}

inline uint32_t ind(uint16_t port) {
    uint32_t value;
    asm volatile ("inl %1, %%eax" : "=a" (value) : "Nd" (port) : "memory");
    return value;
}
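
// MMIO accessors: single loads/stores performed in inline asm so the
// compiler can neither reorder nor elide them.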
inline void mmoutb(uintptr_t addr, uint8_t value) {
    asm volatile (
        "movb %1, (%0)"
        :
        : "r" (addr), "ir" (value)
        : "memory"
    );
}

inline void mmoutw(uintptr_t addr, uint16_t value) {
    asm volatile (
        "movw %1, (%0)"
        :
        : "r" (addr), "ir" (value)
        : "memory"
    );
}

inline void mmoutd(uintptr_t addr, uint32_t value) {
    asm volatile (
        "movl %1, (%0)"
        :
        : "r" (addr), "ir" (value)
        : "memory"
    );
}

#if defined (__x86_64__)
inline void mmoutq(uintptr_t addr, uint64_t value) {
    asm volatile (
        "movq %1, (%0)"
        :
        : "r" (addr), "r" (value)
        : "memory"
    );
}
#endif

inline uint8_t mminb(uintptr_t addr) {
    uint8_t ret;
    asm volatile (
        "movb (%1), %0"
        : "=r" (ret)
        : "r" (addr)
        : "memory"
    );
    return ret;
}

inline uint16_t mminw(uintptr_t addr) {
    uint16_t ret;
    asm volatile (
        "movw (%1), %0"
        : "=r" (ret)
        : "r" (addr)
        : "memory"
    );
    return ret;
}

inline uint32_t mmind(uintptr_t addr) {
    uint32_t ret;
    asm volatile (
        "movl (%1), %0"
        : "=r" (ret)
        : "r" (addr)
        : "memory"
    );
    return ret;
}

#if defined (__x86_64__)
inline uint64_t mminq(uintptr_t addr) {
    uint64_t ret;
    asm volatile (
        "movq (%1), %0"
        : "=r" (ret)
        : "r" (addr)
        : "memory"
    );
    return ret;
}
#endif
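
// Model-specific register access. rdmsr/wrmsr move the 64-bit value
// through EDX:EAX, as the instructions require.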
inline uint64_t rdmsr(uint32_t msr) {
    uint32_t edx, eax;
    asm volatile ("rdmsr"
                  : "=a" (eax), "=d" (edx)
                  : "c" (msr)
                  : "memory");
    return ((uint64_t)edx << 32) | eax;
}

inline void wrmsr(uint32_t msr, uint64_t value) {
    uint32_t edx = value >> 32;
    uint32_t eax = (uint32_t)value;
    asm volatile ("wrmsr"
                  :
                  : "a" (eax), "d" (edx), "c" (msr)
                  : "memory");
}
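
// rdtsc reads the time-stamp counter; delay() busy-waits for roughly the
// given number of TSC cycles. The TSC rate is CPU-dependent, so this only
// provides a coarse delay.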
inline uint64_t rdtsc(void) {
    uint32_t edx, eax;
    asm volatile ("rdtsc" : "=a" (eax), "=d" (edx));
    return ((uint64_t)edx << 32) | eax;
}

inline void delay(uint64_t cycles) {
    uint64_t next_stop = rdtsc() + cycles;

    while (rdtsc() < next_stop);
}
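
// rdrand/rdseed retry until the CPU signals a valid result via the carry
// flag. Illustrative usage (width must match an available register size):
//   uint64_t r = rdrand(uint64_t);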
#define rdrand(type) ({ \
    type ret; \
    asm volatile ( \
        "1: " \
        "rdrand %0;" \
        "jnc 1b;" \
        : "=r" (ret) \
    ); \
    ret; \
})

#define rdseed(type) ({ \
    type ret; \
    asm volatile ( \
        "1: " \
        "rdseed %0;" \
        "jnc 1b;" \
        : "=r" (ret) \
    ); \
    ret; \
})
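
// Control register accessors. reg is a string literal pasted into the asm
// template, e.g. (illustratively) size_t cr0 = read_cr("0");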
#define write_cr(reg, val) ({ \
    asm volatile ("mov %0, %%cr" reg :: "r" (val) : "memory"); \
})

#define read_cr(reg) ({ \
    size_t cr; \
    asm volatile ("mov %%cr" reg ", %0" : "=r" (cr) :: "memory"); \
    cr; \
})
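
// Atomic helpers. locked_read fetches *var by atomically adding zero with
// lock xadd; locked_write stores val with xchg and yields the previous value.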
#define locked_read(var) ({ \
    typeof(*var) ret = 0; \
    asm volatile ( \
        "lock xadd %0, %1" \
        : "+r" (ret) \
        : "m" (*(var)) \
        : "memory" \
    ); \
    ret; \
})

#define locked_write(var, val) ({ \
    typeof(*var) ret = val; \
    asm volatile ( \
        "lock xchg %0, %1" \
        : "+r" (ret) \
        : "m" (*(var)) \
        : "memory" \
    ); \
    ret; \
})

#elif defined (__aarch64__)
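
// There is no TSC on AArch64; use the physical count register of the
// generic timer as the equivalent.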
inline uint64_t rdtsc(void) {
    uint64_t v;
    asm volatile ("mrs %0, cntpct_el0" : "=r" (v));
    return v;
}
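
// Atomic load with acquire semantics (ldar).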
#define locked_read(var) ({ \
    typeof(*var) ret = 0; \
    asm volatile ( \
        "ldar %0, %1" \
        : "=r" (ret) \
        : "m" (*(var)) \
        : "memory" \
    ); \
    ret; \
})
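
// CTR_EL0 reports the minimum cache line sizes: IminLine in bits [3:0] and
// DminLine in bits [19:16], each encoded as log2 of the number of 4-byte
// words, so the line size in bytes is 4 << field.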
inline size_t icache_line_size(void) {
    uint64_t ctr;
    asm volatile ("mrs %0, ctr_el0" : "=r"(ctr));

    return 4 << (ctr & 0b1111);
}

inline size_t dcache_line_size(void) {
    uint64_t ctr;
    asm volatile ("mrs %0, ctr_el0" : "=r"(ctr));

    return 4 << ((ctr >> 16) & 0b1111);
}

// Clean and invalidate D-Cache to Point of Coherency
inline void clean_inval_dcache_poc(uintptr_t start, uintptr_t end) {
    size_t dsz = dcache_line_size();

    uintptr_t addr = start & ~(dsz - 1);
    while (addr < end) {
        asm volatile ("dc civac, %0" :: "r"(addr) : "memory");
        addr += dsz;
    }

    asm volatile ("dsb sy\n\tisb");
}

// Invalidate I-Cache to Point of Unification
inline void inval_icache_pou(uintptr_t start, uintptr_t end) {
    size_t isz = icache_line_size();

    uintptr_t addr = start & ~(isz - 1);
    while (addr < end) {
        asm volatile ("ic ivau, %0" :: "r"(addr) : "memory");
        addr += isz;
    }

    asm volatile ("dsb sy\n\tisb");
}
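
// Return the current exception level (0-3) from CurrentEL bits [3:2].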
inline int current_el(void) {
    uint64_t v;

    asm volatile ("mrs %0, currentel" : "=r"(v));
    v = (v >> 2) & 0b11;

    return v;
}

#else
#error Unknown architecture
#endif

#endif