x86_{read,write}_cr{0,4} can just be implemented as macros; put an x86_ prefix on the other read/write macros for consistency.
parent cbfe5fcd17
commit 4e8fbfb2d1
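This replaces the out-of-line x86_read_cr0/x86_write_cr0 and x86_read_cr4/x86_write_cr4 assembly helpers with macros built on GCC statement expressions, and gives the existing read_cr2/read_cr3/write_cr3/read_dr3/write_dr3 macros an x86_ prefix. A minimal sketch of the pattern, using the cr0 pair as it appears in the new header (compiles with GCC/Clang on x86; the moves are privileged, so it only executes in kernel mode):

#include <stddef.h>

/* The read macro is a GCC statement expression: the last expression (_v)
 * becomes the value of the macro, so it can be used like a function call
 * instead of writing through an lvalue macro argument. */
#define x86_read_cr0() ({ \
	size_t _v; \
	__asm__("mov %%cr0,%0" : "=r" (_v)); \
	_v; \
})

/* The write macro keeps the old shape, just with the x86_ prefix. */
#define x86_write_cr0(value) \
	__asm__("mov %0,%%cr0" : : "r" (value))

With the macros in place, the out-of-line assembly implementations and their prototypes become redundant, which is what the later hunks remove.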
@@ -270,19 +270,46 @@ typedef struct arch_cpu_info {
 
 #define nop() __asm__ ("nop"::)
 
-#define read_cr2(value) \
-	__asm__("mov %%cr2,%0" : "=r" (value))
+#define x86_read_cr0() ({ \
+	size_t _v; \
+	__asm__("mov %%cr0,%0" : "=r" (_v)); \
+	_v; \
+})
 
-#define read_cr3(value) \
-	__asm__("mov %%cr3,%0" : "=r" (value))
+#define x86_write_cr0(value) \
+	__asm__("mov %0,%%cr0" : : "r" (value))
 
-#define write_cr3(value) \
+#define x86_read_cr2() ({ \
+	size_t _v; \
+	__asm__("mov %%cr2,%0" : "=r" (_v)); \
+	_v; \
+})
+
+#define x86_read_cr3() ({ \
+	size_t _v; \
+	__asm__("mov %%cr3,%0" : "=r" (_v)); \
+	_v; \
+})
+
+#define x86_write_cr3(value) \
 	__asm__("mov %0,%%cr3" : : "r" (value))
 
-#define read_dr3(value) \
-	__asm__("mov %%dr3,%0" : "=r" (value))
+#define x86_read_cr4() ({ \
+	size_t _v; \
+	__asm__("mov %%cr4,%0" : "=r" (_v)); \
+	_v; \
+})
 
-#define write_dr3(value) \
+#define x86_write_cr4(value) \
+	__asm__("mov %0,%%cr4" : : "r" (value))
+
+#define x86_read_dr3() ({ \
+	size_t _v; \
+	__asm__("mov %%dr3,%0" : "=r" (_v)); \
+	_v; \
+})
+
+#define x86_write_dr3(value) \
 	__asm__("mov %0,%%dr3" : : "r" (value))
 
 #define invalidate_TLB(va) \
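Since the read macros now return a value, call sites can compose reads and writes directly. For example, the PAE switcher further down in this diff enables PAE and global pages with a single read-modify-write expression (IA32_CR4_PAE and IA32_CR4_GLOBAL_PAGES are existing Haiku constants):

	x86_write_cr4(x86_read_cr4() | IA32_CR4_PAE | IA32_CR4_GLOBAL_PAGES);

and the debugger code replaces the old read_cr3(oldPageDirectory)/write_cr3(newPageDirectory) pair with:

	oldPageDirectory = x86_read_cr3();
	x86_write_cr3(newPageDirectory);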
@@ -360,10 +387,6 @@ void i386_noop_swap(void* oldFpuState, const void* newFpuState);
 void i386_fnsave_swap(void* oldFpuState, const void* newFpuState);
 void i386_fxsave_swap(void* oldFpuState, const void* newFpuState);
 uint32 x86_read_ebp();
-uint32 x86_read_cr0();
-void x86_write_cr0(uint32 value);
-uint32 x86_read_cr4();
-void x86_write_cr4(uint32 value);
 uint64 x86_read_msr(uint32 registerNumber);
 void x86_write_msr(uint32 registerNumber, uint64 value);
 void x86_set_task_gate(int32 cpu, int32 n, int32 segment);
@@ -21,7 +21,7 @@ arch_int_enable_interrupts_inline(void)
 static inline int
 arch_int_disable_interrupts_inline(void)
 {
-	unsigned long flags;
+	size_t flags;
 
 	asm volatile("pushf;\n"
 		"pop %0;\n"
@@ -41,7 +41,7 @@ arch_int_restore_interrupts_inline(int oldState)
 static inline bool
 arch_int_are_interrupts_enabled_inline(void)
 {
-	unsigned long flags;
+	size_t flags;
 
 	asm volatile("pushf;\n"
 		"pop %0;\n" : "=g" (flags));
@@ -57,8 +57,7 @@ void arch_syscall_64_bit_return_value(void);
 static inline Thread*
 arch_thread_get_current_thread(void)
 {
-	Thread* t;
-	read_dr3(t);
+	Thread* t = (Thread*)x86_read_dr3();
 	return t;
 }
 
@@ -66,7 +65,7 @@ arch_thread_get_current_thread(void)
 static inline void
 arch_thread_set_current_thread(Thread* t)
 {
-	write_dr3(t);
+	x86_write_dr3(t);
 }
 
 
@@ -86,32 +86,6 @@ FUNCTION(x86_read_ebp):
 	ret
 FUNCTION_END(x86_read_ebp)
 
-/* uint32 x86_read_cr0(); */
-FUNCTION(x86_read_cr0):
-	movl %cr0, %eax
-	ret
-FUNCTION_END(x86_read_cr0)
-
-/* void x86_write_cr0(uint32 value); */
-FUNCTION(x86_write_cr0):
-	movl 4(%esp), %eax
-	movl %eax, %cr0
-	ret
-FUNCTION_END(x86_write_cr0)
-
-/* uint32 x86_read_cr4(); */
-FUNCTION(x86_read_cr4):
-	movl %cr4, %eax
-	ret
-FUNCTION_END(x86_read_cr4)
-
-/* void x86_write_cr4(uint32 value); */
-FUNCTION(x86_write_cr4):
-	movl 4(%esp), %eax
-	movl %eax, %cr4
-	ret
-FUNCTION_END(x86_write_cr4)
-
 /* uint64 x86_read_msr(uint32 register); */
 FUNCTION(x86_read_msr):
 	movl 4(%esp), %ecx
@@ -322,8 +322,7 @@ x86_double_fault_exception(struct iframe* frame)
 void
 x86_page_fault_exception_double_fault(struct iframe* frame)
 {
-	uint32 cr2;
-	asm("movl %%cr2, %0" : "=r" (cr2));
+	addr_t cr2 = x86_read_cr2();
 
 	// Only if this CPU has a fault handler, we're allowed to be here.
 	cpu_ent& cpu = gCPU[x86_double_fault_get_cpu()];
@@ -351,11 +350,9 @@ static void
 page_fault_exception(struct iframe* frame)
 {
 	Thread *thread = thread_get_current_thread();
-	uint32 cr2;
+	addr_t cr2 = x86_read_cr2();
 	addr_t newip;
 
-	asm("movl %%cr2, %0" : "=r" (cr2));
-
 	if (debug_debugger_running()) {
 		// If this CPU or this thread has a fault handler, we're allowed to be
 		// here.
@@ -53,7 +53,7 @@ extern void hardware_interrupt(struct iframe* frame);
 
 
 static const char*
-exception_name(unsigned long number, char* buffer, size_t bufferSize)
+exception_name(uint64 number, char* buffer, size_t bufferSize)
 {
 	if (number >= 0
 		&& number < (sizeof(kInterruptNames) / sizeof(kInterruptNames[0])))
@@ -97,8 +97,7 @@ unexpected_exception(iframe* frame)
 static void
 page_fault_exception(iframe* frame)
 {
-	unsigned long cr2;
-	read_cr2(cr2);
+	addr_t cr2 = x86_read_cr2();
 
 	panic("page fault exception at ip %#lx on %#lx, error code %#lx\n",
 		frame->rip, cr2, frame->error_code);
@@ -28,6 +28,7 @@ if $(TARGET_ARCH) = x86_64 {
 	SEARCH_SOURCE += [ FDirName $(SUBDIR) 32 ] ;
 
 	archSpecificSources =
+		arch.S
 		int.cpp
 		interrupts.S
 
@@ -42,7 +43,6 @@ if $(TARGET_ARCH) = x86_64 {
 		arch_timer.cpp
 		arch_vm.cpp
 		arch_vm_translation_map.cpp
-		arch_x86.S
 		arch_system_info.cpp
 		arch_user_debugger.cpp
 		apic.cpp
@@ -355,7 +355,7 @@ init_double_fault(int cpuNum)
 	tss->sp0 = (uint32)x86_get_double_fault_stack(cpuNum, &stackSize);
 	tss->sp0 += stackSize;
 	tss->ss0 = KERNEL_DATA_SEG;
-	read_cr3(tss->cr3);
+	tss->cr3 = x86_read_cr3();
 		// copy the current cr3 to the double fault cr3
 	tss->eip = (uint32)&double_fault;
 	tss->es = KERNEL_DATA_SEG;
@@ -401,8 +401,8 @@ setup_for_thread(char *arg, Thread **_thread, uint32 *_ebp,
 			thread_get_current_thread(), thread);
 
 		if (newPageDirectory != 0) {
-			read_cr3(*_oldPageDirectory);
-			write_cr3(newPageDirectory);
+			*_oldPageDirectory = x86_read_cr3();
+			x86_write_cr3(newPageDirectory);
 		}
 
 		if (thread->state == B_THREAD_RUNNING) {
@@ -669,7 +669,7 @@ stack_trace(int argc, char **argv)
 
 	if (oldPageDirectory != 0) {
 		// switch back to the previous page directory to no cause any troubles
-		write_cr3(oldPageDirectory);
+		x86_write_cr3(oldPageDirectory);
 	}
 
 	return 0;
@@ -829,7 +829,7 @@ show_call(int argc, char **argv)
 
 	if (oldPageDirectory != 0) {
 		// switch back to the previous page directory to not cause any troubles
-		write_cr3(oldPageDirectory);
+		x86_write_cr3(oldPageDirectory);
 	}
 
 	return 0;
@@ -938,8 +938,8 @@ cmd_in_context(int argc, char** argv)
 			thread_get_current_thread(), thread);
 
 		if (newPageDirectory != 0) {
-			read_cr3(oldPageDirectory);
-			write_cr3(newPageDirectory);
+			oldPageDirectory = x86_read_cr3();
+			x86_write_cr3(newPageDirectory);
 		}
 	}
 
@@ -951,7 +951,7 @@ cmd_in_context(int argc, char** argv)
 
 	// reset the page directory
 	if (oldPageDirectory)
-		write_cr3(oldPageDirectory);
+		x86_write_cr3(oldPageDirectory);
 
 	return 0;
 }
@@ -1128,7 +1128,7 @@ arch_debug_get_interrupt_pc(bool* _isSyscall)
 void
 arch_debug_unset_current_thread(void)
 {
-	write_dr3(NULL);
+	x86_write_dr3(NULL);
 }
 
 
@@ -419,9 +419,8 @@ X86PagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
 {
 	// We only trust the kernel team's page directory. So switch to it first.
 	// Always set it to make sure the TLBs don't contain obsolete data.
-	uint32 physicalPageDirectory;
-	read_cr3(physicalPageDirectory);
-	write_cr3(fKernelPhysicalPageDirectory);
+	uint32 physicalPageDirectory = x86_read_cr3();
+	x86_write_cr3(fKernelPhysicalPageDirectory);
 
 	// get the page directory entry for the address
 	page_directory_entry pageDirectoryEntry;
@@ -465,7 +464,7 @@ X86PagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
 
 	// switch back to the original page directory
 	if (physicalPageDirectory != fKernelPhysicalPageDirectory)
-		write_cr3(physicalPageDirectory);
+		x86_write_cr3(physicalPageDirectory);
 
 	if ((pageTableEntry & X86_PTE_PRESENT) == 0)
 		return false;
@@ -100,8 +100,7 @@ X86PagingStructures32Bit::Delete()
 #if 0
 	// this sanity check can be enabled when corruption due to
 	// overwriting an active page directory is suspected
-	uint32 activePageDirectory;
-	read_cr3(activePageDirectory);
+	uint32 activePageDirectory = x86_read_cr3();
 	if (activePageDirectory == pgdir_phys)
 		panic("deleting a still active page directory\n");
 #endif
@@ -163,7 +163,7 @@ struct X86PagingMethodPAE::ToPAESwitcher {
 private:
 	static void _EnablePAE(void* physicalPDPT, int cpu)
 	{
-		write_cr3((addr_t)physicalPDPT);
+		x86_write_cr3((addr_t)physicalPDPT);
 		x86_write_cr4(x86_read_cr4() | IA32_CR4_PAE | IA32_CR4_GLOBAL_PAGES);
 	}
 
@@ -681,9 +681,8 @@ X86PagingMethodPAE::IsKernelPageAccessible(addr_t virtualAddress,
 	// We only trust the kernel team's page directories. So switch to the
 	// kernel PDPT first. Always set it to make sure the TLBs don't contain
 	// obsolete data.
-	uint32 physicalPDPT;
-	read_cr3(physicalPDPT);
-	write_cr3(fKernelPhysicalPageDirPointerTable);
+	uint32 physicalPDPT = x86_read_cr3();
+	x86_write_cr3(fKernelPhysicalPageDirPointerTable);
 
 	// get the PDPT entry for the address
 	pae_page_directory_pointer_table_entry pdptEntry = 0;
@@ -734,7 +733,7 @@ X86PagingMethodPAE::IsKernelPageAccessible(addr_t virtualAddress,
 
 	// switch back to the original page directory
 	if (physicalPDPT != fKernelPhysicalPageDirPointerTable)
-		write_cr3(physicalPDPT);
+		x86_write_cr3(physicalPDPT);
 
 	if ((pageTableEntry & X86_PAE_PTE_PRESENT) == 0)
 		return false;