ARM: Cleanup of port support code.

This also implements the fault handler correctly now, and cleans up the
exception handling. The kernel seems a lot more stable now; no unexpected
panics or faults happen anymore.
This commit is contained in:
Ithamar R. Adema 2012-11-06 11:41:12 +01:00
parent 0cf5ecba12
commit f86b582848
9 changed files with 400 additions and 250 deletions

View File

@ -12,6 +12,7 @@
/* raw exception frames */
struct iframe {
uint32 spsr;
uint32 r0;
uint32 r1;
uint32 r2;
@ -25,10 +26,11 @@ struct iframe {
uint32 r10;
uint32 r11;
uint32 r12;
uint32 r13;
uint32 lr;
uint32 usr_sp;
uint32 usr_lr;
uint32 svc_sp;
uint32 svc_lr;
uint32 pc;
uint32 cpsr;
} _PACKED;
typedef struct arch_cpu_info {
@ -40,6 +42,20 @@ extern int arch_mmu_type;
extern int arch_platform;
extern int arch_machine;
#ifdef __cplusplus
extern "C" {
#endif
extern addr_t arm_get_far(void);
extern int32 arm_get_fsr(void);
extern int mmu_read_c1(void);
extern int mmu_write_c1(int val);
#ifdef __cplusplus
};
#endif
#endif // !_ASSEMBLER
#endif /* _KERNEL_ARCH_ARM_CPU_H */

View File

@ -10,21 +10,38 @@
#include <asm_defs.h>
#include "asm_offsets.h"
.text
/* int mmu_read_c1(void); */
/* Returns the CP15 c1 Control register; used by the C code (e.g. in
   arch_int_init_post_vm()) to inspect/toggle control bits such as the
   high-vectors bit (bit 13). */
FUNCTION(mmu_read_c1):
mrc p15, 0, r0, c1, c0, 0 /* r0 = CP15 Control Register (c1) */
bx lr
FUNCTION_END(mmu_read_c1)
/* void mmu_write_c1(int val); */
/* Writes r0 (val) into the CP15 c1 Control register; counterpart of
   mmu_read_c1() above. */
FUNCTION(mmu_write_c1):
mcr p15, 0, r0, c1, c0, 0 /* CP15 Control Register (c1) = r0 */
bx lr
FUNCTION_END(mmu_write_c1)
/* NOTE: the I bit in cpsr (bit 7) is *set* to disable... */
/* void arch_int_enable_interrupts(void) */
FUNCTION(arch_int_enable_interrupts):
mrs r0, cpsr
bic r0, r0, #(1<<7) /* clear the I bit */
bic r0, r0, #(1<<7)
msr cpsr_c, r0
bx lr
FUNCTION_END(arch_int_enable_interrupts)
/* int arch_int_disable_interrupts(void)
*/
/* int arch_int_disable_interrupts(void) */
FUNCTION(arch_int_disable_interrupts):
mrs r0, cpsr
orr r1, r0, #(1<<7)
@ -33,8 +50,7 @@ FUNCTION(arch_int_disable_interrupts):
FUNCTION_END(arch_int_disable_interrupts)
/* void arch_int_restore_interrupts(int oldState)
*/
/* void arch_int_restore_interrupts(int oldState) */
FUNCTION(arch_int_restore_interrupts):
mrs r1, cpsr
and r0, r0, #(1<<7)
@ -65,3 +81,57 @@ FUNCTION(arm_context_switch):
ldmfd sp!, { r0-r12, lr }
bx lr
FUNCTION_END(arm_context_switch)
/* int32 arm_get_fsr(void); */
/* Returns the CP15 c5 Fault Status Register, which encodes the cause
   of the most recent data abort.  NOTE: declared as int32 (not addr_t)
   in arch/arm/cpu.h. */
FUNCTION(arm_get_fsr):
mrc p15, 0, r0, c5, c0, 0 @ get FSR
bx lr
FUNCTION_END(arm_get_fsr)
/* addr_t arm_get_far(void); */
/* Returns the CP15 c6 Fault Address Register, i.e. the virtual address
   that triggered the most recent data abort; read by
   arch_arm_data_abort() to drive vm_page_fault(). */
FUNCTION(arm_get_far):
mrc p15, 0, r0, c6, c0, 0 @ get FAR
bx lr
FUNCTION_END(arm_get_far)
/*! \fn void arch_debug_call_with_fault_handler(cpu_ent* cpu,
jmp_buf jumpBuffer, void (*function)(void*), void* parameter)
Called by debug_call_with_fault_handler() to do the dirty work of setting
the fault handler and calling the function. If the function causes a page
fault, the arch_debug_call_with_fault_handler() calls longjmp() with the
given \a jumpBuffer. Otherwise it returns normally.
debug_call_with_fault_handler() has already saved the CPU's fault_handler
and fault_handler_stack_pointer and will reset them later, so
arch_debug_call_with_fault_handler() doesn't need to care about it.
\param cpu The \c cpu_ent for the current CPU.
\param jumpBuffer Buffer to be used for longjmp().
\param function The function to be called.
\param parameter The parameter to be passed to the function to be called.
*/
FUNCTION(arch_debug_call_with_fault_handler):
stmfd sp!, { r4, lr }
// Set fault handler address, and fault handler stack pointer address. We
// don't need to save the previous values, since that's done by the caller.
ldr r4, =1f
str r4, [ r0, #CPU_ENT_fault_handler ]
str sp, [ r0, #CPU_ENT_fault_handler_stack_pointer ]
// Keep jumpBuffer (r1) in a callee-saved register so it is still valid
// at label 1 below if the called function faults.
mov r4, r1
// call the function
mov r0, r3
blx r2
// regular return
ldmfd sp!, { r4, lr }
bx lr
// fault -- return via longjmp(jumpBuffer, 1)
// (control arrives here via cpu_ent::fault_handler set above; longjmp
// never returns, so no epilogue is needed)
1:
mov r0, r4
mov r1, #1
bl longjmp
FUNCTION_END(arch_debug_call_with_fault_handler)

View File

@ -251,14 +251,9 @@ arch_debug_stack_trace(void)
void *
arch_debug_get_caller(void)
{
#warning ARM:IMPLEMENT
// TODO: implement me
//return __builtin_frame_address(1);
// struct stack_frame *frame;
//frame = __builtin_frame_address(0);
// frame = get_current_stack_frame();
// return (void *)frame->previous->return_address;
return NULL;
/* Return the thread id as the kernel (for example the lock code) actually
gets a somewhat valid indication of the caller back. */
return (void*) thread_get_current_thread_id();
}
@ -316,17 +311,7 @@ arch_debug_init(kernel_args *args)
}
void
arch_debug_call_with_fault_handler(cpu_ent* cpu, jmp_buf jumpBuffer,
void (*function)(void*), void* parameter)
{
// TODO: Implement fault handling! Most likely in assembly.
// (see src/system/kernel/arch/x86/arch_x86.S)
// For now, just call the function and hope we don't crash :P
function(parameter);
}
/* arch_debug_call_with_fault_handler is in arch_asm.S */
void
arch_debug_unset_current_thread(void)

View File

@ -9,6 +9,49 @@
#include <asm_defs.h>
#define CPSR_MODE_MASK 0x1f
#define CPSR_MODE_USR 0x10
#define CPSR_MODE_FIQ 0x11
#define CPSR_MODE_IRQ 0x12
#define CPSR_MODE_SVC 0x13
#define CPSR_MODE_ABT 0x17
#define CPSR_MODE_UND 0x1b
/* The following two macros are taken from FreeBSD... */

/* Build a full exception frame on the SVC-mode stack, no matter which
   exception mode we were entered in.  On exit the CPU runs in SVC mode
   and sp points at a frame laid out exactly like struct iframe
   (arch/arm/cpu.h), ascending: spsr, r0-r12, usr_sp, usr_lr, svc_sp,
   svc_lr, pc.  Expects lr to already hold the adjusted return address
   (the callers subtract/add the mode-specific pc offset first). */
.macro PUSHFRAMEINSVC
stmdb sp, {r0-r3} /* Stash r0-r3 below exception sp (no writeback) */
mov r0, lr /* Save exception-mode r14 (return address) */
mov r1, sp /* Save exception-mode sp (points at stash) */
mrs r3, spsr /* Save exception-mode spsr */
mrs r2, cpsr /* Get the CPSR */
bic r2, r2, #(CPSR_MODE_MASK)/* Clear mode bits */
orr r2, r2, #(CPSR_MODE_SVC)
msr cpsr_c, r2 /* Punch into SVC mode */
mov r2, sp /* Save SVC sp */
str r0, [sp, #-4]! /* Push return address (iframe::pc) */
str lr, [sp, #-4]! /* Push SVC lr (iframe::svc_lr) */
str r2, [sp, #-4]! /* Push SVC sp (iframe::svc_sp) */
msr spsr_all, r3 /* Restore correct spsr (for the return path) */
ldmdb r1, {r0-r3} /* Reload r0-r3 from the exception-mode stash */
sub sp, sp, #(4*15) /* Make room for spsr + r0-r12 + usr sp/lr */
stmia sp, {r0-r12} /* Store r0-r12 (leaving one slot for spsr) */
add r0, sp, #(4*13) /* Address of the usr_sp slot */
stmia r0, {r13-r14}^ /* '^': store the *user-bank* sp and lr */
mov r0, r0 /* NOP for previous instruction */
mrs r0, spsr_all
str r0, [sp, #-4]! /* Save spsr (iframe::spsr, at frame base) */
.endm
/* Unwind a frame built by PUSHFRAMEINSVC and return from the
   exception: restore spsr, then the (user-bank) r0-r14, and finally
   sp/lr/pc -- the trailing '^' with pc in the register list also
   copies spsr back into cpsr, completing the exception return. */
.macro PULLFRAMEFROMSVCANDEXIT
ldr r0, [sp], #0x0004 /* Pop the saved SPSR from the frame base */
msr spsr_all, r0 /* restore SPSR */
ldmia sp, {r0-r14}^ /* Restore registers (usr mode banks) */
mov r0, r0 /* NOP for previous instruction */
add sp, sp, #(4*15) /* Skip r0-r12 + usr sp/lr */
ldmia sp, {sp, lr, pc}^ /* Restore svc sp/lr, jump to pc, cpsr<-spsr */
.endm
.text
.globl _vectors_start
@ -43,134 +86,139 @@ _vectors_end:
.rept 64
.word 0xaabbccdd
.word 0xdeadbeef
.endr
abort_stack:
.word .
.word . - 4
.word 0xdeadbeef
.rept 64
.word 0xcafebabe
.endr
irq_stack:
.word . - 4
.word 0xcafebabe
.rept 64
.word 0xaaaabbbb
.endr
fiq_stack:
.word . - 4
.word 0xaaaabbbb
.rept 64
.word 0xccccdddd
.endr
und_stack:
.word . - 4
.word 0xccccdddd
FUNCTION(arm_undefined):
stmfd sp!, { r0-r12, r14 }
sub sp, sp, #12
mov r0, sp
mrs r1, spsr
stmia r0, { r1, r13-r14 }
b arch_arm_undefined
b .
PUSHFRAMEINSVC
mov r0, sp
bl arch_arm_undefined
PULLFRAMEFROMSVCANDEXIT
FUNCTION_END(arm_undefined)
FUNCTION(arm_syscall):
stmfd sp!, { r0-r12, r14 }
sub sp, sp, #12
mov r0, sp
mrs r1, spsr
stmia r0, { r1, r13-r14 }
b arch_arm_syscall
b .
PUSHFRAMEINSVC
mov r0, sp
bl arch_arm_syscall
PULLFRAMEFROMSVCANDEXIT
FUNCTION_END(arm_syscall)
FUNCTION(arm_prefetch_abort):
ldr sp, abort_stack
stmfd sp!, { r0-r12, r14 }
sub sp, sp, #12
mov r0, sp
mrs r1, spsr
stmia r0, { r1, r13-r14 }
b arch_arm_prefetch_abort
b .
#ifdef __XSCALE__
nop /* Make absolutely sure any pending */
nop /* imprecise aborts have occurred. */
#endif
add lr, lr, #4
PUSHFRAMEINSVC
mov r0, sp
bl arch_arm_prefetch_abort
PULLFRAMEFROMSVCANDEXIT
FUNCTION_END(arm_prefetch_abort)
FUNCTION(arm_data_abort):
ldr sp, abort_stack
/* XXX only deals with interrupting supervisor mode */
#ifdef __XSCALE__
nop /* Make absolutely sure any pending */
nop /* imprecise aborts have occurred. */
#endif
sub lr, lr, #8 /* Adjust the lr */
PUSHFRAMEINSVC
/* save r4-r6 and use as a temporary place to save while we switch into supervisor mode */
stmia r13, { r4-r6 }
mov r4, r13
sub r5, lr, #8
mrs r6, spsr
/* move into supervisor mode. irq/fiq disabled */
msr cpsr_c, #0x13
/* save the return address */
stmfd sp!, { r5 }
/* save C trashed regs, supervisor lr */
stmfd sp!, { r0-r3, r12, lr }
/* save spsr */
stmfd sp!, { r6 }
/* restore r4-r6 */
ldmia r4, { r4-r6 }
/* call into higher level code */
mrc p15, 0, r2, c5, c0, 0 @ get FSR
mrc p15, 0, r3, c6, c0, 0 @ get FAR
sub sp, sp, #20
mov r0, sp /* iframe */
stmia r0, { r6,r2,r3,r4,r5 }
mov r0, sp
bl arch_arm_data_abort
add sp, sp, #20
/* restore spsr */
ldmfd sp!, { r0 }
msr spsr_cxsf, r0
PULLFRAMEFROMSVCANDEXIT
/* restore back to where we came from */
ldmfd sp!, { r0-r3, r12, lr, pc }^
FUNCTION(arm_reserved):
b .
FUNCTION_END(arm_reserved)
FUNCTION(arm_irq):
ldr sp, abort_stack
/* XXX only deals with interrupting supervisor mode */
sub lr, lr, #4
PUSHFRAMEINSVC
/* save r4-r6 and use as a temporary place to save while we switch into supervisor mode */
stmia r13, { r4-r6 }
mov r4, r13
sub r5, lr, #4
mrs r6, spsr
/* move into supervisor mode. irq/fiq disabled */
msr cpsr_c, #(3<<6 | 0x13)
/* save the return address */
stmfd sp!, { r5 }
/* save C trashed regs, supervisor lr */
stmfd sp!, { r0-r3, r12, lr }
/* save spsr */
stmfd sp!, { r6 }
/* restore r4-r6 */
ldmia r4, { r4-r6 }
/* call into higher level code */
mov r0, sp /* iframe */
bl arch_arm_irq
/* restore spsr */
ldmfd sp!, { r0 }
msr spsr_cxsf, r0
PULLFRAMEFROMSVCANDEXIT
FUNCTION_END(arm_irq)
/* restore back to where we came from */
ldmfd sp!, { r0-r3, r12, lr, pc }^
.bss
.align 2
.global irq_save_spot
irq_save_spot:
.word 0 /* r4 */
.word 0 /* r5 */
.word 0 /* r6 */
.text
FUNCTION(arm_fiq):
ldr sp, abort_stack
sub lr, lr, #4
stmfd sp!, { r0-r3, r12, lr }
PUSHFRAMEINSVC
mov r0, sp /* iframe */
bl arch_arm_fiq
ldmfd sp!, { r0-r3, r12, pc }^
PULLFRAMEFROMSVCANDEXIT
FUNCTION_END(arm_fiq)
/* void arm_vector_init(void);
   Called from arch_int_init_post_vm() after the vector page has been
   copied into place.  Rotates through the FIQ/IRQ/ABT/UND exception
   modes and gives each its initial stack pointer (loaded from the
   word stored at the corresponding *_stack literal), then drops back
   into SVC mode.  r1 holds the entry cpsr with the mode bits cleared,
   so each target mode is simply OR-ed in. */
FUNCTION(arm_vector_init):
mrs r1, cpsr
bic r1, r1, #CPSR_MODE_MASK /* r1 = cpsr template, mode bits clear */
/* move into modes and set initial sp */
mov r0, r1
orr r0, r0, #CPSR_MODE_FIQ
msr cpsr_c, r0
ldr sp, fiq_stack /* sp = address stored at fiq_stack */
mov r0, r1
orr r0, r0, #CPSR_MODE_IRQ
msr cpsr_c, r0
ldr sp, irq_stack
mov r0, r1
orr r0, r0, #CPSR_MODE_ABT
msr cpsr_c, r0
ldr sp, abort_stack
mov r0, r1
orr r0, r0, #CPSR_MODE_UND
msr cpsr_c, r0
ldr sp, und_stack
/* ... and return back to supervisor mode */
mov r0, r1
orr r0, r0, #CPSR_MODE_SVC
msr cpsr_c, r0
bx lr
FUNCTION_END(arm_vector_init)

View File

@ -31,7 +31,7 @@
#include <string.h>
#define TRACE_ARCH_INT
//#define TRACE_ARCH_INT
#ifdef TRACE_ARCH_INT
# define TRACE(x) dprintf x
#else
@ -61,30 +61,11 @@ static void *sVectorPageAddress;
static area_id sUserVectorPageArea;
static void *sUserVectorPageAddress;
// current fault handler
addr_t gFaultHandler;
// An iframe stack used in the early boot process when we don't have
// threads yet.
struct iframe_stack gBootFrameStack;
uint32
mmu_read_c1()
{
uint32 controlReg = 0;
asm volatile("MRC p15, 0, %[c1out], c1, c0, 0":[c1out] "=r" (controlReg));
return controlReg;
}
void
mmu_write_c1(uint32 value)
{
asm volatile("MCR p15, 0, %[c1in], c1, c0, 0"::[c1in] "r" (value));
}
void
arch_int_enable_io_interrupt(int irq)
{
@ -117,29 +98,32 @@ arch_int_disable_io_interrupt(int irq)
static void
print_iframe(struct iframe *frame)
print_iframe(const char *event, struct iframe *frame)
{
if (event)
dprintf("Exception: %s\n", event);
dprintf("R00=%08lx R01=%08lx R02=%08lx R03=%08lx\n"
"R04=%08lx R05=%08lx R06=%08lx R07=%08lx\n",
frame->r0, frame->r1, frame->r2, frame->r3,
frame->r4, frame->r5, frame->r6, frame->r7);
dprintf("R08=%08lx R09=%08lx R10=%08lx R11=%08lx\n"
"R12=%08lx R13=%08lx R14=%08lx CPSR=%08lx\n",
frame->r8, frame->r9, frame->r10, frame->r11,
frame->r12, frame->usr_sp, frame->usr_lr, frame->spsr);
}
status_t
arch_int_init(kernel_args *args)
{
// see if high vectors are enabled
if (mmu_read_c1() & (1<<13))
dprintf("High vectors already enabled\n");
else {
mmu_write_c1(mmu_read_c1() | (1<<13));
if (!(mmu_read_c1() & (1<<13)))
dprintf("Unable to enable high vectors!\n");
else
dprintf("Enabled high vectors\n");
}
return B_OK;
}
extern "C" void arm_vector_init(void);
status_t
arch_int_init_post_vm(kernel_args *args)
{
@ -163,6 +147,20 @@ arch_int_init_post_vm(kernel_args *args)
// copy vectors into the newly created area
memcpy(sVectorPageAddress, &_vectors_start, VECTORPAGE_SIZE);
arm_vector_init();
// see if high vectors are enabled
if (mmu_read_c1() & (1<<13))
dprintf("High vectors already enabled\n");
else {
mmu_write_c1(mmu_read_c1() | (1<<13));
if (!(mmu_read_c1() & (1<<13)))
dprintf("Unable to enable high vectors!\n");
else
dprintf("Enabled high vectors\n");
}
sPxaInterruptArea = map_physical_memory("pxa_intc", PXA_INTERRUPT_PHYS_BASE,
PXA_INTERRUPT_SIZE, 0, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, (void**)&sPxaInterruptBase);
@ -179,7 +177,6 @@ arch_int_init_post_vm(kernel_args *args)
status_t
arch_int_init_io(kernel_args* args)
{
TRACE(("arch_int_init_io(%p)\n", args));
return B_OK;
}
@ -193,35 +190,103 @@ arch_int_init_post_device_manager(struct kernel_args *args)
extern "C" void arch_arm_undefined(struct iframe *iframe)
{
panic("Undefined instruction!");
print_iframe("Undefined Instruction", iframe);
panic("not handled!");
}
extern "C" void arch_arm_syscall(struct iframe *iframe)
{
panic("Software interrupt!\n");
print_iframe("Software interrupt", iframe);
}
extern "C" void arch_arm_data_abort(struct iframe *iframe)
extern "C" void arch_arm_data_abort(struct iframe *frame)
{
addr_t newip;
status_t res = vm_page_fault(iframe->r2 /* FAR */, iframe->r4 /* lr */,
true /* TODO how to determine read/write? */,
false /* only kernelspace for now */,
&newip);
Thread *thread = thread_get_current_thread();
bool isUser = (frame->spsr & 0x1f) == 0x10;
addr_t far = arm_get_far();
bool isWrite = true;
addr_t newip = 0;
if (res != B_HANDLED_INTERRUPT) {
panic("Data Abort: %08x %08x %08x %08x (res=%lx)", iframe->r0 /* spsr */,
iframe->r1 /* FSR */, iframe->r2 /* FAR */,
iframe->r4 /* lr */,
res);
} else {
//panic("vm_page_fault was ok (%08lx/%08lx)!", iframe->r2 /* FAR */, iframe->r0 /* spsr */);
#ifdef TRACE_ARCH_INT
print_iframe("Data Abort", frame);
#endif
if (debug_debugger_running()) {
// If this CPU or this thread has a fault handler, we're allowed to be
// here.
if (thread != NULL) {
cpu_ent* cpu = &gCPU[smp_get_current_cpu()];
if (cpu->fault_handler != 0) {
kprintf("CPU fault handler set! %p %p\n",
(void*)cpu->fault_handler, (void*)cpu->fault_handler_stack_pointer);
debug_set_page_fault_info(far, frame->pc,
isWrite ? DEBUG_PAGE_FAULT_WRITE : 0);
frame->svc_sp = cpu->fault_handler_stack_pointer;
frame->pc = cpu->fault_handler;
return;
}
if (thread->fault_handler != 0) {
kprintf("ERROR: thread::fault_handler used in kernel "
"debugger!\n");
debug_set_page_fault_info(far, frame->pc,
isWrite ? DEBUG_PAGE_FAULT_WRITE : 0);
frame->pc = thread->fault_handler;
return;
}
}
// otherwise, not really
panic("page fault in debugger without fault handler! Touching "
"address %p from pc %p\n", (void *)far, (void *)frame->pc);
return;
} else if ((frame->spsr & (1 << 7)) != 0) {
// interrupts disabled
// If a page fault handler is installed, we're allowed to be here.
// TODO: Now we are generally allowing user_memcpy() with interrupts
// disabled, which in most cases is a bug. We should add some thread
// flag allowing to explicitly indicate that this handling is desired.
if (thread && thread->fault_handler != 0) {
if (frame->pc != thread->fault_handler) {
frame->pc = thread->fault_handler;
return;
}
// The fault happened at the fault handler address. This is a
// certain infinite loop.
panic("page fault, interrupts disabled, fault handler loop. "
"Touching address %p from pc %p\n", (void*)far,
(void*)frame->pc);
}
// If we are not running the kernel startup the page fault was not
// allowed to happen and we must panic.
panic("page fault, but interrupts were disabled. Touching address "
"%p from pc %p\n", (void *)far, (void *)frame->pc);
return;
} else if (thread != NULL && thread->page_faults_allowed < 1) {
panic("page fault not allowed at this place. Touching address "
"%p from pc %p\n", (void *)far, (void *)frame->pc);
return;
}
enable_interrupts();
vm_page_fault(far, frame->pc, isWrite, isUser, &newip);
if (newip != 0) {
// the page fault handler wants us to modify the iframe to set the
// IP the cpu will return to to be this ip
frame->pc = newip;
}
}
extern "C" void arch_arm_prefetch_abort(struct iframe *iframe)
{
panic("Prefetch Abort: %08x %08x %08x", iframe->r0, iframe->r1, iframe->r2);
print_iframe("Prefetch Abort", iframe);
panic("not handled!");
}
extern "C" void arch_arm_irq(struct iframe *iframe)

View File

@ -6,52 +6,17 @@
* All rights reserved. Distributed under the terms of the MIT License.
*/
//#include <arch_platform.h>
#include <new>
#include <KernelExport.h>
#include <arch/platform.h>
#include <boot/kernel_args.h>
//#include <platform/openfirmware/openfirmware.h>
#include <real_time_clock.h>
#include <util/kernel_cpp.h>
#if 0
static M68KPlatform *sM68KPlatform;
// constructor
M68KPlatform::M68KPlatform(platform_type platformType,
m68k_platform_type m68kPlatformType)
: fPlatformType(platformType),
fM68KPlatformType(m68kPlatformType)
{
}
// destructor
M68KPlatform::~M68KPlatform()
{
}
// Default
M68KPlatform *
M68KPlatform::Default()
{
return sM68KPlatform;
}
// # pragma mark -
#endif
#include <kernel/debug.h>
status_t
arch_platform_init(struct kernel_args *kernelArgs)
{
#warning ARM:WRITEME
// NOTE: dprintf() is off-limits here, too early...
return B_OK;
}
@ -59,8 +24,7 @@ arch_platform_init(struct kernel_args *kernelArgs)
status_t
arch_platform_init_post_vm(struct kernel_args *kernelArgs)
{
#warning ARM:WRITEME
//sM68KPlatform->InitPostVM(kernelArgs);
// now we can use the heap and create areas
return B_OK;
}
@ -68,5 +32,6 @@ arch_platform_init_post_vm(struct kernel_args *kernelArgs)
status_t
arch_platform_init_post_thread(struct kernel_args *kernelArgs)
{
// now we can create and use semaphores
return B_OK;
}

View File

@ -19,6 +19,15 @@
#include <arch/timer.h>
#include <arch/cpu.h>
//#define TRACE_ARCH_TIMER
#ifdef TRACE_ARCH_TIMER
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
#define PXA_TIMERS_PHYS_BASE 0x40A00000
#define PXA_TIMERS_SIZE 0x000000C0
#define PXA_TIMERS_INTERRUPT 7 /* OST_4_11 */
@ -29,7 +38,6 @@
#define PXA_OSMR4 0x20
#define PXA_OMCR4 0x30
#define TRACE(x) //dprintf x
static area_id sPxaTimersArea;
static uint32 *sPxaTimersBase;
@ -48,7 +56,7 @@ pxa_timer_interrupt(void *data)
void
arch_timer_set_hardware_timer(bigtime_t timeout)
{
TRACE(("arch_timer_set_hardware_timer(%lld): %p\n", timeout, sPxaTimersBase));
TRACE(("arch_timer_set_hardware_timer(%lld)\n", timeout));
if (sPxaTimersBase) {
sPxaTimersBase[PXA_OIER] |= (1 << 4);
@ -62,7 +70,7 @@ arch_timer_set_hardware_timer(bigtime_t timeout)
void
arch_timer_clear_hardware_timer()
{
TRACE(("arch_timer_clear_hardware_timer: %p\n", sPxaTimersBase));
TRACE(("arch_timer_clear_hardware_timer\n"));
if (sPxaTimersBase) {
sPxaTimersBase[PXA_OMCR4] = 0; // disable our timer

View File

@ -30,7 +30,7 @@
#include "paging/arm_physical_page_mapper_large_memory.h"
#define TRACE_ARM_PAGING_METHOD_32_BIT
//#define TRACE_ARM_PAGING_METHOD_32_BIT
#ifdef TRACE_ARM_PAGING_METHOD_32_BIT
# define TRACE(x...) dprintf(x)
#else

View File

@ -249,14 +249,12 @@ ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
ARM_PTE_TYPE_MASK);
fMapCount--;
#if 0 /* IRA */
if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
if (true /* (oldEntry & ARM_PTE_ACCESSED) != 0*/) {
// Note, that we only need to invalidate the address, if the
// accessed flags was set, since only then the entry could have
// been in any TLB.
InvalidatePage(start);
}
#endif
}
} while (start != 0 && start < end);
@ -301,8 +299,8 @@ ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
fMapCount--;
#if 0 //IRA
if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
// Note, that we only need to invalidate the address, if the
// accessed flags was set, since only then the entry could have been
// in any TLB.
@ -320,14 +318,14 @@ ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
// Interestingly FreeBSD seems to ignore this problem as well
// (cf. pmap_remove_all()), unless I've missed something.
}
#endif
locker.Detach();
// PageUnmapped() will unlock for us
#if 0 //IRA
PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
(oldEntry & ARM_PTE_ACCESSED) != 0, (oldEntry & ARM_PTE_DIRTY) != 0,
true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/, true /*(oldEntry & ARM_PTE_DIRTY) != 0*/,
updatePageQueue);
#endif
return B_OK;
}
@ -375,14 +373,13 @@ ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
fMapCount--;
#if 0 //IRA
if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
// Note, that we only need to invalidate the address, if the
// accessed flags was set, since only then the entry could have
// been in any TLB.
InvalidatePage(start);
}
#endif
if (area->cache_type != CACHE_TYPE_DEVICE) {
// get the page
vm_page* page = vm_lookup_page(
@ -390,13 +387,13 @@ ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
ASSERT(page != NULL);
DEBUG_PAGE_ACCESS_START(page);
#if 0
// transfer the accessed/dirty flags to the page
if ((oldEntry & ARM_PTE_ACCESSED) != 0)
if (/*(oldEntry & ARM_PTE_ACCESSED) != 0*/ true) // XXX IRA
page->accessed = true;
if ((oldEntry & ARM_PTE_DIRTY) != 0)
if (/*(oldEntry & ARM_PTE_DIRTY) != 0 */ true)
page->modified = true;
#endif
// remove the mapping object/decrement the wired_count of the
// page
if (area->wiring == B_NO_LOCK) {
@ -512,19 +509,19 @@ ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
"has no page table entry", page, area, address);
continue;
}
#if 0
// transfer the accessed/dirty flags to the page and invalidate
// the mapping, if necessary
if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) { // XXX IRA
page->accessed = true;
if (!deletingAddressSpace)
InvalidatePage(address);
}
if ((oldEntry & ARM_PTE_DIRTY) != 0)
if (true /*(oldEntry & ARM_PTE_DIRTY) != 0*/)
page->modified = true;
#endif
if (pageFullyUnmapped) {
DEBUG_PAGE_ACCESS_START(page);
@ -640,7 +637,7 @@ ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
| ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA | PAGE_PRESENT;
#endif
return B_OK;
}
@ -752,7 +749,7 @@ ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
pinner.Unlock();
if ((oldEntry & flagsToClear) != 0)
//XXX IRA if ((oldEntry & flagsToClear) != 0)
InvalidatePage(va);
return B_OK;
@ -820,10 +817,9 @@ ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
pinner.Unlock();
#if 0 //IRA
_modified = (oldEntry & ARM_PTE_DIRTY) != 0;
_modified = true /* (oldEntry & ARM_PTE_DIRTY) != 0 */; // XXX IRA
if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {
// Note, that we only need to invalidate the address, if the
// accessed flags was set, since only then the entry could have been
// in any TLB.
@ -833,9 +829,6 @@ ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
return true;
}
#else
_modified = false;
#endif
if (!unmapIfUnaccessed)
return false;