Unfinished kernel support for m68k.

Based on ppc/x86 from r22648.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22650 a95241bf-73f2-0310-859d-f6bbb57e9c96
François Revol 2007-10-21 23:49:57 +00:00
parent 0b2adc3d8e
commit 845a180f3d
20 changed files with 4606 additions and 0 deletions

src/system/kernel/arch/m68k/Jamfile

@@ -0,0 +1,35 @@
SubDir HAIKU_TOP src system kernel arch m68k ;
SubDirHdrs $(SUBDIR) $(DOTDOT) generic ;
#UseHeaders $(TARGET_PRIVATE_KERNEL_HEADERS) ;
# for some reason, this doesn't work
UsePrivateHeaders kernel [ FDirName kernel arch $(TARGET_ARCH) ]
[ FDirName kernel boot platform $(HAIKU_BOOT_PLATFORM) ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
KernelStaticLibrary libm68k :
arch_atomic.c
arch_cpu.cpp
arch_cpu_asm.S
arch_debug_console.cpp
arch_debug.cpp
arch_elf.cpp
arch_exceptions.S
arch_int.cpp
arch_mmu.cpp
arch_platform.cpp
arch_real_time_clock.cpp
arch_smp.c
arch_system_info.c
arch_thread.c
arch_timer.c
arch_user_debugger.cpp
arch_vm.cpp
arch_vm_translation_map.cpp
arch_asm.S
generic_vm_physical_page_mapper.cpp
:
$(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused
;

src/system/kernel/arch/m68k/arch_asm.S

@@ -0,0 +1,333 @@
/*
* Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*
* Copyright 2003, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#define FUNCTION(x) .global x; .type x,@function; x
#define MSR_EXCEPTIONS_ENABLED 15
.text
// ToDo: fixme
FUNCTION(reboot):
reset
/* void arch_int_enable_interrupts(void) */
FUNCTION(arch_int_enable_interrupts):
mfmsr %r3 // load msr
li %r4, 1
insrwi %r3, %r4, 1, 31 - MSR_EXCEPTIONS_ENABLED
// sets bit 15, EE
mtmsr %r3 // put it back into the msr
blr
/* int arch_int_disable_interrupts(void)
* r3
*/
FUNCTION(arch_int_disable_interrupts):
mfmsr %r4 // load msr
mr %r3, %r4 // save old state
rlwinm %r4, %r4, 0, 32 - MSR_EXCEPTIONS_ENABLED, 30 - MSR_EXCEPTIONS_ENABLED
// clears bit 15, EE
mtmsr %r4 // put it back into the msr
blr
/* void arch_int_restore_interrupts(int oldState)
* r3
*/
FUNCTION(arch_int_restore_interrupts):
mfmsr %r4
rlwimi %r4, %r3, 0, 31 - MSR_EXCEPTIONS_ENABLED, 31 - MSR_EXCEPTIONS_ENABLED
// clear or set bit 15, EE to the same state as in r3, oldState
mtmsr %r4
blr
/* bool arch_int_are_interrupts_enabled(void) */
FUNCTION(arch_int_are_interrupts_enabled):
mfmsr %r3 // load msr
extrwi %r3, %r3, 1, 31 - MSR_EXCEPTIONS_ENABLED
// mask out the EE bit
blr
// ToDo: fixme
FUNCTION(dbg_save_registers):
blr
/* long long get_time_base(void) */
FUNCTION(get_time_base):
1:
mftbu %r3 // get the upper time base register
mftb %r4 // get the lower time base register
mftbu %r5 // get the upper again
cmpw %r5, %r3 // see if it changed while we were reading the lower
bne- 1b // if so, repeat
blr
/* void getibats(int bats[8]); */
FUNCTION(getibats):
mfibatu %r0,0
stw %r0,0(%r3)
mfibatl %r0,0
stwu %r0,4(%r3)
mfibatu %r0,1
stwu %r0,4(%r3)
mfibatl %r0,1
stwu %r0,4(%r3)
mfibatu %r0,2
stwu %r0,4(%r3)
mfibatl %r0,2
stwu %r0,4(%r3)
mfibatu %r0,3
stwu %r0,4(%r3)
mfibatl %r0,3
stwu %r0,4(%r3)
blr
// void setibats(int bats[8]);
FUNCTION(setibats):
lwz %r0,0(%r3)
mtibatu 0,%r0
isync
lwzu %r0,4(%r3)
mtibatl 0,%r0
isync
lwzu %r0,4(%r3)
mtibatu 1,%r0
isync
lwzu %r0,4(%r3)
mtibatl 1,%r0
isync
lwzu %r0,4(%r3)
mtibatu 2,%r0
isync
lwzu %r0,4(%r3)
mtibatl 2,%r0
isync
lwzu %r0,4(%r3)
mtibatu 3,%r0
isync
lwzu %r0,4(%r3)
mtibatl 3,%r0
isync
blr
// void getdbats(int bats[8]);
FUNCTION(getdbats):
mfdbatu %r0,0
stw %r0,0(%r3)
mfdbatl %r0,0
stwu %r0,4(%r3)
mfdbatu %r0,1
stwu %r0,4(%r3)
mfdbatl %r0,1
stwu %r0,4(%r3)
mfdbatu %r0,2
stwu %r0,4(%r3)
mfdbatl %r0,2
stwu %r0,4(%r3)
mfdbatu %r0,3
stwu %r0,4(%r3)
mfdbatl %r0,3
stwu %r0,4(%r3)
blr
// void setdbats(int bats[8]);
FUNCTION(setdbats):
lwz %r0,0(%r3)
mtdbatu 0,%r0
lwzu %r0,4(%r3)
mtdbatl 0,%r0
lwzu %r0,4(%r3)
mtdbatu 1,%r0
lwzu %r0,4(%r3)
mtdbatl 1,%r0
lwzu %r0,4(%r3)
mtdbatu 2,%r0
lwzu %r0,4(%r3)
mtdbatl 2,%r0
lwzu %r0,4(%r3)
mtdbatu 3,%r0
lwzu %r0,4(%r3)
mtdbatl 3,%r0
sync
blr
// unsigned int gethid0();
FUNCTION(gethid0):
mfspr %r3, 1008
blr
// void sethid0(unsigned int val);
FUNCTION(sethid0):
mtspr 1008, %r3
blr
// unsigned int getl2cr();
FUNCTION(getl2cr):
mfspr %r3, 1017
blr
// void setl2cr(unsigned int val);
FUNCTION(setl2cr):
mtspr 1017, %r3
blr
// void ppc_context_switch(addr_t *old_sp, addr_t new_sp);
FUNCTION(ppc_context_switch):
// regs to push on the stack: f13-f31, r13-r31, cr, r2, lr
// push the old regs we need to save on the stack
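// resulting frame layout, from the final stack pointer upwards:
// LR, CR, r2, r13..r31, f13..f31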
// f31-13
stfdu %f31, -8(%r1)
stfdu %f30, -8(%r1)
stfdu %f29, -8(%r1)
stfdu %f28, -8(%r1)
stfdu %f27, -8(%r1)
stfdu %f26, -8(%r1)
stfdu %f25, -8(%r1)
stfdu %f24, -8(%r1)
stfdu %f23, -8(%r1)
stfdu %f22, -8(%r1)
stfdu %f21, -8(%r1)
stfdu %f20, -8(%r1)
stfdu %f19, -8(%r1)
stfdu %f18, -8(%r1)
stfdu %f17, -8(%r1)
stfdu %f16, -8(%r1)
stfdu %f15, -8(%r1)
stfdu %f14, -8(%r1)
stfdu %f13, -8(%r1)
// r31-13, r2
stwu %r31, -4(%r1)
stwu %r30, -4(%r1)
stwu %r29, -4(%r1)
stwu %r28, -4(%r1)
stwu %r27, -4(%r1)
stwu %r26, -4(%r1)
stwu %r25, -4(%r1)
stwu %r24, -4(%r1)
stwu %r23, -4(%r1)
stwu %r22, -4(%r1)
stwu %r21, -4(%r1)
stwu %r20, -4(%r1)
stwu %r19, -4(%r1)
stwu %r18, -4(%r1)
stwu %r17, -4(%r1)
stwu %r16, -4(%r1)
stwu %r15, -4(%r1)
stwu %r14, -4(%r1)
stwu %r13, -4(%r1)
stwu %r2, -4(%r1)
// CR and LR
mfcr %r0
stwu %r0, -4(%r1)
mflr %r0
stwu %r0, -4(%r1)
// save the old stack pointer
stwu %r1, 0(%r3)
// restore the new stack pointer
mr %r1, %r4
// restore the new regs
// LR and CR
lwz %r0, 0(%r1)
mtlr %r0
lwzu %r0, 4(%r1)
mtcr %r0
// r2, r13-31
lwzu %r2, 4(%r1)
lwzu %r13, 4(%r1)
lwzu %r14, 4(%r1)
lwzu %r15, 4(%r1)
lwzu %r16, 4(%r1)
lwzu %r17, 4(%r1)
lwzu %r18, 4(%r1)
lwzu %r19, 4(%r1)
lwzu %r20, 4(%r1)
lwzu %r21, 4(%r1)
lwzu %r22, 4(%r1)
lwzu %r23, 4(%r1)
lwzu %r24, 4(%r1)
lwzu %r25, 4(%r1)
lwzu %r26, 4(%r1)
lwzu %r27, 4(%r1)
lwzu %r28, 4(%r1)
lwzu %r29, 4(%r1)
lwzu %r30, 4(%r1)
lwzu %r31, 4(%r1)
// f13-31
lfdu %f13, 4(%r1)
lfdu %f14, 8(%r1)
lfdu %f15, 8(%r1)
lfdu %f16, 8(%r1)
lfdu %f17, 8(%r1)
lfdu %f18, 8(%r1)
lfdu %f19, 8(%r1)
lfdu %f20, 8(%r1)
lfdu %f21, 8(%r1)
lfdu %f22, 8(%r1)
lfdu %f23, 8(%r1)
lfdu %f24, 8(%r1)
lfdu %f25, 8(%r1)
lfdu %f26, 8(%r1)
lfdu %f27, 8(%r1)
lfdu %f28, 8(%r1)
lfdu %f29, 8(%r1)
lfdu %f30, 8(%r1)
lfdu %f31, 8(%r1)
addi %r1, %r1, 8
blr
// void ppc_switch_stack_and_call(addr_t newKstack,
// void (*func)(void *), void *arg)
FUNCTION(ppc_switch_stack_and_call):
mr %r1, %r3 // set the new stack pointer
mtctr %r4 // move the target function into CTR
mr %r3, %r5 // move the arg to this func to the new arg
bctr
// ppc_kernel_thread_root(): parameters in r13-r15, the functions to call
// (in that order). The function is used when spawning threads. It usually calls
// an initialization function, the actual thread function, and a function that
// destroys the thread.
FUNCTION(ppc_kernel_thread_root):
mtlr %r13
blrl
mtlr %r14
blrl
mtlr %r15
blrl
// We should never get here. If we do, it's time to enter the kernel
// debugger (without a message at the moment).
li %r3, 0
b kernel_debugger

src/system/kernel/arch/m68k/arch_atomic.c

@@ -0,0 +1,237 @@
/*
* Copyright 2003, Marcus Overhagen. All rights reserved.
* Distributed under the terms of the OpenBeOS License.
*/
#include <KernelExport.h>
#include <kernel.h>
#include <user_atomic.h>
/*
* Emulation of 64 bit atomic functions.
* Slow, using spinlocks...
*/
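// A single global spinlock serializes all emulated 64 bit operations; together
// with the disabled interrupts this makes each read-modify-write atomic.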
static spinlock atomic_lock = 0;
int64
atomic_set64(vint64 *value, int64 newValue)
{
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value = newValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
int64
atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
{
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
if (oldValue == testAgainst)
*value = newValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
int64
atomic_add64(vint64 *value, int64 addValue)
{
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value += addValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
int64
atomic_and64(vint64 *value, int64 andValue)
{
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value &= andValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
int64
atomic_or64(vint64 *value, int64 orValue)
{
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value |= orValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
int64
atomic_get64(vint64 *value)
{
cpu_status status;
int64 oldValue;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
release_spinlock(&atomic_lock);
restore_interrupts(status);
return oldValue;
}
int64
_user_atomic_set64(vint64 *value, int64 newValue)
{
cpu_status status;
int64 oldValue;
if (!IS_USER_ADDRESS(value)
|| lock_memory((void *)value, 8, B_READ_DEVICE) != B_OK)
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value = newValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory((void *)value, 8, B_READ_DEVICE);
return oldValue;
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_test_and_set64(vint64 *value, int64 newValue, int64 testAgainst)
{
cpu_status status;
int64 oldValue;
if (!IS_USER_ADDRESS(value)
|| lock_memory((void *)value, 8, B_READ_DEVICE) != B_OK)
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
if (oldValue == testAgainst)
*value = newValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory((void *)value, 8, B_READ_DEVICE);
return oldValue;
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_add64(vint64 *value, int64 addValue)
{
cpu_status status;
int64 oldValue;
if (!IS_USER_ADDRESS(value)
|| lock_memory((void *)value, 8, B_READ_DEVICE) != B_OK)
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value += addValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory((void *)value, 8, B_READ_DEVICE);
return oldValue;
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_and64(vint64 *value, int64 andValue)
{
cpu_status status;
int64 oldValue;
if (!IS_USER_ADDRESS(value)
|| lock_memory((void *)value, 8, B_READ_DEVICE) != B_OK)
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value &= andValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory((void *)value, 8, B_READ_DEVICE);
return oldValue;
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_or64(vint64 *value, int64 orValue)
{
cpu_status status;
int64 oldValue;
if (!IS_USER_ADDRESS(value)
|| lock_memory((void *)value, 8, B_READ_DEVICE) != B_OK)
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
*value |= orValue;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory((void *)value, 8, B_READ_DEVICE);
return oldValue;
access_violation:
// XXX kill application
return -1;
}
int64
_user_atomic_get64(vint64 *value)
{
cpu_status status;
int64 oldValue;
if (!IS_USER_ADDRESS(value)
|| lock_memory((void *)value, 8, B_READ_DEVICE) != B_OK)
goto access_violation;
status = disable_interrupts();
acquire_spinlock(&atomic_lock);
oldValue = *value;
release_spinlock(&atomic_lock);
restore_interrupts(status);
unlock_memory((void *)value, 8, B_READ_DEVICE);
return oldValue;
access_violation:
// XXX kill application
return -1;
}

src/system/kernel/arch/m68k/arch_cpu.cpp

@@ -0,0 +1,264 @@
/*
* Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <KernelExport.h>
#include <arch_platform.h>
#include <arch_thread.h>
#include <arch/cpu.h>
#include <boot/kernel_args.h>
static bool sHasTlbia;
status_t
arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
{
// enable FPU
set_msr(get_msr() | MSR_FP_AVAILABLE);
// The current thread must be NULL for all CPUs till we have threads.
// Some boot code relies on this.
arch_thread_set_current_thread(NULL);
return B_OK;
}
status_t
arch_cpu_init(kernel_args *args)
{
// TODO: Let the boot loader put that info into the kernel args
// (property "tlbia" in the CPU node).
sHasTlbia = false;
return B_OK;
}
status_t
arch_cpu_init_post_vm(kernel_args *args)
{
return B_OK;
}
status_t
arch_cpu_init_post_modules(kernel_args *args)
{
return B_OK;
}
#define CACHELINE 32
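// cache line size in bytes assumed by the flush/invalidate loops below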
void
arch_cpu_sync_icache(void *address, size_t len)
{
int l, off;
char *p;
off = (unsigned int)address & (CACHELINE - 1);
len += off;
l = len;
p = (char *)address - off;
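// first pass: write every data cache line in the range back to memory (dcbst)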
do {
asm volatile ("dcbst 0,%0" :: "r"(p));
p += CACHELINE;
} while ((l -= CACHELINE) > 0);
asm volatile ("sync");
p = (char *)address - off;
do {
asm volatile ("icbi 0,%0" :: "r"(p));
p += CACHELINE;
} while ((len -= CACHELINE) > 0);
asm volatile ("sync");
isync();
}
void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
asm volatile("sync");
while (start < end) {
asm volatile("tlbie %0" :: "r" (start));
asm volatile("eieio");
asm volatile("sync");
start += B_PAGE_SIZE;
}
asm volatile("tlbsync");
asm volatile("sync");
}
void
arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
int i;
asm volatile("sync");
for (i = 0; i < num_pages; i++) {
asm volatile("tlbie %0" :: "r" (pages[i]));
asm volatile("eieio");
asm volatile("sync");
}
asm volatile("tlbsync");
asm volatile("sync");
}
void
arch_cpu_global_TLB_invalidate(void)
{
if (sHasTlbia) {
m68k_sync();
tlbia();
m68k_sync();
} else {
addr_t address = 0;
unsigned long i;
m68k_sync();
for (i = 0; i < 0x100000; i++) {
tlbie(address);
eieio();
m68k_sync();
address += B_PAGE_SIZE;
}
tlbsync();
m68k_sync();
}
}
void
arch_cpu_user_TLB_invalidate(void)
{
arch_cpu_global_TLB_invalidate();
}
status_t
arch_cpu_user_memcpy(void *to, const void *from, size_t size,
addr_t *faultHandler)
{
char *tmp = (char *)to;
char *s = (char *)from;
addr_t oldFaultHandler = *faultHandler;
if (m68k_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
while (size--)
*tmp++ = *s++;
*faultHandler = oldFaultHandler;
return 0;
error:
*faultHandler = oldFaultHandler;
return B_BAD_ADDRESS;
}
/** \brief Copies at most (\a size - 1) characters from the string in \a from to
* the string in \a to, NULL-terminating the result.
*
* \param to Pointer to the destination C-string.
* \param from Pointer to the source C-string.
* \param size Size in bytes of the string buffer pointed to by \a to.
*
* \return strlen(\a from).
*/
ssize_t
arch_cpu_user_strlcpy(char *to, const char *from, size_t size, addr_t *faultHandler)
{
int from_length = 0;
addr_t oldFaultHandler = *faultHandler;
if (m68k_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
if (size > 0) {
to[--size] = '\0';
// copy
for ( ; size; size--, from_length++, to++, from++) {
if ((*to = *from) == '\0')
break;
}
}
// count any leftover from chars
while (*from++ != '\0')
from_length++;
*faultHandler = oldFaultHandler;
return from_length;
error:
*faultHandler = oldFaultHandler;
return B_BAD_ADDRESS;
}
status_t
arch_cpu_user_memset(void *s, char c, size_t count, addr_t *faultHandler)
{
char *xs = (char *)s;
addr_t oldFaultHandler = *faultHandler;
if (m68k_set_fault_handler(faultHandler, (addr_t)&&error))
goto error;
while (count--)
*xs++ = c;
*faultHandler = oldFaultHandler;
return 0;
error:
*faultHandler = oldFaultHandler;
return B_BAD_ADDRESS;
}
status_t
arch_cpu_shutdown(bool reboot)
{
M68KPlatform::Default()->ShutDown(reboot);
return B_ERROR;
}
void
arch_cpu_idle(void)
{
}
// The purpose of this function is to trick the compiler. When setting the
// fault handler to a label that is obviously (to the compiler) never used,
// it may reorganize the control flow so that the labeled part is optimized
// away.
// By invoking the function like this
//
// if (m68k_set_fault_handler(faultHandler, (addr_t)&&error))
// goto error;
//
// the compiler has to keep the labeled code, since it can't guess the return
// value of this (non-inlinable) function. At least in my tests it worked that
// way, and I hope it will continue to work like this in the future.
//
bool
m68k_set_fault_handler(addr_t *handlerLocation, addr_t handler)
{
*handlerLocation = handler;
return false;
}

src/system/kernel/arch/m68k/arch_cpu_asm.S

@@ -0,0 +1,178 @@
/*
** Copyright 2003, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
#define FUNCTION(x) .global x; .type x,@function; x
.text
/* uint32 get_sdr1(void);
*/
FUNCTION(get_sdr1):
mfsdr1 %r3
blr
/* void set_sdr1(uint32 value);
* r3
*/
FUNCTION(set_sdr1):
mtsdr1 %r3
blr
/* uint32 get_sr(void *virtualAddress);
* r3
*/
FUNCTION(get_sr):
mfsrin %r3, %r3
blr
/* void set_sr(void *virtualAddress, uint32 value);
* r3 r4
*/
FUNCTION(set_sr):
mtsrin %r4, %r3
blr
/* uint32 get_msr(void);
*/
FUNCTION(get_msr):
mfmsr %r3
blr
/* uint32 set_msr(uint32 value);
* r3
*/
FUNCTION(set_msr):
mtmsr %r3
blr
/* uint32 get_pvr(void);
*/
FUNCTION(get_pvr):
mfpvr %r3
blr
#define get_ibat(num) \
mfibatu %r4, num; \
stw %r4, 0(%r3); \
mfibatl %r4, num; \
stw %r4, 4(%r3); \
#define set_ibat(num); \
lwz %r4, 0(%r3); \
mtibatu num, %r4; \
lwz %r4, 4(%r3); \
mtibatl num, %r4;
/* void get_ibat0-3(block_address_translation *bat);
* r3
*/
FUNCTION(get_ibat0):
get_ibat(0)
blr
FUNCTION(get_ibat1):
get_ibat(1)
blr
FUNCTION(get_ibat2):
get_ibat(2)
blr
FUNCTION(get_ibat3):
get_ibat(3)
blr
/* void set_ibat0-3(block_address_translation *bat);
* r3
*/
FUNCTION(set_ibat0):
set_ibat(0)
blr
FUNCTION(set_ibat1):
set_ibat(1)
blr
FUNCTION(set_ibat2):
set_ibat(2)
blr
FUNCTION(set_ibat3):
set_ibat(3)
blr
/* void reset_ibats(void)
*/
FUNCTION(reset_ibats):
li %r3, 0
mtibatu 0, %r3
mtibatl 0, %r3
mtibatu 1, %r3
mtibatl 1, %r3
mtibatu 2, %r3
mtibatl 2, %r3
mtibatu 3, %r3
mtibatl 3, %r3
blr
#define get_dbat(num) \
mfdbatu %r4, num; \
stw %r4, 0(%r3); \
mfdbatl %r4, num; \
stw %r4, 4(%r3);
#define set_dbat(num) \
lwz %r4, 0(%r3); \
mtdbatu num, %r4; \
lwz %r4, 4(%r3); \
mtdbatl num, %r4;
/* void get_dbat0-3(block_address_translation *bat);
* r3
*/
FUNCTION(get_dbat0):
get_dbat(0)
blr
FUNCTION(get_dbat1):
get_dbat(1)
blr
FUNCTION(get_dbat2):
get_dbat(2)
blr
FUNCTION(get_dbat3):
get_dbat(3)
blr
/* void set_dbat0-3(block_address_translation *bat);
* r3
*/
FUNCTION(set_dbat0):
set_dbat(0)
blr
FUNCTION(set_dbat1):
set_dbat(1)
blr
FUNCTION(set_dbat2):
set_dbat(2)
blr
FUNCTION(set_dbat3):
set_dbat(3)
blr
/* void reset_dbats(void)
*/
FUNCTION(reset_dbats):
li %r3, 0
mtdbatu 0, %r3
mtdbatl 0, %r3
mtdbatu 1, %r3
mtdbatl 1, %r3
mtdbatu 2, %r3
mtdbatl 2, %r3
mtdbatu 3, %r3
mtdbatl 3, %r3
blr
/* void __eieio(void)
*/
FUNCTION(__eieio):
eieio
blr

src/system/kernel/arch/m68k/arch_debug.cpp

@@ -0,0 +1,290 @@
/*
* Copyright 2003-2006, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Axel Dörfler <axeld@pinc-software.de>
* Ingo Weinhold <bonefish@cs.tu-berlin.de>
*/
#include <arch/debug.h>
#include <arch_cpu.h>
#include <debug.h>
#include <elf.h>
#include <kernel.h>
#include <kimage.h>
#include <thread.h>
struct stack_frame {
struct stack_frame *previous;
addr_t return_address;
};
#define NUM_PREVIOUS_LOCATIONS 32
extern struct iframe_stack gBootFrameStack;
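// Remembers the last NUM_PREVIOUS_LOCATIONS frame pointers in a small ring
// buffer, so that circular stack frames can be detected.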
static bool
already_visited(uint32 *visited, int32 *_last, int32 *_num, uint32 framePointer)
{
int32 last = *_last;
int32 num = *_num;
int32 i;
for (i = 0; i < num; i++) {
if (visited[(NUM_PREVIOUS_LOCATIONS + last - i)
% NUM_PREVIOUS_LOCATIONS] == framePointer) {
return true;
}
}
*_last = last = (last + 1) % NUM_PREVIOUS_LOCATIONS;
visited[last] = framePointer;
if (num < NUM_PREVIOUS_LOCATIONS)
*_num = num + 1;
return false;
}
static inline stack_frame *
get_current_stack_frame()
{
stack_frame *frame;
asm volatile("mr %0, %%r1" : "=r"(frame));
return frame;
}
static status_t
get_next_frame(addr_t framePointer, addr_t *next, addr_t *ip)
{
struct thread *thread = thread_get_current_thread();
addr_t oldFaultHandler = thread->fault_handler;
// set fault handler, so that we can safely access user stacks
if (thread) {
if (m68k_set_fault_handler(&thread->fault_handler, (addr_t)&&error))
goto error;
}
*ip = ((struct stack_frame *)framePointer)->return_address;
*next = (addr_t)((struct stack_frame *)framePointer)->previous;
if (thread)
thread->fault_handler = oldFaultHandler;
return B_OK;
error:
thread->fault_handler = oldFaultHandler;
return B_BAD_ADDRESS;
}
static void
print_stack_frame(struct thread *thread, addr_t ip, addr_t framePointer,
addr_t nextFramePointer)
{
addr_t diff = nextFramePointer - framePointer;
// kernel space/user space switch
if (diff & 0x80000000)
diff = 0;
// lookup symbol
const char *symbol, *image;
addr_t baseAddress;
bool exactMatch;
status_t status = elf_debug_lookup_symbol_address(ip, &baseAddress, &symbol,
&image, &exactMatch);
if (status != B_OK && !IS_KERNEL_ADDRESS(ip) && thread) {
// try to locate the image in the images loaded into user space
status = image_debug_lookup_user_symbol_address(thread->team, ip,
&baseAddress, &symbol, &image, &exactMatch);
}
if (status == B_OK) {
if (symbol != NULL) {
kprintf("%08lx (+%4ld) %08lx <%s>:%s + 0x%04lx%s\n", framePointer,
diff, ip, image, symbol, ip - baseAddress,
(exactMatch ? "" : " (nearest)"));
} else {
kprintf("%08lx (+%4ld) %08lx <%s@%p>:unknown + 0x%04lx\n",
framePointer, diff, ip, image, (void *)baseAddress,
ip - baseAddress);
}
} else
kprintf("%08lx (+%4ld) %08lx\n", framePointer, diff, ip);
}
static int
stack_trace(int argc, char **argv)
{
uint32 previousLocations[NUM_PREVIOUS_LOCATIONS];
struct iframe_stack *frameStack;
struct thread *thread;
addr_t framePointer;
int32 i, num = 0, last = 0;
if (argc < 2) {
thread = thread_get_current_thread();
framePointer = (addr_t)get_current_stack_frame();
} else {
// TODO: Add support for stack traces of other threads.
/* thread_id id = strtoul(argv[1], NULL, 0);
thread = thread_get_thread_struct_locked(id);
if (thread == NULL) {
kprintf("could not find thread %ld\n", id);
return 0;
}
// read %ebp from the thread's stack stored by a pushad
ebp = thread->arch_info.current_stack.esp[2];
if (id != thread_get_current_thread_id()) {
// switch to the page directory of the new thread to be
// able to follow the stack trace into userland
addr_t newPageDirectory = (addr_t)x86_next_page_directory(
thread_get_current_thread(), thread);
if (newPageDirectory != 0) {
read_cr3(oldPageDirectory);
write_cr3(newPageDirectory);
}
}
*/
kprintf("Stack traces of other threads not supported yet!\n");
return 0;
}
// We don't have a thread pointer early in the boot process
if (thread != NULL)
frameStack = &thread->arch_info.iframes;
else
frameStack = &gBootFrameStack;
for (i = 0; i < frameStack->index; i++) {
kprintf("iframe %p (end = %p)\n",
frameStack->frames[i], frameStack->frames[i] + 1);
}
if (thread != NULL) {
kprintf("stack trace for thread 0x%lx \"%s\"\n", thread->id,
thread->name);
kprintf(" kernel stack: %p to %p\n",
(void *)thread->kernel_stack_base,
(void *)(thread->kernel_stack_base + KERNEL_STACK_SIZE));
if (thread->user_stack_base != 0) {
kprintf(" user stack: %p to %p\n",
(void *)thread->user_stack_base,
(void *)(thread->user_stack_base + thread->user_stack_size));
}
}
kprintf("frame caller <image>:function + offset\n");
for (;;) {
// see if the frame pointer matches the iframe
struct iframe *frame = NULL;
for (i = 0; i < frameStack->index; i++) {
if (framePointer == (((addr_t)frameStack->frames[i] - 8) & ~0xf)) {
// it's an iframe
frame = frameStack->frames[i];
break;
}
}
if (frame) {
kprintf("iframe at %p\n", frame);
kprintf(" r0 0x%08lx r1 0x%08lx r2 0x%08lx r3 0x%08lx\n",
frame->r0, frame->r1, frame->r2, frame->r3);
kprintf(" r4 0x%08lx r5 0x%08lx r6 0x%08lx r7 0x%08lx\n",
frame->r4, frame->r5, frame->r6, frame->r7);
kprintf(" r8 0x%08lx r9 0x%08lx r10 0x%08lx r11 0x%08lx\n",
frame->r8, frame->r9, frame->r10, frame->r11);
kprintf(" r12 0x%08lx r13 0x%08lx r14 0x%08lx r15 0x%08lx\n",
frame->r12, frame->r13, frame->r14, frame->r15);
kprintf(" r16 0x%08lx r17 0x%08lx r18 0x%08lx r19 0x%08lx\n",
frame->r16, frame->r17, frame->r18, frame->r19);
kprintf(" r20 0x%08lx r21 0x%08lx r22 0x%08lx r23 0x%08lx\n",
frame->r20, frame->r21, frame->r22, frame->r23);
kprintf(" r24 0x%08lx r25 0x%08lx r26 0x%08lx r27 0x%08lx\n",
frame->r24, frame->r25, frame->r26, frame->r27);
kprintf(" r28 0x%08lx r29 0x%08lx r30 0x%08lx r31 0x%08lx\n",
frame->r28, frame->r29, frame->r30, frame->r31);
kprintf(" lr 0x%08lx cr 0x%08lx xer 0x%08lx ctr 0x%08lx\n",
frame->lr, frame->cr, frame->xer, frame->ctr);
kprintf("fpscr 0x%08lx\n", frame->fpscr);
kprintf(" srr0 0x%08lx srr1 0x%08lx dar 0x%08lx dsisr 0x%08lx\n",
frame->srr0, frame->srr1, frame->dar, frame->dsisr);
kprintf(" vector: 0x%lx\n", frame->vector);
print_stack_frame(thread, frame->srr0, framePointer, frame->r1);
framePointer = frame->r1;
} else {
addr_t ip, nextFramePointer;
if (get_next_frame(framePointer, &nextFramePointer, &ip) != B_OK) {
kprintf("%08lx -- read fault\n", framePointer);
break;
}
if (ip == 0 || framePointer == 0)
break;
print_stack_frame(thread, ip, framePointer, nextFramePointer);
framePointer = nextFramePointer;
}
if (already_visited(previousLocations, &last, &num, framePointer)) {
kprintf("circular stack frame: %p!\n", (void *)framePointer);
break;
}
if (framePointer == 0)
break;
}
/* if (oldPageDirectory != 0) {
// switch back to the previous page directory to not cause any trouble
write_cr3(oldPageDirectory);
}
*/
return 0;
}
// #pragma mark -
void
arch_debug_save_registers(int *regs)
{
}
void *
arch_debug_get_caller(void)
{
// TODO: implement me
return (void *)&arch_debug_get_caller;
}
status_t
arch_debug_init(kernel_args *args)
{
add_debugger_command("where", &stack_trace, "Same as \"sc\"");
add_debugger_command("bt", &stack_trace, "Same as \"sc\" (as in gdb)");
add_debugger_command("sc", &stack_trace, "Stack crawl for current thread");
return B_NO_ERROR;
}

src/system/kernel/arch/m68k/arch_debug_console.cpp

@@ -0,0 +1,81 @@
/*
* Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <arch_platform.h>
#include <arch/debug_console.h>
#include <boot/kernel_args.h>
#include <kernel.h>
#include <vm.h>
#include <string.h>
void
arch_debug_remove_interrupt_handler(uint32 line)
{
}
void
arch_debug_install_interrupt_handlers(void)
{
}
char
arch_debug_blue_screen_getchar(void)
{
return 0;
}
char
arch_debug_serial_getchar(void)
{
return M68KPlatform::Default()->SerialDebugGetChar();
}
void
arch_debug_serial_putchar(const char c)
{
return M68KPlatform::Default()->SerialDebugPutChar(c);
}
void
arch_debug_serial_puts(const char *s)
{
while (*s != '\0') {
arch_debug_serial_putchar(*s);
s++;
}
}
void
arch_debug_serial_early_boot_message(const char *string)
{
// this function will only be called in fatal situations
}
status_t
arch_debug_console_init(kernel_args *args)
{
return M68KPlatform::Default()->InitSerialDebug(args);
}
status_t
arch_debug_console_init_settings(kernel_args *args)
{
return B_OK;
}

src/system/kernel/arch/m68k/arch_elf.cpp

@@ -0,0 +1,385 @@
/*
* Copyright 2005, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*
*
* Copyright 2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <KernelExport.h>
#include <elf_priv.h>
#include <arch/elf.h>
#define CHATTY 0
int
arch_elf_relocate_rel(struct elf_image_info *image, const char *sym_prepend,
struct elf_image_info *resolve_image, struct Elf32_Rel *rel, int rel_len)
{
// there are no rel entries in PPC elf
return B_NO_ERROR;
}
static inline void
write_word32(addr_t P, Elf32_Word value)
{
*(Elf32_Word*)P = value;
}
static inline void
write_word30(addr_t P, Elf32_Word value)
{
// bits 0:29
*(Elf32_Word*)P = (*(Elf32_Word*)P & 0x3) | (value << 2);
}
static inline bool
write_low24_check(addr_t P, Elf32_Word value)
{
// bits 6:29
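// the value fits only if the bits above the 24 bit field are either all
// zero or all one (i.e. a correctly sign extended value)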
if ((value & 0x3f000000) && (~value & 0x3f800000))
return false;
*(Elf32_Word*)P = (*(Elf32_Word*)P & 0xfc000003)
| ((value & 0x00ffffff) << 2);
return true;
}
static inline bool
write_low14_check(addr_t P, Elf32_Word value)
{
// bits 16:29
if ((value & 0x3fffc000) && (~value & 0x3fffe000))
return false;
*(Elf32_Word*)P = (*(Elf32_Word*)P & 0xffff0003)
| ((value & 0x00003fff) << 2);
return true;
}
static inline void
write_half16(addr_t P, Elf32_Word value)
{
// bits 16:29
*(Elf32_Half*)P = (Elf32_Half)value;
}
static inline bool
write_half16_check(addr_t P, Elf32_Word value)
{
// bits 16:29
if ((value & 0xffff0000) && (~value & 0xffff8000))
return false;
*(Elf32_Half*)P = (Elf32_Half)value;
return true;
}
static inline Elf32_Word
lo(Elf32_Word value)
{
return (value & 0xffff);
}
static inline Elf32_Word
hi(Elf32_Word value)
{
return ((value >> 16) & 0xffff);
}
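// "high adjusted" part: like hi(), but compensates for the sign extension
// that occurs when lo() is later added as a signed 16 bit immediate.
// E.g. for 0x10008000 lo() yields 0x8000, which is negative as a signed
// 16 bit value, so ha() yields 0x1001 instead of hi()'s 0x1000.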
static inline Elf32_Word
ha(Elf32_Word value)
{
return (((value >> 16) + (value & 0x8000 ? 1 : 0)) & 0xffff);
}
int
arch_elf_relocate_rela(struct elf_image_info *image, const char *sym_prepend,
struct elf_image_info *resolve_image, struct Elf32_Rela *rel, int rel_len)
{
int i;
struct Elf32_Sym *sym;
int vlErr;
addr_t S = 0; // symbol address
addr_t R = 0; // section relative symbol address
addr_t G = 0; // GOT address
addr_t L = 0; // PLT address
#define P ((addr_t)(image->text_region.delta + rel[i].r_offset))
#define A ((addr_t)rel[i].r_addend)
#define B (image->text_region.delta)
// TODO: Get the GOT address!
#define REQUIRE_GOT \
if (G == 0) { \
dprintf("arch_elf_relocate_rela(): Failed to get GOT address!\n"); \
return B_ERROR; \
}
// TODO: Get the PLT address!
#define REQUIRE_PLT \
if (L == 0) { \
dprintf("arch_elf_relocate_rela(): Failed to get PLT address!\n"); \
return B_ERROR; \
}
for (i = 0; i * (int)sizeof(struct Elf32_Rela) < rel_len; i++) {
#if CHATTY
dprintf("looking at rel type %d, offset 0x%lx, sym 0x%lx, addend 0x%lx\n",
ELF32_R_TYPE(rel[i].r_info), rel[i].r_offset, ELF32_R_SYM(rel[i].r_info), rel[i].r_addend);
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_PPC_SECTOFF:
case R_PPC_SECTOFF_LO:
case R_PPC_SECTOFF_HI:
case R_PPC_SECTOFF_HA:
dprintf("arch_elf_relocate_rela(): Getting section relative "
"symbol addresses not yet supported!\n");
return B_ERROR;
case R_PPC_ADDR32:
case R_PPC_ADDR24:
case R_PPC_ADDR16:
case R_PPC_ADDR16_LO:
case R_PPC_ADDR16_HI:
case R_PPC_ADDR16_HA:
case R_PPC_ADDR14:
case R_PPC_ADDR14_BRTAKEN:
case R_PPC_ADDR14_BRNTAKEN:
case R_PPC_REL24:
case R_PPC_REL14:
case R_PPC_REL14_BRTAKEN:
case R_PPC_REL14_BRNTAKEN:
case R_PPC_GLOB_DAT:
case R_PPC_UADDR32:
case R_PPC_UADDR16:
case R_PPC_REL32:
case R_PPC_SDAREL16:
case R_PPC_ADDR30:
case R_PPC_JMP_SLOT:
sym = SYMBOL(image, ELF32_R_SYM(rel[i].r_info));
vlErr = elf_resolve_symbol(image, sym, resolve_image,
sym_prepend, &S);
if (vlErr < 0) {
dprintf("arch_elf_relocate_rela(): Failed to relocate "
"entry index %d, rel type %d, offset 0x%lx, sym 0x%lx, "
"addend 0x%lx\n", i, ELF32_R_TYPE(rel[i].r_info),
rel[i].r_offset, ELF32_R_SYM(rel[i].r_info),
rel[i].r_addend);
return vlErr;
}
break;
}
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_PPC_NONE:
break;
case R_PPC_COPY:
// TODO: Implement!
dprintf("arch_elf_relocate_rela(): R_PPC_COPY not yet "
"supported!\n");
return B_ERROR;
case R_PPC_ADDR32:
case R_PPC_GLOB_DAT:
case R_PPC_UADDR32:
write_word32(P, S + A);
break;
case R_PPC_ADDR24:
if (write_low24_check(P, (S + A) >> 2))
break;
dprintf("R_PPC_ADDR24 overflow\n");
return B_BAD_DATA;
case R_PPC_ADDR16:
case R_PPC_UADDR16:
if (write_half16_check(P, S + A))
break;
dprintf("R_PPC_ADDR16 overflow\n");
return B_BAD_DATA;
case R_PPC_ADDR16_LO:
write_half16(P, lo(S + A));
break;
case R_PPC_ADDR16_HI:
write_half16(P, hi(S + A));
break;
case R_PPC_ADDR16_HA:
write_half16(P, ha(S + A));
break;
case R_PPC_ADDR14:
case R_PPC_ADDR14_BRTAKEN:
case R_PPC_ADDR14_BRNTAKEN:
if (write_low14_check(P, (S + A) >> 2))
break;
dprintf("R_PPC_ADDR14 overflow\n");
return B_BAD_DATA;
case R_PPC_REL24:
if (write_low24_check(P, (S + A - P) >> 2))
break;
dprintf("R_PPC_REL24 overflow: 0x%lx\n", (S + A - P) >> 2);
return B_BAD_DATA;
case R_PPC_REL14:
case R_PPC_REL14_BRTAKEN:
case R_PPC_REL14_BRNTAKEN:
if (write_low14_check(P, (S + A - P) >> 2))
break;
dprintf("R_PPC_REL14 overflow\n");
return B_BAD_DATA;
case R_PPC_GOT16:
REQUIRE_GOT;
if (write_half16_check(P, G + A))
break;
dprintf("R_PPC_GOT16 overflow\n");
return B_BAD_DATA;
case R_PPC_GOT16_LO:
REQUIRE_GOT;
write_half16(P, lo(G + A));
break;
case R_PPC_GOT16_HI:
REQUIRE_GOT;
write_half16(P, hi(G + A));
break;
case R_PPC_GOT16_HA:
REQUIRE_GOT;
write_half16(P, ha(G + A));
break;
case R_PPC_JMP_SLOT:
{
// If the relative offset is small enough, we fabricate a
// relative branch instruction ("b <addr>").
addr_t jumpOffset = S - P;
if ((jumpOffset & 0xfc000000) != 0
&& (~jumpOffset & 0xfe000000) != 0) {
// Offset > 24 bit.
// TODO: Implement!
// See System V PPC ABI supplement, p. 5-6!
dprintf("arch_elf_relocate_rela(): R_PPC_JMP_SLOT: "
"Offsets > 24 bit currently not supported!\n");
dprintf("jumpOffset: %p\n", (void*)jumpOffset);
return B_ERROR;
} else {
// Offset <= 24 bit
// 0:5 opcode (= 18), 6:29 address, 30 AA, 31 LK
// "b" instruction: opcode = 18, AA = 0, LK = 0
// address: 24 high-order bits of 26 bit offset
*(uint32*)P = 0x48000000 | ((jumpOffset) & 0x03fffffc);
}
break;
}
case R_PPC_RELATIVE:
write_word32(P, B + A);
break;
case R_PPC_LOCAL24PC:
// TODO: Implement!
// low24*
// if (write_low24_check(P, ?)
// break;
// return B_BAD_DATA;
dprintf("arch_elf_relocate_rela(): R_PPC_LOCAL24PC not yet "
"supported!\n");
return B_ERROR;
case R_PPC_REL32:
write_word32(P, S + A - P);
break;
case R_PPC_PLTREL24:
REQUIRE_PLT;
if (write_low24_check(P, (L + A - P) >> 2))
break;
dprintf("R_PPC_PLTREL24 overflow\n");
return B_BAD_DATA;
case R_PPC_PLT32:
REQUIRE_PLT;
write_word32(P, L + A);
break;
case R_PPC_PLTREL32:
REQUIRE_PLT;
write_word32(P, L + A - P);
break;
case R_PPC_PLT16_LO:
REQUIRE_PLT;
write_half16(P, lo(L + A));
break;
case R_PPC_PLT16_HI:
REQUIRE_PLT;
write_half16(P, hi(L + A));
break;
case R_PPC_PLT16_HA:
REQUIRE_PLT;
write_half16(P, ha(L + A));
break;
case R_PPC_SDAREL16:
// TODO: Implement!
// if (write_half16_check(P, S + A - _SDA_BASE_))
// break;
// return B_BAD_DATA;
dprintf("arch_elf_relocate_rela(): R_PPC_SDAREL16 not yet "
"supported!\n");
return B_ERROR;
case R_PPC_SECTOFF:
if (write_half16_check(P, R + A))
break;
dprintf("R_PPC_SECTOFF overflow\n");
return B_BAD_DATA;
case R_PPC_SECTOFF_LO:
write_half16(P, lo(R + A));
break;
case R_PPC_SECTOFF_HI:
write_half16(P, hi(R + A));
break;
case R_PPC_SECTOFF_HA:
write_half16(P, ha(R + A));
break;
case R_PPC_ADDR30:
write_word30(P, (S + A - P) >> 2);
break;
default:
dprintf("arch_elf_relocate_rela: unhandled relocation type %d\n", ELF32_R_TYPE(rel[i].r_info));
return B_ERROR;
}
}
return B_NO_ERROR;
}

src/system/kernel/arch/m68k/arch_exceptions.S

@@ -0,0 +1,439 @@
/*
* Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*
* Copyright 2003, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#define FUNCTION(x) .global x; .type x,@function; x
#define LOCAL_FUNCTION(x) .type x,@function; x
/* General exception handling concept:
The PPC architecture specifies entry point offsets for the various
exceptions in the first two physical pages. We put a short piece of code
(VEC_ENTRY()) into each exception vector. It calls exception_vector_common,
which is defined in the unused space at the beginning of the first physical
page. It re-enables address translation and calls ppc_exception_tail which
lies in the kernel. It dumps an iframe and invokes ppc_exception_entry()
(arch_int.cpp), which handles the exception and returns eventually.
The registers are restored from the iframe and we return from the
interrupt.
algorithm overview:
* VEC_ENTRY
* exception_vector_common
* ppc_exception_tail
- dump iframe
- ppc_exception_entry()
- restore registers and return from interrupt
Here we use the following SPRG registers, which are at the disposal of the
operating system:
* SPRG0: Physical address pointer to a struct cpu_exception_context
for the current CPU. The structure contains helpful pointers
as well as some scratch memory for temporarily saving registers.
* SPRG1: Scratch.
struct cpu_exception_context (defined in arch_int.h):
offset 0: virtual address of the exception handler routine in the kernel
offset 4: virtual address of the exception context
offset 8: kernel stack for the current thread
offset 12: start of scratch memory for saving registers etc.
algorithm in detail:
* VEC_ENTRY
- save r1 in SPRG1 and load cpu_exception_context into r1
- save r0, save LR in r0
* exception_vector_common
- params:
. r0: old LR
. r1: exception context (physical address)
. SPRG1: original r1
- save r0-3
- load virtual exception context address to r1
- turn on BAT for exception vector code
- turn on address translation
- get exception vector offset from LR
* ppc_exception_tail
- params:
. r1: exception context (virtual address)
. r3: exception vector offset
. SPRG1: original r1
- turn off BAT
- get kernel stack pointer
- dump iframe
- ppc_exception_entry()
- restore registers and return from interrupt
*/
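/* For illustration only -- a minimal C sketch of the structure these offsets
   describe (the real definition lives in arch_int.h; kernel_handle_exception
   and exception_context match their use in arch_int.cpp, the other names are
   assumptions):

	struct cpu_exception_context {
		void	*kernel_handle_exception;	// offset 0: handler routine in the kernel
		void	*exception_context;		// offset 4: virtual address of this context
		void	*kernel_stack;			// offset 8: kernel stack for the current thread
		uint32	scratch[8];			// offset 12: scratch memory for saving registers
	};
*/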
/* exception vector definitions */
/* code in each exception vector */
#define VEC_ENTRY() \
mtsprg1 %r1 ; /* temporarily save r1 in SPRG1 */ \
mfsprg0 %r1 ; /* ppc_cpu_exception_context* -> r1 */ \
stw %r0, 16(%r1) ; /* save r0 */ \
mflr %r0 ; /* save LR in r0 */ \
bl exception_vector_common ; /* continue with the common part */
/* defines an exception vector */
#define DEFINE_VECTOR(offset, name) \
.skip offset - (. - __irqvec_start); \
FUNCTION(name): \
VEC_ENTRY()
.global __irqvec_start
__irqvec_start:
.long 0
/* Called by the exception vector code.
* LR: Points to the end of the exception vector code we're coming from.
* r0: original LR
* r1: ppc_cpu_exception_context* (physical address)
* SPRG1: original r1
*/
exception_vector_common:
stw %r0, 20(%r1) /* save original LR */
stw %r2, 24(%r1) /* save r2 */
stw %r3, 28(%r1) /* save r3 */
/* load the virtual address of the ppc_cpu_exception_context for this CPU */
lwz %r1, 4(%r1)
/* Address translation is turned off. We map this code via BAT, turn on
address translation, and continue in the kernel proper. */
li %r0, 0x10|0x2 /* BATL_MC | BATL_PP_RW */
mtibatl 0, %r0 /* load lower word of the instruction BAT */
li %r0, 0x2 /* BEPI = 0, BL = 0 (128 KB), BATU_VS */
mtibatu 0, %r0 /* load upper word of the instruction BAT */
isync
sync
/* turn on address translation */
mfsrr1 %r0 /* load saved msr */
rlwinm %r0, %r0, 28, 30, 31 /* extract mmu bits */
mfmsr %r3 /* load the current msr */
rlwimi %r3, %r0, 4, 26, 27 /* merge the mmu bits with the current msr */
li %r0, 1
rlwimi %r3, %r0, 13, 18, 18 /* turn on FPU, too */
mtmsr %r3 /* load new msr (turning the mmu back on) */
isync
/* Get LR -- it points to the end of the exception vector code. We adjust it
to point to the beginning and can use it to identify the vector later. */
mflr %r3
subi %r3, %r3, 20 /* 5 instructions */
/* jump to kernel code (ppc_exception_tail) */
lwz %r2, 0(%r1)
mtlr %r2
blr
DEFINE_VECTOR(0x100, system_reset_exception)
DEFINE_VECTOR(0x200, machine_check_exception)
DEFINE_VECTOR(0x300, DSI_exception)
DEFINE_VECTOR(0x400, ISI_exception)
DEFINE_VECTOR(0x500, external_interrupt_exception)
DEFINE_VECTOR(0x600, alignment_exception)
DEFINE_VECTOR(0x700, program_exception)
DEFINE_VECTOR(0x800, FP_unavailable_exception)
DEFINE_VECTOR(0x900, decrementer_exception)
DEFINE_VECTOR(0xc00, system_call_exception)
DEFINE_VECTOR(0xd00, trace_exception)
DEFINE_VECTOR(0xe00, FP_assist_exception)
DEFINE_VECTOR(0xf00, perf_monitor_exception)
DEFINE_VECTOR(0xf20, altivec_unavailable_exception)
DEFINE_VECTOR(0x1000, ITLB_miss_exception)
DEFINE_VECTOR(0x1100, DTLB_miss_on_load_exception)
DEFINE_VECTOR(0x1200, DTLB_miss_on_store_exception)
DEFINE_VECTOR(0x1300, instruction_address_breakpoint_exception)
DEFINE_VECTOR(0x1400, system_management_exception)
DEFINE_VECTOR(0x1600, altivec_assist_exception)
DEFINE_VECTOR(0x1700, thermal_management_exception)
.global __irqvec_end
__irqvec_end:
/* This is where exception_vector_common continues. We're in the kernel here.
r1: ppc_cpu_exception_context* (virtual address)
r3: exception vector offset
SPRG1: original r1
*/
FUNCTION(ppc_exception_tail):
/* turn off BAT */
li %r2, 0
mtibatu 0, %r2
mtibatl 0, %r2
isync
sync
/* save CR */
mfcr %r0
mfsrr1 %r2 /* load saved msr */
andi. %r2, %r2, (1 << 14) /* see if it was in kernel mode */
beq .kernel /* yep */
/* We come from userland. Load the kernel stack top address for the current
userland thread. */
mr %r2, %r1
lwz %r1, 8(%r1)
b .restore_stack_end
.kernel:
mr %r2, %r1
mfsprg1 %r1
.restore_stack_end:
/* now r2 points to the ppc_cpu_exception_context, r1 to the kernel stack */
/* restore the CR, it was messed up in the previous compare */
mtcrf 0xff, %r0
/* align r1 to 8 bytes, so the iframe will be aligned too */
rlwinm %r1, %r1, 0, 0, 28
/* save the registers */
bl __save_regs
/* iframe pointer to r4 and a backup to r20 */
mr %r4, %r1
mr %r20, %r1
/* adjust the stack pointer for ABI compatibility */
subi %r1, %r1, 8 /* make sure there's space for the previous
frame pointer and the return address */
rlwinm %r1, %r1, 0, 0, 27 /* 16 byte align the stack pointer */
li %r0, 0
stw %r0, 0(%r1) /* previous frame pointer: NULL */
/* 4(%r1) is room for the return address to be filled in by the
called function. */
/* r3: exception vector offset
r4: iframe pointer */
bl ppc_exception_entry
/* move the iframe to r1 */
mr %r1, %r20
b __restore_regs_and_rfi
/* called by ppc_exception_tail
* register expectations:
* r1: stack
* r2: ppc_cpu_exception_context*
* SPRG1: original r1
* r0,r3, LR: scrambled, but saved in scratch memory
* all other regs should have been unmodified by the exception handler,
* and ready to be saved
*/
__save_regs:
/* Note: The iframe must be 8 byte aligned. The stack pointer we are passed
in r1 is aligned. So we store the floating point registers first and
need to take care that an even number of 4 byte registers is stored,
or insert padding respectively. */
/* push f0-f31 */
stfdu %f0, -8(%r1)
stfdu %f1, -8(%r1)
stfdu %f2, -8(%r1)
stfdu %f3, -8(%r1)
stfdu %f4, -8(%r1)
stfdu %f5, -8(%r1)
stfdu %f6, -8(%r1)
stfdu %f7, -8(%r1)
stfdu %f8, -8(%r1)
stfdu %f9, -8(%r1)
stfdu %f10, -8(%r1)
stfdu %f11, -8(%r1)
stfdu %f12, -8(%r1)
stfdu %f13, -8(%r1)
stfdu %f14, -8(%r1)
stfdu %f15, -8(%r1)
stfdu %f16, -8(%r1)
stfdu %f17, -8(%r1)
stfdu %f18, -8(%r1)
stfdu %f19, -8(%r1)
stfdu %f20, -8(%r1)
stfdu %f21, -8(%r1)
stfdu %f22, -8(%r1)
stfdu %f23, -8(%r1)
stfdu %f24, -8(%r1)
stfdu %f25, -8(%r1)
stfdu %f26, -8(%r1)
stfdu %f27, -8(%r1)
stfdu %f28, -8(%r1)
stfdu %f29, -8(%r1)
stfdu %f30, -8(%r1)
stfdu %f31, -8(%r1)
/* push r0-r3 */
lwz %r0, 16(%r2) /* original r0 */
stwu %r0, -4(%r1) /* push r0 */
mfsprg1 %r0 /* original r1 */
stwu %r0, -4(%r1) /* push r1 */
lwz %r0, 24(%r2) /* original r2 */
stwu %r0, -4(%r1) /* push r2 */
lwz %r0, 28(%r2) /* original r3 */
stwu %r0, -4(%r1) /* push r3 */
/* push r4-r31 */
stwu %r4, -4(%r1)
stwu %r5, -4(%r1)
stwu %r6, -4(%r1)
stwu %r7, -4(%r1)
stwu %r8, -4(%r1)
stwu %r9, -4(%r1)
stwu %r10, -4(%r1)
stwu %r11, -4(%r1)
stwu %r12, -4(%r1)
stwu %r13, -4(%r1)
stwu %r14, -4(%r1)
stwu %r15, -4(%r1)
stwu %r16, -4(%r1)
stwu %r17, -4(%r1)
stwu %r18, -4(%r1)
stwu %r19, -4(%r1)
stwu %r20, -4(%r1)
stwu %r21, -4(%r1)
stwu %r22, -4(%r1)
stwu %r23, -4(%r1)
stwu %r24, -4(%r1)
stwu %r25, -4(%r1)
stwu %r26, -4(%r1)
stwu %r27, -4(%r1)
stwu %r28, -4(%r1)
stwu %r29, -4(%r1)
stwu %r30, -4(%r1)
stwu %r31, -4(%r1)
/* save some of the other regs */
mffs %f0
stfsu %f0, -4(%r1) /* push FPSCR */
mfctr %r0
stwu %r0, -4(%r1) /* push CTR */
mfxer %r0
stwu %r0, -4(%r1) /* push XER */
mfcr %r0
stwu %r0, -4(%r1) /* push CR */
lwz %r0, 20(%r2) /* original LR */
stwu %r0, -4(%r1) /* push LR */
mfspr %r0, %dsisr
stwu %r0, -4(%r1) /* push DSISR */
mfspr %r0, %dar
stwu %r0, -4(%r1) /* push DAR */
mfspr %r0, %srr1
stwu %r0, -4(%r1) /* push SRR1 */
mfspr %r0, %srr0
stwu %r0, -4(%r1) /* push SRR0 */
stwu %r3, -4(%r1) /* exception vector offset */
blr
/* called at the tail end of each of the exceptions
* r1: iframe pointer
*/
__restore_regs_and_rfi:
lwzu %r0, 4(%r1) /* SRR0 (skip vector offset) */
mtspr %srr0, %r0
lwzu %r0, 4(%r1) /* SRR1 */
mtspr %srr1, %r0
lwzu %r0, 4(%r1) /* DAR */
mtspr %dar, %r0
lwzu %r0, 4(%r1) /* DSISR */
mtspr %dsisr, %r0
lwzu %r0, 4(%r1) /* LR */
mtlr %r0
lwzu %r0, 4(%r1) /* CR */
mtcr %r0
lwzu %r0, 4(%r1) /* XER */
mtxer %r0
lwzu %r0, 4(%r1) /* CTR */
mtctr %r0
lfsu %f0, 4(%r1) /* FPSCR */
mtfsf 0xff, %f0
lwzu %r31, 4(%r1)
lwzu %r30, 4(%r1)
lwzu %r29, 4(%r1)
lwzu %r28, 4(%r1)
lwzu %r27, 4(%r1)
lwzu %r26, 4(%r1)
lwzu %r25, 4(%r1)
lwzu %r24, 4(%r1)
lwzu %r23, 4(%r1)
lwzu %r22, 4(%r1)
lwzu %r21, 4(%r1)
lwzu %r20, 4(%r1)
lwzu %r19, 4(%r1)
lwzu %r18, 4(%r1)
lwzu %r17, 4(%r1)
lwzu %r16, 4(%r1)
lwzu %r15, 4(%r1)
lwzu %r14, 4(%r1)
lwzu %r13, 4(%r1)
lwzu %r12, 4(%r1)
lwzu %r11, 4(%r1)
lwzu %r10, 4(%r1)
lwzu %r9, 4(%r1)
lwzu %r8, 4(%r1)
lwzu %r7, 4(%r1)
lwzu %r6, 4(%r1)
lwzu %r5, 4(%r1)
lwzu %r4, 4(%r1)
lwzu %r3, 4(%r1)
/* Stop here, before we overwrite r1, and continue with the floating point
registers first. */
addi %r2, %r1, 16 /* skip r3-r0 */
/* f31-f0 */
lfd %f31, 0(%r2)
lfdu %f30, 8(%r2)
lfdu %f29, 8(%r2)
lfdu %f28, 8(%r2)
lfdu %f27, 8(%r2)
lfdu %f26, 8(%r2)
lfdu %f25, 8(%r2)
lfdu %f24, 8(%r2)
lfdu %f23, 8(%r2)
lfdu %f22, 8(%r2)
lfdu %f21, 8(%r2)
lfdu %f20, 8(%r2)
lfdu %f19, 8(%r2)
lfdu %f18, 8(%r2)
lfdu %f17, 8(%r2)
lfdu %f16, 8(%r2)
lfdu %f15, 8(%r2)
lfdu %f14, 8(%r2)
lfdu %f13, 8(%r2)
lfdu %f12, 8(%r2)
lfdu %f11, 8(%r2)
lfdu %f10, 8(%r2)
lfdu %f9, 8(%r2)
lfdu %f8, 8(%r2)
lfdu %f7, 8(%r2)
lfdu %f6, 8(%r2)
lfdu %f5, 8(%r2)
lfdu %f4, 8(%r2)
lfdu %f3, 8(%r2)
lfdu %f2, 8(%r2)
lfdu %f1, 8(%r2)
lfd %f0, 8(%r2)
/* r2-r0 */
lwzu %r2, 4(%r1)
lwz %r0, 8(%r1)
lwz %r1, 4(%r1)
/* return from interrupt */
rfi

src/system/kernel/arch/m68k/arch_int.cpp

@@ -0,0 +1,546 @@
/*
* Copyright 2003-2006, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Axel Dörfler <axeld@pinc-software.de>
* Ingo Weinhold <bonefish@cs.tu-berlin.de>
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <int.h>
#include <arch/smp.h>
#include <boot/kernel_args.h>
#include <device_manager.h>
#include <kscheduler.h>
#include <interrupt_controller.h>
#include <smp.h>
#include <thread.h>
#include <timer.h>
#include <util/DoublyLinkedList.h>
#include <util/kernel_cpp.h>
#include <vm.h>
#include <vm_address_space.h>
#include <vm_priv.h>
#include <string.h>
// defined in arch_exceptions.S
extern int __irqvec_start;
extern int __irqvec_end;
extern"C" void m68k_exception_tail(void);
// the exception contexts for all CPUs
static m68k_cpu_exception_context sCPUExceptionContexts[SMP_MAX_CPUS];
// An iframe stack used in the early boot process when we don't have
// threads yet.
struct iframe_stack gBootFrameStack;
// interrupt controller interface (initialized
// in arch_int_init_post_device_manager())
static struct interrupt_controller_module_info *sPIC;
static void *sPICCookie;
void
arch_int_enable_io_interrupt(int irq)
{
if (!sPIC)
return;
// TODO: I have no idea what IRQ type is appropriate.
sPIC->enable_io_interrupt(sPICCookie, irq, IRQ_TYPE_LEVEL);
}
void
arch_int_disable_io_interrupt(int irq)
{
if (!sPIC)
return;
sPIC->disable_io_interrupt(sPICCookie, irq);
}
/* arch_int_*_interrupts() and friends are in arch_asm.S */
static void
print_iframe(struct iframe *frame)
{
dprintf("iframe at %p:\n", frame);
dprintf("r0-r3: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r0, frame->r1, frame->r2, frame->r3);
dprintf("r4-r7: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r4, frame->r5, frame->r6, frame->r7);
dprintf("r8-r11: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r8, frame->r9, frame->r10, frame->r11);
dprintf("r12-r15: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r12, frame->r13, frame->r14, frame->r15);
dprintf("r16-r19: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r16, frame->r17, frame->r18, frame->r19);
dprintf("r20-r23: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r20, frame->r21, frame->r22, frame->r23);
dprintf("r24-r27: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r24, frame->r25, frame->r26, frame->r27);
dprintf("r28-r31: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r28, frame->r29, frame->r30, frame->r31);
dprintf(" ctr 0x%08lx xer 0x%08lx\n", frame->ctr, frame->xer);
dprintf(" cr 0x%08lx lr 0x%08lx\n", frame->cr, frame->lr);
dprintf(" dsisr 0x%08lx dar 0x%08lx\n", frame->dsisr, frame->dar);
dprintf(" srr1 0x%08lx srr0 0x%08lx\n", frame->srr1, frame->srr0);
}
extern "C" void m68k_exception_entry(int vector, struct iframe *iframe);
void
m68k_exception_entry(int vector, struct iframe *iframe)
{
int ret = B_HANDLED_INTERRUPT;
if (vector != 0x900) {
dprintf("m68k_exception_entry: time %lld vector 0x%x, iframe %p, "
"srr0: %p\n", system_time(), vector, iframe, (void*)iframe->srr0);
}
struct thread *thread = thread_get_current_thread();
// push iframe
if (thread)
m68k_push_iframe(&thread->arch_info.iframes, iframe);
else
m68k_push_iframe(&gBootFrameStack, iframe);
switch (vector) {
case 0x100: // system reset
panic("system reset exception\n");
break;
case 0x200: // machine check
panic("machine check exception\n");
break;
case 0x300: // DSI
case 0x400: // ISI
{
bool kernelDebugger = debug_debugger_running();
if (kernelDebugger) {
// if this thread has a fault handler, we're allowed to be here
struct thread *thread = thread_get_current_thread();
if (thread && thread->fault_handler != NULL) {
iframe->srr0 = thread->fault_handler;
break;
}
// otherwise, not really
panic("page fault in debugger without fault handler! Touching "
"address %p from ip %p\n", (void *)iframe->dar,
(void *)iframe->srr0);
break;
} else if ((iframe->srr1 & MSR_EXCEPTIONS_ENABLED) == 0) {
// if interrupts were disabled and we are not running the kernel
// startup, the page fault was not allowed to happen, and we must panic
panic("page fault, but interrupts were disabled. Touching "
"address %p from ip %p\n", (void *)iframe->dar,
(void *)iframe->srr0);
break;
} else if (thread != NULL && thread->page_faults_allowed < 1) {
panic("page fault not allowed at this place. Touching address "
"%p from ip %p\n", (void *)iframe->dar,
(void *)iframe->srr0);
}
enable_interrupts();
addr_t newip;
ret = vm_page_fault(iframe->dar, iframe->srr0,
iframe->dsisr & (1 << 25), // store or load
iframe->srr1 & (1 << 14), // was the system in user or supervisor
&newip);
if (newip != 0) {
// the page fault handler wants us to modify the iframe so that the
// CPU returns to this new IP
iframe->srr0 = newip;
}
break;
}
case 0x500: // external interrupt
{
if (!sPIC) {
panic("m68k_exception_entry(): external interrupt although we "
"don't have a PIC driver!");
ret = B_HANDLED_INTERRUPT;
break;
}
dprintf("handling I/O interrupts...\n");
int irq;
while ((irq = sPIC->acknowledge_io_interrupt(sPICCookie)) >= 0) {
// TODO: correctly pass level-triggered vs. edge-triggered to the handler!
ret = int_io_interrupt_handler(irq, true);
}
dprintf("handling I/O interrupts done\n");
break;
}
case 0x600: // alignment exception
panic("alignment exception: unimplemented\n");
break;
case 0x700: // program exception
panic("program exception: unimplemented\n");
break;
case 0x800: // FP unavailable exception
panic("FP unavailable exception: unimplemented\n");
break;
case 0x900: // decrementer exception
ret = timer_interrupt();
break;
case 0xc00: // system call
panic("system call exception: unimplemented\n");
break;
case 0xd00: // trace exception
panic("trace exception: unimplemented\n");
break;
case 0xe00: // FP assist exception
panic("FP assist exception: unimplemented\n");
break;
case 0xf00: // performance monitor exception
panic("performance monitor exception: unimplemented\n");
break;
case 0xf20: // altivec unavailable exception
panic("alitivec unavailable exception: unimplemented\n");
break;
case 0x1000:
case 0x1100:
case 0x1200:
panic("TLB miss exception: unimplemented\n");
break;
case 0x1300: // instruction address exception
panic("instruction address exception: unimplemented\n");
break;
case 0x1400: // system management exception
panic("system management exception: unimplemented\n");
break;
case 0x1600: // altivec assist exception
panic("altivec assist exception: unimplemented\n");
break;
case 0x1700: // thermal management exception
panic("thermal management exception: unimplemented\n");
break;
default:
dprintf("unhandled exception type 0x%x\n", vector);
print_iframe(iframe);
panic("unhandled exception type\n");
}
if (ret == B_INVOKE_SCHEDULER) {
int state = disable_interrupts();
GRAB_THREAD_LOCK();
scheduler_reschedule();
RELEASE_THREAD_LOCK();
restore_interrupts(state);
}
// pop iframe
if (thread)
m68k_pop_iframe(&thread->arch_info.iframes);
else
m68k_pop_iframe(&gBootFrameStack);
}
status_t
arch_int_init(kernel_args *args)
{
return B_OK;
}
status_t
arch_int_init_post_vm(kernel_args *args)
{
void *handlers = (void *)args->arch_args.exception_handlers.start;
// We may need to remap the exception handler area into the kernel address
// space.
if (!IS_KERNEL_ADDRESS(handlers)) {
addr_t address = (addr_t)handlers;
status_t error = m68k_remap_address_range(&address,
args->arch_args.exception_handlers.size, true);
if (error != B_OK) {
panic("arch_int_init_post_vm(): Failed to remap the exception "
"handler area!");
return error;
}
handlers = (void*)(address);
}
// create a region to map the irq vector code into (physical address 0x0)
area_id exceptionArea = create_area("exception_handlers",
&handlers, B_EXACT_ADDRESS, args->arch_args.exception_handlers.size,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (exceptionArea < B_OK)
panic("arch_int_init2: could not create exception handler region\n");
dprintf("exception handlers at %p\n", handlers);
// copy the handlers into this area
memcpy(handlers, &__irqvec_start, args->arch_args.exception_handlers.size);
arch_cpu_sync_icache(handlers, args->arch_args.exception_handlers.size);
// init the CPU exception contexts
int cpuCount = smp_get_num_cpus();
for (int i = 0; i < cpuCount; i++) {
m68k_cpu_exception_context *context = m68k_get_cpu_exception_context(i);
context->kernel_handle_exception = (void*)&m68k_exception_tail;
context->exception_context = context;
// kernel_stack is set when the current thread changes. At this point
// we don't have threads yet.
}
// set the exception context for this CPU
m68k_set_current_cpu_exception_context(m68k_get_cpu_exception_context(0));
return B_OK;
}
template<typename ModuleInfo>
struct Module : DoublyLinkedListLinkImpl<Module<ModuleInfo> > {
Module(ModuleInfo *module)
: module(module)
{
}
~Module()
{
if (module)
put_module(((module_info*)module)->name);
}
ModuleInfo *module;
};
typedef Module<interrupt_controller_module_info> PICModule;
struct PICModuleList : DoublyLinkedList<PICModule> {
~PICModuleList()
{
while (PICModule *module = First()) {
Remove(module);
delete module;
}
}
};
class DeviceTreeIterator {
public:
DeviceTreeIterator(device_manager_info *deviceManager)
: fDeviceManager(deviceManager),
fNode(NULL),
fParent(NULL)
{
Rewind();
}
~DeviceTreeIterator()
{
if (fParent != NULL)
fDeviceManager->put_device_node(fParent);
if (fNode != NULL)
fDeviceManager->put_device_node(fNode);
}
void Rewind()
{
fNode = fDeviceManager->get_root();
}
bool HasNext() const
{
return (fNode != NULL);
}
device_node_handle Next()
{
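// pre-order depth-first traversal: return the current node, then advance
// to its first child or, if there is none, to the next (ancestor) sibling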
if (fNode == NULL)
return NULL;
device_node_handle foundNode = fNode;
// get first child
device_node_handle child = NULL;
if (fDeviceManager->get_next_child_device(fNode, &child, NULL)
== B_OK) {
// move to the child node
if (fParent != NULL)
fDeviceManager->put_device_node(fParent);
fParent = fNode;
fNode = child;
// no more children; backtrack to find the next sibling
} else {
while (fParent != NULL) {
if (fDeviceManager->get_next_child_device(fParent, &fNode, NULL)
== B_OK) {
// get_next_child_device() always puts the node
break;
}
fNode = fParent;
fParent = fDeviceManager->get_parent(fNode);
}
// if we hit the root node again, we're done
if (fParent == NULL) {
fDeviceManager->put_device_node(fNode);
fNode = NULL;
}
}
return foundNode;
}
private:
device_manager_info *fDeviceManager;
device_node_handle fNode;
device_node_handle fParent;
};
static void
get_interrupt_controller_modules(PICModuleList &list)
{
const char *namePrefix = "interrupt_controllers/";
size_t namePrefixLen = strlen(namePrefix);
char name[B_PATH_NAME_LENGTH];
size_t length;
uint32 cookie = 0;
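// Note: "&(length = sizeof(name))" resets length to the full buffer size
// each time the loop condition is evaluated, so the idiom stays correct
// across the continue paths below.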
while (get_next_loaded_module_name(&cookie, name, &(length = sizeof(name)))
== B_OK) {
// an interrupt controller module?
if (length <= namePrefixLen
|| strncmp(name, namePrefix, namePrefixLen) != 0) {
continue;
}
// get the module
interrupt_controller_module_info *moduleInfo;
if (get_module(name, (module_info**)&moduleInfo) != B_OK)
continue;
// add it to the list
PICModule *module = new(nothrow) PICModule(moduleInfo);
if (!module) {
put_module(((module_info*)moduleInfo)->name);
continue;
}
list.Add(module);
}
}
static bool
probe_pic_device(device_node_handle node, PICModuleList &picModules)
{
for (PICModule *module = picModules.Head();
module;
module = picModules.GetNext(module)) {
bool noConnection;
if (module->module->info.supports_device(node, &noConnection) > 0) {
if (module->module->info.register_device(node) == B_OK)
return true;
}
}
return false;
}
status_t
arch_int_init_post_device_manager(struct kernel_args *args)
{
// get the interrupt controller driver modules
PICModuleList picModules;
get_interrupt_controller_modules(picModules);
if (picModules.IsEmpty()) {
panic("arch_int_init_post_device_manager(): Found no PIC modules!");
return B_ENTRY_NOT_FOUND;
}
// get the device manager module
device_manager_info *deviceManager;
status_t error = get_module(B_DEVICE_MANAGER_MODULE_NAME,
(module_info**)&deviceManager);
if (error != B_OK) {
panic("arch_int_init_post_device_manager(): Failed to get device "
"manager: %s", strerror(error));
return error;
}
Module<device_manager_info> _deviceManager(deviceManager); // auto put
// iterate through the device tree and probe the interrupt controllers
DeviceTreeIterator iterator(deviceManager);
while (device_node_handle node = iterator.Next())
probe_pic_device(node, picModules);
// iterate through the tree again and get an interrupt controller node
iterator.Rewind();
while (device_node_handle node = iterator.Next()) {
char *deviceType;
if (deviceManager->get_attr_string(node, B_DRIVER_DEVICE_TYPE,
&deviceType, false) == B_OK) {
bool isPIC
= (strcmp(deviceType, B_INTERRUPT_CONTROLLER_DRIVER_TYPE) == 0);
free(deviceType);
if (isPIC) {
driver_module_info *driver;
void *driverCookie;
error = deviceManager->init_driver(node, NULL, &driver,
&driverCookie);
if (error == B_OK) {
sPIC = (interrupt_controller_module_info *)driver;
sPICCookie = driverCookie;
return B_OK;
}
}
}
}
// no PIC found
panic("arch_int_init_post_device_manager(): Found no supported PIC!");
return B_ENTRY_NOT_FOUND;
}
// #pragma mark -
struct m68k_cpu_exception_context *
m68k_get_cpu_exception_context(int cpu)
{
return sCPUExceptionContexts + cpu;
}
void
m68k_set_current_cpu_exception_context(struct m68k_cpu_exception_context *context)
{
// translate to physical address
addr_t physicalPage;
addr_t inPageOffset = (addr_t)context & (B_PAGE_SIZE - 1);
status_t error = vm_get_page_mapping(vm_kernel_address_space_id(),
(addr_t)context - inPageOffset, &physicalPage);
if (error != B_OK) {
panic("m68k_set_current_cpu_exception_context(): Failed to get physical "
"address!");
return;
}
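// The low-level exception code may run before address translation is
// (re)established, so it has to be handed a physical pointer. Note that
// mtsprg0 below is still PPC: m68k has no SPRG registers, so this needs
// a proper m68k mechanism eventually.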
asm volatile("mtsprg0 %0" : : "r"(physicalPage + inPageOffset));
}

View File

@ -0,0 +1,47 @@
/*
** Copyright 2003, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
#include <arch_mmu.h>
#include <arch_cpu.h>
uint32
page_table_entry::PrimaryHash(uint32 virtualSegmentID, uint32 virtualAddress)
{
return (virtualSegmentID & 0x7ffff) ^ ((virtualAddress >> 12) & 0xffff);
}
uint32
page_table_entry::SecondaryHash(uint32 virtualSegmentID, uint32 virtualAddress)
{
return ~PrimaryHash(virtualSegmentID, virtualAddress);
}
uint32
page_table_entry::SecondaryHash(uint32 primaryHash)
{
return ~primaryHash;
}
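// Worked example: PrimaryHash(0x123, 0x00456789) = 0x123 ^ 0x456 = 0x575;
// the secondary hash is its one's complement (masked to the table size by
// the caller).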
void
m68k_get_page_table(page_table_entry_group **_pageTable, size_t *_size)
{
uint32 sdr1 = get_sdr1();
*_pageTable = (page_table_entry_group *)(sdr1 & 0xffff0000);
*_size = ((sdr1 & 0x1ff) + 1) << 16;
}
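// Example: SDR1 = 0x00ff0003 yields a page table at physical 0x00ff0000
// with size (0x3 + 1) << 16 = 256 KB.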
void
m68k_set_page_table(page_table_entry_group *pageTable, size_t size)
{
set_sdr1(((uint32)pageTable & 0xffff0000) | (((size - 1) >> 16) & 0x1ff));
}

View File

@ -0,0 +1,296 @@
/*
* Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*/
#include <arch_platform.h>
#include <new>
#include <KernelExport.h>
#include <boot/kernel_args.h>
//#include <platform/openfirmware/openfirmware.h>
#include <real_time_clock.h>
#include <util/kernel_cpp.h>
static M68KPlatform *sM68KPlatform;
// constructor
M68KPlatform::M68KPlatform(m68k_platform_type platformType)
: fPlatformType(platformType)
{
}
// destructor
M68KPlatform::~M68KPlatform()
{
}
// Default
M68KPlatform *
M68KPlatform::Default()
{
return sM68KPlatform;
}
// #pragma mark - Amiga
// #pragma mark - Apple
namespace BPrivate {
class M68KApple : public M68KPlatform {
public:
M68KApple();
virtual ~M68KApple();
virtual status_t Init(struct kernel_args *kernelArgs);
virtual status_t InitSerialDebug(struct kernel_args *kernelArgs);
virtual status_t InitPostVM(struct kernel_args *kernelArgs);
virtual status_t InitRTC(struct kernel_args *kernelArgs,
struct real_time_data *data);
virtual char SerialDebugGetChar();
virtual void SerialDebugPutChar(char c);
//virtual void SetHardwareRTC(uint32 seconds);
//virtual uint32 GetHardwareRTC();
virtual void ShutDown(bool reboot);
private:
int fRTC;
};
} // namespace BPrivate
using BPrivate::M68KApple;
// constructor
M68KApple::M68KApple()
: M68KPlatform(M68K_PLATFORM_OPEN_FIRMWARE),
fRTC(-1)
{
}
// destructor
M68KApple::~M68KApple()
{
}
// Init
status_t
M68KApple::Init(struct kernel_args *kernelArgs)
{
return of_init(
(int(*)(void*))kernelArgs->platform_args.openfirmware_entry);
}
// InitSerialDebug
status_t
M68KApple::InitSerialDebug(struct kernel_args *kernelArgs)
{
return B_OK;
}
// InitPostVM
status_t
M68KApple::InitPostVM(struct kernel_args *kernelArgs)
{
return B_OK;
}
// InitRTC
status_t
M68KApple::InitRTC(struct kernel_args *kernelArgs,
struct real_time_data *data)
{
return B_OK;
}
// SerialDebugGetChar
char
M68KApple::SerialDebugGetChar()
{
// TODO: not yet implemented
return 0;
}
// SerialDebugPutChar
void
M68KApple::SerialDebugPutChar(char c)
{
}
// ShutDown
void
M68KApple::ShutDown(bool reboot)
{
if (reboot) {
of_interpret("reset-all", 0, 0);
} else {
// not standardized, so it might fail
of_interpret("shut-down", 0, 0);
}
}
// #pragma mark - Atari (Falcon)
namespace BPrivate {
class M68KAtari : public M68KPlatform {
public:
M68KAtari();
virtual ~M68KAtari();
virtual status_t Init(struct kernel_args *kernelArgs);
virtual status_t InitSerialDebug(struct kernel_args *kernelArgs);
virtual status_t InitPostVM(struct kernel_args *kernelArgs);
virtual status_t InitRTC(struct kernel_args *kernelArgs,
struct real_time_data *data);
virtual char SerialDebugGetChar();
virtual void SerialDebugPutChar(char c);
//virtual void SetHardwareRTC(uint32 seconds);
//virtual uint32 GetHardwareRTC();
virtual void ShutDown(bool reboot);
private:
int fInput;
int fOutput;
int fRTC;
};
} // namespace BPrivate
using BPrivate::M68KAtari;
// constructor
M68KAtari::M68KAtari()
: M68KPlatform(M68K_PLATFORM_OPEN_FIRMWARE),
fInput(-1),
fOutput(-1),
fRTC(-1)
{
}
// destructor
M68KAtari::~M68KAtari()
{
}
// Init
status_t
M68KAtari::Init(struct kernel_args *kernelArgs)
{
return of_init(
(int(*)(void*))kernelArgs->platform_args.openfirmware_entry);
}
// InitSerialDebug
status_t
M68KAtari::InitSerialDebug(struct kernel_args *kernelArgs)
{
if (of_getprop(gChosen, "stdin", &fInput, sizeof(int)) == OF_FAILED)
return B_ERROR;
if (of_getprop(gChosen, "stdout", &fOutput, sizeof(int)) == OF_FAILED)
return B_ERROR;
return B_OK;
}
// InitPostVM
status_t
M68KAtari::InitPostVM(struct kernel_args *kernelArgs)
{
add_debugger_command("of_exit", &debug_command_of_exit,
"Exit to the Open Firmware prompt. No way to get back into the OS!");
add_debugger_command("of_enter", &debug_command_of_enter,
"Enter a subordinate Open Firmware interpreter. Quitting it returns "
"to KDL.");
return B_OK;
}
// InitRTC
status_t
M68KAtari::InitRTC(struct kernel_args *kernelArgs,
struct real_time_data *data)
{
// open RTC
fRTC = of_open(kernelArgs->platform_args.rtc_path);
if (fRTC == OF_FAILED) {
dprintf("M68KAtari::InitRTC(): Failed open RTC device!\n");
return B_ERROR;
}
return B_OK;
}
// SerialDebugGetChar
char
M68KAtari::SerialDebugGetChar()
{
int key;
if (of_interpret("key", 0, 1, &key) == OF_FAILED)
return 0;
return (char)key;
}
// SerialDebugPutChar
void
M68KAtari::SerialDebugPutChar(char c)
{
if (c == '\n')
of_write(fOutput, "\r\n", 2);
else
of_write(fOutput, &c, 1);
}
// ShutDown
void
M68KAtari::ShutDown(bool reboot)
{
if (reboot) {
of_interpret("reset-all", 0, 0);
} else {
// not standardized, so it might fail
of_interpret("shut-down", 0, 0);
}
}
// #pragma mark -
// static buffer for constructing the actual M68KPlatform
static char sM68KPlatformBuffer[sizeof(M68KAtari)];
status_t
arch_platform_init(struct kernel_args *kernelArgs)
{
// only Atari supported for now
if (true)
sM68KPlatform = new(sM68KPlatformBuffer) M68KAtari;
return sM68KPlatform->Init(kernelArgs);
}
status_t
arch_platform_init_post_vm(struct kernel_args *kernelArgs)
{
return sM68KPlatform->InitPostVM(kernelArgs);
}
status_t
arch_platform_init_post_thread(struct kernel_args *kernelArgs)
{
return B_OK;
}

View File

@ -0,0 +1,83 @@
/*
* Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*/
#include <arch/real_time_clock.h>
#include <arch_platform.h>
#include <real_time_data.h>
#include <smp.h>
static spinlock sSetArchDataLock;
status_t
arch_rtc_init(kernel_args *args, struct real_time_data *data)
{
// init the platform RTC service
status_t error = M68KPlatform::Default()->InitRTC(args, data);
if (error != B_OK)
return error;
// init the arch specific part of the real_time_data
data->arch_data.data[0].system_time_offset = 0;
// cvFactor = 2^32 * 1000000 / tbFreq
// => (tb * cvFactor) >> 32 = (tb * 2^32 * 1000000 / tbFreq) >> 32
// = tb / tbFreq * 1000000 = time in us
data->arch_data.system_time_conversion_factor
= uint32((uint64(1) << 32) * 1000000
/ args->arch_args.time_base_frequency);
data->arch_data.version = 0;
// init spinlock
sSetArchDataLock = 0;
// init system_time() conversion factor
__m68k_setup_system_time(&data->arch_data.system_time_conversion_factor);
return B_OK;
}
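// Worked example (editor's sketch): for a hypothetical time base frequency
// of 25 MHz, cvFactor = 2^32 * 1000000 / 25000000 = 171798691, and a time
// base count of 25000000 (one second) converts to
// (25000000 * 171798691) >> 32 = 999999 us (rounded down).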
uint32
arch_rtc_get_hw_time(void)
{
return M68KPlatform::Default()->GetHardwareRTC();
}
void
arch_rtc_set_hw_time(uint32 seconds)
{
M68KPlatform::Default()->SetHardwareRTC(seconds);
}
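// The system time offset lives in two alternating slots guarded by a
// version counter (a minimal seqlock): the writer bumps the version and
// fills the now-inactive slot; readers retry until the version is stable
// across the read.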
void
arch_rtc_set_system_time_offset(struct real_time_data *data, bigtime_t offset)
{
cpu_status state = disable_interrupts();
acquire_spinlock(&sSetArchDataLock);
int32 version = data->arch_data.version + 1;
data->arch_data.data[version % 2].system_time_offset = offset;
data->arch_data.version = version;
release_spinlock(&sSetArchDataLock);
restore_interrupts(state);
}
bigtime_t
arch_rtc_get_system_time_offset(struct real_time_data *data)
{
int32 version;
bigtime_t offset;
do {
version = data->arch_data.version;
offset = data->arch_data.data[version % 2].system_time_offset;
} while (version != data->arch_data.version);
return offset;
}

View File

@ -0,0 +1,40 @@
/*
** Copyright 2004, Axel Dörfler, axeld@pinc-software.de
** Distributed under the terms of the OpenBeOS License.
*/
#include <KernelExport.h>
#include <boot/stage2.h>
#include <arch/smp.h>
#include <debug.h>
status_t
arch_smp_init(kernel_args *args)
{
return B_OK;
}
status_t
arch_smp_per_cpu_init(kernel_args *args, int32 cpu)
{
return B_OK;
}
void
arch_smp_send_ici(int32 target_cpu)
{
panic("called arch_smp_send_ici!\n");
}
void
arch_smp_send_broadcast_ici()
{
panic("called arch_smp_send_broadcast_ici\n");
}

View File

@ -0,0 +1,45 @@
/*
* Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*/
#include <OS.h>
#include <arch_cpu.h>
#include <arch/system_info.h>
#include <boot/kernel_args.h>
static uint64 sCPUClockFrequency;
static uint64 sBusClockFrequency;
static enum cpu_types sCPUType;
static uint16 sCPURevision;
status_t
arch_get_system_info(system_info *info, size_t size)
{
info->cpu_type = sCPUType;
info->cpu_revision = sCPURevision;
info->cpu_clock_speed = sCPUClockFrequency;
info->bus_clock_speed = sBusClockFrequency;
info->platform_type = B_MAC_PLATFORM;//XXX
return B_OK;
}
status_t
arch_system_info_init(struct kernel_args *args)
{
sCPUClockFrequency = args->arch_args.cpu_frequency;
sBusClockFrequency = args->arch_args.bus_frequency;
sCPURevision = 0x68030; //XXX doesn't fit in uint16, gets truncated
sCPUType = B_CPU_M68K;
return B_OK;
}

View File

@ -0,0 +1,267 @@
/*
* Copyright 2003-2007, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Axel Dörfler <axeld@pinc-software.de>
* Ingo Weinhold <bonefish@cs.tu-berlin.de>
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <arch_thread.h>
#include <arch_cpu.h>
#include <arch/thread.h>
#include <boot/stage2.h>
#include <kernel.h>
#include <thread.h>
#include <vm_types.h>
//#include <arch/vm_translation_map.h>
#include <string.h>
// Valid initial arch_thread state. We just memcpy() it when initializing
// a new thread structure.
static struct arch_thread sInitialState;
// Helper function for thread creation, defined in arch_asm.S.
extern void ppc_kernel_thread_root();
extern void ppc_switch_stack_and_call(addr_t newKstack, void (*func)(void *),
void *arg);
void
ppc_push_iframe(struct iframe_stack *stack, struct iframe *frame)
{
ASSERT(stack->index < IFRAME_TRACE_DEPTH);
stack->frames[stack->index++] = frame;
}
void
ppc_pop_iframe(struct iframe_stack *stack)
{
ASSERT(stack->index > 0);
stack->index--;
}
/** Returns the current iframe structure of the running thread.
* This function must only be called in a context where it's actually
* certain that such an iframe exists, i.e. from syscalls, but usually not
* from standard kernel threads.
*/
static struct iframe *
ppc_get_current_iframe(void)
{
struct thread *thread = thread_get_current_thread();
ASSERT(thread->arch_info.iframes.index >= 0);
return thread->arch_info.iframes.frames[thread->arch_info.iframes.index - 1];
}
/** \brief Returns the current thread's topmost (i.e. most recent)
* userland->kernel transition iframe (usually the first one, save for
* interrupts in signal handlers).
* \return The iframe, or \c NULL, if there is no such iframe (e.g. when
* the thread is a kernel thread).
*/
struct iframe *
ppc_get_user_iframe(void)
{
struct thread *thread = thread_get_current_thread();
int i;
for (i = thread->arch_info.iframes.index - 1; i >= 0; i--) {
struct iframe *frame = thread->arch_info.iframes.frames[i];
if (frame->srr1 & MSR_PRIVILEGE_LEVEL)
return frame;
}
return NULL;
}
// #pragma mark -
status_t
arch_thread_init(struct kernel_args *args)
{
// Initialize the static initial arch_thread state (sInitialState).
// Currently nothing to do, i.e. zero initialized is just fine.
return B_OK;
}
status_t
arch_team_init_team_struct(struct team *team, bool kernel)
{
// Nothing to do. The structure is empty.
return B_OK;
}
status_t
arch_thread_init_thread_struct(struct thread *thread)
{
// set up an initial state (stack & fpu)
memcpy(&thread->arch_info, &sInitialState, sizeof(struct arch_thread));
return B_OK;
}
status_t
arch_thread_init_kthread_stack(struct thread *t, int (*start_func)(void),
void (*entry_func)(void), void (*exit_func)(void))
{
addr_t *kstack = (addr_t *)t->kernel_stack_base;
addr_t *kstackTop = kstack + KERNEL_STACK_SIZE / sizeof(addr_t);
// clear the kernel stack
#ifdef DEBUG_KERNEL_STACKS
# ifdef STACK_GROWS_DOWNWARDS
memset((void *)((addr_t)kstack + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE), 0,
KERNEL_STACK_SIZE - KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
# else
memset(kstack, 0, KERNEL_STACK_SIZE - KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE);
# endif
#else
memset(kstack, 0, KERNEL_STACK_SIZE);
#endif
// space for frame pointer and return address, and stack frames must be
// 16 byte aligned
kstackTop -= 2;
kstackTop = (addr_t*)((addr_t)kstackTop & ~0xf);
// LR, CR, r2, r13-r31, f13-f31, as pushed by ppc_context_switch()
kstackTop -= 22 + 2 * 19;
// let LR point to ppc_kernel_thread_root()
kstackTop[0] = (addr_t)&ppc_kernel_thread_root;
// the arguments of ppc_kernel_thread_root() are the functions to call,
// provided in registers r13-r15
kstackTop[3] = (addr_t)entry_func;
kstackTop[4] = (addr_t)start_func;
kstackTop[5] = (addr_t)exit_func;
// save this stack position
t->arch_info.sp = (void *)kstackTop;
return B_OK;
}
status_t
arch_thread_init_tls(struct thread *thread)
{
// TODO: Implement!
return B_OK;
}
void
arch_thread_switch_kstack_and_call(struct thread *t, addr_t newKstack,
void (*func)(void *), void *arg)
{
ppc_switch_stack_and_call(newKstack, func, arg);
}
void
arch_thread_context_switch(struct thread *t_from, struct thread *t_to)
{
// set the new kernel stack in the EAR register.
// this is used in the exception handler code to decide what kernel stack to
// switch to if the exception had happened when the processor was in user mode
asm("mtear %0" :: "g"(t_to->kernel_stack_base + KERNEL_STACK_SIZE - 8));
// switch the asids if we need to
if (t_to->team->address_space != NULL) {
// the target thread has a userland address space
if (t_from->team != t_to->team) {
// switching to a new address space
ppc_translation_map_change_asid(&t_to->team->address_space->translation_map);
}
}
ppc_context_switch(&t_from->arch_info.sp, t_to->arch_info.sp);
}
void
arch_thread_dump_info(void *info)
{
struct arch_thread *at = (struct arch_thread *)info;
dprintf("\tsp: %p\n", at->sp);
}
status_t
arch_thread_enter_userspace(struct thread *thread, addr_t entry, void *arg1, void *arg2)
{
panic("arch_thread_enter_uspace(): not yet implemented\n");
return B_ERROR;
}
bool
arch_on_signal_stack(struct thread *thread)
{
return false;
}
status_t
arch_setup_signal_frame(struct thread *thread, struct sigaction *sa, int sig, int sigMask)
{
return B_ERROR;
}
int64
arch_restore_signal_frame(void)
{
return 0;
}
void
arch_check_syscall_restart(struct thread *thread)
{
}
/** Saves everything needed to restore the frame in the child fork in the
* arch_fork_arg structure to be passed to arch_restore_fork_frame().
* Also makes sure to return the right value.
*/
void
arch_store_fork_frame(struct arch_fork_arg *arg)
{
}
/** Restores the frame from a forked team as specified by the provided
* arch_fork_arg structure.
* Needs to be called from within the child team, i.e. instead of
* arch_thread_enter_userspace() as thread "starter".
* This function does not return to the caller, but will enter userland
* in the child team at the same position where the parent team left off.
*/
void
arch_restore_fork_frame(struct arch_fork_arg *arg)
{
}

View File

@ -0,0 +1,46 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#include <boot/stage2.h>
#include <kernel.h>
#include <debug.h>
#include <timer.h>
#include <arch/timer.h>
static bigtime_t sTickRate;
void
arch_timer_set_hardware_timer(bigtime_t timeout)
{
bigtime_t new_val_64;
if (timeout < 1000)
timeout = 1000;
new_val_64 = (timeout * sTickRate) / 1000000;
asm("mtdec %0" :: "r"((uint32)new_val_64));
}
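// Example: with a 25 MHz tick rate, a 10000 us timeout loads the
// decrementer with 10000 * 25000000 / 1000000 = 250000 ticks.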
void
arch_timer_clear_hardware_timer()
{
asm("mtdec %0" :: "r"(0x7fffffff));
}
int
arch_init_timer(kernel_args *ka)
{
sTickRate = ka->arch_args.time_base_frequency;
return 0;
}

View File

@ -0,0 +1,77 @@
/*
* Copyright 2005, Axel Dörfler, axeld@pinc-softare.de
* Distributed under the terms of the MIT License.
*/
#include <debugger.h>
#include <int.h>
#include <thread.h>
#include <arch/user_debugger.h>
void
arch_clear_team_debug_info(struct arch_team_debug_info *info)
{
}
void
arch_destroy_team_debug_info(struct arch_team_debug_info *info)
{
arch_clear_team_debug_info(info);
}
void
arch_clear_thread_debug_info(struct arch_thread_debug_info *info)
{
}
void
arch_destroy_thread_debug_info(struct arch_thread_debug_info *info)
{
arch_clear_thread_debug_info(info);
}
void
arch_set_debug_cpu_state(const struct debug_cpu_state *cpuState)
{
}
void
arch_get_debug_cpu_state(struct debug_cpu_state *cpuState)
{
}
status_t
arch_set_breakpoint(void *address)
{
return B_ERROR;
}
status_t
arch_clear_breakpoint(void *address)
{
return B_ERROR;
}
status_t
arch_set_watchpoint(void *address, uint32 type, int32 length)
{
return B_ERROR;
}
status_t
arch_clear_watchpoint(void *address)
{
return B_ERROR;
}

View File

@ -0,0 +1,172 @@
/*
* Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <KernelExport.h>
#include <kernel.h>
#include <boot/kernel_args.h>
#include <vm.h>
#include <arch/vm.h>
#include <arch_mmu.h>
//#define TRACE_ARCH_VM
#ifdef TRACE_ARCH_VM
# define TRACE(x) dprintf x
#else
# define TRACE(x) ;
#endif
status_t
arch_vm_init(kernel_args *args)
{
return B_OK;
}
status_t
arch_vm_init2(kernel_args *args)
{
// int bats[8];
// int i;
#if 0
// print out any bat mappings
getibats(bats);
dprintf("ibats:\n");
for(i = 0; i < 4; i++)
dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
getdbats(bats);
dprintf("dbats:\n");
for(i = 0; i < 4; i++)
dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
#endif
#if 1
// turn off the first 2 BAT mappings (3 & 4 are used by the lower level code)
block_address_translation bat;
bat.Clear();
set_ibat0(&bat);
set_ibat1(&bat);
set_dbat0(&bat);
set_dbat1(&bat);
/* getibats(bats);
memset(bats, 0, 2 * 2);
setibats(bats);
getdbats(bats);
memset(bats, 0, 2 * 2);
setdbats(bats);
*/
#endif
#if 0
// just clear the first BAT mapping (0 - 256MB)
dprintf("msr 0x%x\n", getmsr());
{
unsigned int reg;
asm("mr %0,1" : "=r"(reg));
dprintf("sp 0x%x\n", reg);
}
dprintf("ka %p\n", ka);
getibats(bats);
dprintf("ibats:\n");
for(i = 0; i < 4; i++)
dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
bats[0] = bats[1] = 0;
setibats(bats);
getdbats(bats);
dprintf("dbats:\n");
for(i = 0; i < 4; i++)
dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
bats[0] = bats[1] = 0;
setdbats(bats);
#endif
return B_OK;
}
status_t
arch_vm_init_post_area(kernel_args *args)
{
return B_OK;
}
status_t
arch_vm_init_end(kernel_args *args)
{
TRACE(("arch_vm_init_end(): %lu virtual ranges to keep:\n",
args->arch_args.num_virtual_ranges_to_keep));
for (int i = 0; i < (int)args->arch_args.num_virtual_ranges_to_keep; i++) {
addr_range &range = args->arch_args.virtual_ranges_to_keep[i];
TRACE((" start: %p, size: 0x%lx\n", (void*)range.start, range.size));
// skip ranges outside the kernel address space
if (!IS_KERNEL_ADDRESS(range.start)) {
TRACE((" no kernel address, skipping...\n"));
continue;
}
void *address = (void*)range.start;
area_id area = create_area("boot loader reserved area", &address,
B_EXACT_ADDRESS, range.size, B_ALREADY_WIRED,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < 0) {
panic("arch_vm_init_end(): Failed to create area for boot loader "
"reserved area: %p - %p\n", (void*)range.start,
(void*)(range.start + range.size));
}
}
// Throw away any address space mappings we've inherited from the boot
// loader and have not yet turned into an area.
vm_free_unused_boot_loader_range(0, 0xffffffff - B_PAGE_SIZE + 1);
return B_OK;
}
status_t
arch_vm_init_post_modules(kernel_args *args)
{
return B_OK;
}
void
arch_vm_aspace_swap(vm_address_space *aspace)
{
}
bool
arch_vm_supports_protection(uint32 protection)
{
return true;
}
void
arch_vm_unset_memory_type(vm_area *area)
{
}
status_t
arch_vm_set_memory_type(vm_area *area, addr_t physicalBase, uint32 type)
{
if (type == 0)
return B_OK;
return B_ERROR;
}

View File

@ -0,0 +1,745 @@
/*
* Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
/* (bonefish) Some explanatory words on how address translation is implemented
for the 32 bit PPC architecture.
I use the address type nomenclature as used in the PPC architecture
specs, i.e.
- effective address: An address as used by program instructions, i.e.
that's what elsewhere (e.g. in the VM implementation) is called
virtual address.
- virtual address: An intermediate address computed from the effective
address via the segment registers.
- physical address: An address referring to physical storage.
The hardware translates an effective address to a physical address using
either of two mechanisms: 1) Block Address Translation (BAT) or
2) segment + page translation. The first mechanism does this directly
using two sets (for data/instructions) of special purpose registers.
The latter mechanism is of more relevance here, though:
effective address (32 bit):  [ 0 ESID 3 | 4   PIX  19 | 20 Byte 31 ]
                                  |          |             |
                        (segment registers)  |             |
                                  |          |             |
virtual address (52 bit):  [ 0  VSID  23 | 24   PIX  39 | 40 Byte 51 ]
                           [ 0         VPN           39 | 40 Byte 51 ]
                                       |                     |
                                 (page table)                |
                                       |                     |
physical address (32 bit):       [ 0  PPN  19 | 20 Byte 31 ]

ESID: Effective Segment ID
VSID: Virtual Segment ID
PIX:  Page Index
VPN:  Virtual Page Number
PPN:  Physical Page Number
Unlike on x86 we can't just switch the context to another team by just
setting a register to another page directory, since we only have one
page table containing both kernel and user address mappings. Instead we
map the effective address space of kernel and *all* teams
non-intersectingly into the virtual address space (which fortunately is
20 bits wider), and use the segment registers to select the section of
the virtual address space for the current team. Half of the 16 segment
registers (8 - 15) map the kernel addresses, so they remain unchanged.
The range of the virtual address space a team's effective address space
is mapped to is defined by its vm_translation_map_arch_info::vsid_base,
which is the first of the 8 successive VSID values used for the team.
Which vsid_base values are already taken is defined by the set bits in
the bitmap sVSIDBaseBitmap.
TODO:
* If we want to continue to use the OF services, we would need to add
its address mappings to the kernel space. Unfortunately some stuff
(especially RAM) is mapped in an address range without the kernel
address space. We probably need to map those into each team's address
space as kernel read/write areas.
* The current locking scheme is insufficient. The page table is a resource
shared by all teams. We need to synchronize access to it. Probably via a
spinlock.
*/
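/* Editor's illustration (not part of the original sources): splitting a
32 bit effective address according to the diagram above. */
#if 0
static inline void
split_effective_address(uint32 effectiveAddress, uint32 *esid,
uint32 *pageIndex, uint32 *byteOffset)
{
*esid = effectiveAddress >> 28; // bits 0-3: selects a segment register
*pageIndex = (effectiveAddress >> 12) & 0xffff; // bits 4-19
*byteOffset = effectiveAddress & 0xfff; // bits 20-31: offset in the page
}
#endif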
#include <KernelExport.h>
#include <kernel.h>
#include <vm.h>
#include <vm_address_space.h>
#include <vm_priv.h>
#include <int.h>
#include <boot/kernel_args.h>
#include <arch/vm_translation_map.h>
#include <arch/cpu.h>
#include <arch_mmu.h>
#include <stdlib.h>
#include "generic_vm_physical_page_mapper.h"
static struct page_table_entry_group *sPageTable;
static size_t sPageTableSize;
static uint32 sPageTableHashMask;
static area_id sPageTableArea;
// 64 MB of iospace
#define IOSPACE_SIZE (64*1024*1024)
// We only have small (4 KB) pages. The only reason for choosing greater chunk
// size is to keep the waste of memory limited, since the generic page mapper
// allocates structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
static addr_t sIOSpaceBase;
// The VSID is a 24 bit number. The lower three bits are defined by the
// (effective) segment number, which leaves us with a 21 bit space of
// VSID bases (= 2 * 1024 * 1024). Of these, the bitmap below tracks only
// the first PAGE_SIZE * 8 (one page worth of bits), which is plenty.
#define MAX_VSID_BASES (PAGE_SIZE * 8)
static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
static spinlock sVSIDBaseBitmapLock;
#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(map, vaddr) \
((map)->arch_data->vsid_base + ((vaddr) >> 28))
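// Example: with vsid_base 0x10, effective address 0x30001000 lies in
// segment 3, so VADDR_TO_VSID() yields 0x10 + 3 = 0x13.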
// vm_translation object stuff
typedef struct vm_translation_map_arch_info {
int vsid_base; // used VSIDs are vsid_base ... vsid_base + 7
} vm_translation_map_arch_info;
void
ppc_translation_map_change_asid(vm_translation_map *map)
{
// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
int vsidBase = map->arch_data->vsid_base;
isync(); // synchronize context
asm("mtsr 0,%0" : : "g"(vsidBase));
asm("mtsr 1,%0" : : "g"(vsidBase + 1));
asm("mtsr 2,%0" : : "g"(vsidBase + 2));
asm("mtsr 3,%0" : : "g"(vsidBase + 3));
asm("mtsr 4,%0" : : "g"(vsidBase + 4));
asm("mtsr 5,%0" : : "g"(vsidBase + 5));
asm("mtsr 6,%0" : : "g"(vsidBase + 6));
asm("mtsr 7,%0" : : "g"(vsidBase + 7));
isync(); // synchronize context
}
static status_t
lock_tmap(vm_translation_map *map)
{
recursive_lock_lock(&map->lock);
return 0;
}
static status_t
unlock_tmap(vm_translation_map *map)
{
recursive_lock_unlock(&map->lock);
return 0;
}
static void
destroy_tmap(vm_translation_map *map)
{
if (map->map_count > 0) {
panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
map, map->map_count);
}
// mark the vsid base not in use
int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT;
atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
~(1 << (baseBit % 32)));
free(map->arch_data);
recursive_lock_destroy(&map->lock);
}
static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
bool secondaryHash)
{
// lower 32 bit - set at once
entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
entry->_reserved0 = 0;
entry->referenced = false;
entry->changed = false;
entry->write_through = false;
entry->caching_inhibited = false;
entry->memory_coherent = false;
entry->guarded = false;
entry->_reserved1 = 0;
entry->page_protection = protection & 0x3;
eieio();
// we need to make sure that the lower 32 bit were
// already written when the entry becomes valid
// upper 32 bit
entry->virtual_segment_id = virtualSegmentID;
entry->secondary_hash = secondaryHash;
entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
entry->valid = true;
ppc_sync();
}
static size_t
map_max_pages_need(vm_translation_map *map, addr_t start, addr_t end)
{
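// The hash page table is preallocated, so mapping needs no extra pages.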
return 0;
}
static status_t
map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes)
{
// lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
uint32 protection = 0;
// ToDo: check this
// all kernel mappings are R/W to supervisor code
if (attributes & (B_READ_AREA | B_WRITE_AREA))
protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
// Search for a free page table slot using the primary hash value
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->valid)
continue;
fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
protection, false);
map->map_count++;
return B_OK;
}
// Didn't find one, try the secondary hash value
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->valid)
continue;
fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
protection, false);
map->map_count++;
return B_OK;
}
panic("vm_translation_map.map_tmap: hash table full\n");
return B_ERROR;
}
static page_table_entry *
lookup_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
{
// lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
// dprintf("vm_translation_map.lookup_page_table_entry: vsid %d, va 0x%lx\n", vsid, va);
// Search for the page table entry using the primary hash value
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->virtual_segment_id == virtualSegmentID
&& entry->secondary_hash == false
&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
return entry;
}
// Didn't find it, try the secondary hash value
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->virtual_segment_id == virtualSegmentID
&& entry->secondary_hash == true
&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
return entry;
}
return NULL;
}
static bool
remove_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
{
page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
if (entry) {
entry->valid = 0;
ppc_sync();
tlbie(virtualAddress);
eieio();
tlbsync();
ppc_sync();
}
return entry;
}
static status_t
unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
{
page_table_entry *entry;
start = ROUNDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
// dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
while (start < end) {
if (remove_page_table_entry(map, start))
map->map_count--;
start += B_PAGE_SIZE;
}
return B_OK;
}
static status_t
query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
{
page_table_entry *entry;
// default the flags to not present
*_outFlags = 0;
*_outPhysical = 0;
entry = lookup_page_table_entry(map, va);
if (entry == NULL)
return B_NO_ERROR;
// ToDo: check this!
if (IS_KERNEL_ADDRESS(va))
*_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
else
*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
*_outFlags |= entry->valid ? PAGE_PRESENT : 0;
*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
return B_OK;
}
static status_t
map_iospace_chunk(addr_t va, addr_t pa)
{
pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
// map the pages
return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}
static addr_t
get_mapped_size_tmap(vm_translation_map *map)
{
return map->map_count;
}
static status_t
protect_tmap(vm_translation_map *map, addr_t base, addr_t top, uint32 attributes)
{
// XXX finish
return B_ERROR;
}
static status_t
clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags)
{
page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
if (entry == NULL)
return B_NO_ERROR;
bool modified = false;
// clear the bits
if (flags & PAGE_MODIFIED && entry->changed) {
entry->changed = false;
modified = true;
}
if (flags & PAGE_ACCESSED && entry->referenced) {
entry->referenced = false;
modified = true;
}
// synchronize
if (modified) {
tlbie(virtualAddress);
eieio();
tlbsync();
ppc_sync();
}
return B_OK;
}
static void
flush_tmap(vm_translation_map *map)
{
// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
// even cut it here. We are supposed to invalidate all TLB entries for this
// map on all CPUs. We should loop over the virtual pages and invoke tlbie
// instead (which marks the entry invalid on all CPUs).
arch_cpu_global_TLB_invalidate();
}
static status_t
get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags)
{
return generic_get_physical_page(pa, va, flags);
}
static status_t
put_physical_page_tmap(addr_t va)
{
return generic_put_physical_page(va);
}
static vm_translation_map_ops tmap_ops = {
destroy_tmap,
lock_tmap,
unlock_tmap,
map_max_pages_need,
map_tmap,
unmap_tmap,
query_tmap,
query_tmap, // doubles as the interrupt-safe query variant
get_mapped_size_tmap,
protect_tmap,
clear_flags_tmap,
flush_tmap,
get_physical_page_tmap,
put_physical_page_tmap
};
// #pragma mark -
// VM API
status_t
arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
{
// initialize the new object
map->ops = &tmap_ops;
map->map_count = 0;
if (!kernel) {
// During the boot process, there are no semaphores available at this
// point, so we only create the translation map lock when we're
// initializing a user translation map.
// arch_vm_translation_map_init_kernel_map_post_sem() is used to complete
// the kernel translation map.
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
}
map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
if (map->arch_data == NULL) {
if (!kernel)
recursive_lock_destroy(&map->lock);
return B_NO_MEMORY;
}
cpu_status state = disable_interrupts();
acquire_spinlock(&sVSIDBaseBitmapLock);
// allocate a VSID base for this one
if (kernel) {
// The boot loader has set up the segment registers for identical
// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
// latter one for mapping the kernel address space (0x80000000...), the
// former one for the lower addresses required by the Open Firmware
// services.
map->arch_data->vsid_base = 0;
sVSIDBaseBitmap[0] |= 0x3;
} else {
int i = 0;
while (i < MAX_VSID_BASES) {
if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
i += 32;
continue;
}
if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
// we found it
sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
break;
}
i++;
}
if (i >= MAX_VSID_BASES)
panic("vm_translation_map_create: out of VSID bases\n");
map->arch_data->vsid_base = i << VSID_BASE_SHIFT;
}
release_spinlock(&sVSIDBaseBitmapLock);
restore_interrupts(state);
return B_OK;
}
status_t
arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
{
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
return B_OK;
}
status_t
arch_vm_translation_map_init(kernel_args *args)
{
sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
sPageTableSize = args->arch_args.page_table.size;
sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;
// init physical page mapper
status_t error = generic_vm_physical_page_mapper_init(args,
map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
if (error != B_OK)
return error;
return B_OK;
}
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
// If the page table doesn't lie within the kernel address space, we
// remap it.
if (!IS_KERNEL_ADDRESS(sPageTable)) {
addr_t newAddress = (addr_t)sPageTable;
status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
false);
if (error != B_OK) {
panic("arch_vm_translation_map_init_post_area(): Failed to remap "
"the page table!");
return error;
}
// set the new page table address
addr_t oldVirtualBase = (addr_t)(sPageTable);
sPageTable = (page_table_entry_group*)newAddress;
// unmap the old pages
ppc_unmap_address_range(oldVirtualBase, sPageTableSize);
// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
}
// create an area to cover the page table
sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
// init physical page mapper
status_t error = generic_vm_physical_page_mapper_init_post_area(args);
if (error != B_OK)
return error;
return B_OK;
}
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
// init physical page mapper
return generic_vm_physical_page_mapper_init_post_sem(args);
}
/** Directly maps a page without having knowledge of any kernel structures.
* Used only during VM setup.
* It currently ignores the "attributes" parameter and sets all pages
* read/write.
*/
status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
uint8 attributes, addr_t (*get_free_page)(kernel_args *))
{
uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
// 8 entries in a group
if (group->entry[i].valid)
continue;
fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, false);
return B_OK;
}
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
if (group->entry[i].valid)
continue;
fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, true);
return B_OK;
}
return B_ERROR;
}
// XXX currently assumes this translation map is active
status_t
arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
{
//PANIC_UNIMPLEMENTED();
panic("vm_translation_map_quick_query(): not yet implemented\n");
return B_OK;
}
// #pragma mark -
status_t
ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
size_t size)
{
addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
physicalAddress = ROUNDOWN(physicalAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
// map the pages
for (; virtualAddress < virtualEnd;
virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
status_t error = map_tmap(&addressSpace->translation_map,
virtualAddress, physicalAddress,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (error != B_OK)
return error;
}
return B_OK;
}
void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
remove_page_table_entry(&addressSpace->translation_map, virtualAddress);
}
status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
addr_t virtualAddress = ROUNDOWN(*_virtualAddress, B_PAGE_SIZE);
size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
// reserve space in the address space
void *newAddress = NULL;
status_t error = vm_reserve_address_range(addressSpace->id, &newAddress,
B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (error != B_OK)
return error;
// get the area's first physical page
page_table_entry *entry = lookup_page_table_entry(
&addressSpace->translation_map, virtualAddress);
if (!entry)
return B_ERROR;
addr_t physicalBase = entry->physical_page_number << 12;
// map the pages
error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
if (error != B_OK)
return error;
*_virtualAddress = (addr_t)newAddress;
// unmap the old pages
if (unmap)
ppc_unmap_address_range(virtualAddress, size);
return B_OK;
}