cleanup
Less ppc, more m68k :)


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22822 a95241bf-73f2-0310-859d-f6bbb57e9c96
François Revol 2007-11-03 21:04:42 +00:00
parent 1a8a803ee1
commit 4e44040df4
22 changed files with 1870 additions and 979 deletions

View File

@ -8,6 +8,24 @@ UsePrivateHeaders kernel [ FDirName kernel arch $(TARGET_ARCH) ]
SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
# cpu-specific stuff
KernelMergeObject arch_m68k_030.o :
arch_030_cpu.cpp
arch_030_mmu.cpp
arch_030_asm.S
: $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused -m68030
;
KernelMergeObject arch_m68k_040.o :
arch_040.cpp
: $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused -m68040
;
KernelMergeObject arch_m68k_060.o :
arch_060.cpp
: $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused -m68060
;
KernelStaticLibrary libm68k :
arch_atomic.c
arch_cpu.cpp
@ -30,6 +48,10 @@ KernelStaticLibrary libm68k :
arch_asm.S
generic_vm_physical_page_mapper.cpp
arch_m68k_030.o
# arch_m68k_040.a
# arch_m68k_060.a
:
$(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused
;

View File

@ -0,0 +1,24 @@
#define FUNCTION(x) .global x; .type x,@function; x
.text
/* that one can be inlined */
FUNCTION(flush_insn_pipeline_030):
nop
rts
/* flush all ATC entries */
FUNCTION(flush_atc_all_030):
pflusha
rts
/* flush the ATC entry for a given address */
FUNCTION(flush_atc_addr_030):
move.l (4,%a7),%a0
pflush #0,#0,(%a0)
rts
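For context, these helpers use the plain m68k stack calling convention, which is why flush_atc_addr_030 fetches its argument from 4(%a7). A minimal C-side sketch of calling them (the prototypes match the externs declared in arch_030_cpu.cpp below; the wrapper function is hypothetical):

/* hypothetical caller; prototypes match the externs in arch_030_cpu.cpp */
extern void flush_atc_all_030(void);
extern void flush_atc_addr_030(void *addr);

static void
invalidate_translation(void *va, bool everything)
{
	if (everything)
		flush_atc_all_030();	/* pflusha: drop every ATC entry */
	else
		flush_atc_addr_030(va);	/* pflush: drop only the entry for va */
}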

View File

@ -0,0 +1,77 @@
/*
* Copyright 2003-2007, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* François Revol <revol@free.fr>
*/
#include <KernelExport.h>
#include <arch_platform.h>
#include <arch_thread.h>
#include <arch/cpu.h>
#include <boot/kernel_args.h>
#ifdef __cplusplus
extern "C" {
#endif
/* from arch_030_asm.S */
extern void flush_insn_pipeline_030(void);
extern void flush_atc_all_030(void);
extern void flush_atc_addr_030(void *addr);
#ifdef __cplusplus
}
#endif
#define CACHELINE 16
static void
sync_icache_030(void *address, size_t len)
{
int l, off;
char *p;
uint32 cacr;
off = (unsigned int)address & (CACHELINE - 1);
len += off;
l = len;
p = (char *)address - off;
asm volatile ("nop");
asm volatile ("movec %%cacr,%0" : "=r"(cacr):);
cacr |= 0x00000004; /* ClearInstructionCacheEntry */
do {
/* the 030 invalidates only 1 long of the cache line */
//XXX: what about 040 and 060 ?
asm volatile ("movec %0,%%caar\n" \
"movec %1,%%cacr\n" \
"addq.l #4,%0\n" \
"movec %0,%%caar\n" \
"movec %1,%%cacr\n" \
"addq.l #4,%0\n" \
"movec %0,%%caar\n" \
"movec %1,%%cacr\n" \
"addq.l #4,%0\n" \
"movec %0,%%caar\n" \
"movec %1,%%cacr\n" \
:: "r"(p), "r"(cacr));
p += CACHELINE;
} while ((l -= CACHELINE) > 0);
asm volatile ("nop");
}
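To make the rounding arithmetic concrete, a worked example with illustrative values:

/* example: sync_icache_030((void *)0x1003, 5)
 *   off = 0x1003 & 15 = 3   -> p starts at 0x1000
 *   len = 5 + 3 = 8         -> l = 8, so the loop runs once
 *   CAAR is written at 0x1000, 0x1004, 0x1008, 0x100c:
 *   four longword invalidations cover the whole 16-byte line
 */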
struct m68k_cpu_ops cpu_ops_030 = {
&flush_insn_pipeline_030,
&flush_atc_all_030,
&flush_atc_all_030, // no global flag, so no useronly flushing
&flush_atc_addr_030,
&sync_icache_030, // dcache is the same
&sync_icache_030,
NULL // idle
};
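Once arch_cpu_init() (in arch_cpu.cpp below) has copied these pointers into the global cpu_ops table, generic code can flush caches without knowing which CPU it runs on. A hedged sketch of a caller; the function is hypothetical, the field names are the ones this commit uses:

/* hypothetical caller dispatching through the global cpu_ops table */
static void
flush_patched_code(void *address, size_t len)
{
	cpu_ops.flush_icache(address, len);	/* sync_icache_030 on a 68030 */
	cpu_ops.flush_insn_pipeline();		/* nop, forcing a pipeline resync */
}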

View File

@ -0,0 +1,213 @@
/*
* Copyright 2007, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* François Revol <revol@free.fr>
*/
#include <arch/cpu.h>
#define ARCH_M68K_MMU_TYPE MMU_68030
enum descriptor_types {
DT_INVALID = 0, // invalid entry
DT_PAGE, // page descriptor
DT_VALID_4, // short page table descriptor
DT_VALID_8, // long page table descriptor
};
// = names in MC user's manual
// or comments
struct short_page_directory_entry {
// upper 32 bits
uint32 type : 2; // DT_*
uint32 write_protect : 1;
uint32 accessed : 1; // = used
uint32 addr : 28; // address
};
struct long_page_directory_entry {
// upper 32 bits
uint32 type : 2;
uint32 write_protect : 1;
uint32 accessed : 1; // = used
uint32 _zero1 : 4;
uint32 supervisor : 1;
uint32 _zero2 : 1;
uint32 _ones : 6;
uint32 limit : 15;
uint32 low_up : 1; // limit is lower(1)/upper(0)
// lower 32 bits
uint32 unused : 4; //
uint32 addr : 28; // address
};
struct short_page_table_entry {
uint32 type : 2;
uint32 write_protect : 1;
uint32 accessed : 1; // = used
uint32 dirty : 1; // = modified
uint32 _zero1 : 1;
uint32 cache_disabled : 1; // = cache_inhibit
uint32 _zero2 : 1;
uint32 addr : 24; // address
};
struct long_page_table_entry {
// upper 32 bits
uint32 type : 2;
uint32 write_protect : 1;
uint32 accessed : 1; // = used
uint32 dirty : 1; // = modified
uint32 _zero1 : 1;
uint32 cache_disabled : 1; // = cache_inhibit
uint32 _zero2 : 1;
uint32 supervisor : 1;
uint32 _zero3 : 1;
uint32 _ones : 6;
// limit only used on early table terminators, else unused
uint32 limit : 15;
uint32 low_up : 1; // limit is lower(1)/upper(0)
// lower 32 bits
uint32 unused : 8; //
uint32 addr : 24; // address
};
/* rarely used */
struct short_indirect_entry {
// upper 32 bits
uint32 type : 2; // DT_*
uint32 addr : 30; // address
};
struct long_indirect_entry {
// upper 32 bits
uint32 type : 2;
uint32 unused1 : 30;
// lower 32 bits
uint32 unused2 : 2; //
uint32 addr : 30; // address
};
/* for clarity:
- the top level page directory will be called "page root", (root or rtdir)
- the 2nd level will be "page directory" like on x86, (pgdir)
- the 3rd level is a "page table" as on x86. (pgtbl)
*/
typedef struct short_page_directory_entry page_root_entry;
typedef struct short_page_directory_entry page_directory_entry;
typedef struct long_page_table_entry page_table_entry;
typedef struct long_indirect_entry page_indirect_entry;
/* scalar storage type that maps them */
typedef uint32 page_root_entry_scalar;
typedef uint32 page_directory_entry_scalar;
typedef uint64 page_table_entry_scalar;
typedef uint64 page_indirect_entry_scalar;
#define DT_ROOT DT_VALID_4
#define DT_DIR DT_VALID_8
//#define DT_PAGE DT_PAGE :)
#define DT_INDIRECT DT_VALID_8
/* default scalar values for entries */
#define DFL_ROOTENT_VAL 0x00000000
#define DFL_DIRENT_VAL 0x00000000
// limit disabled, 6bits at 1
// (limit isn't used on that level, but just in case)
#define DFL_PAGEENT_VAL 0x7FFFFC0000000000LL
#define NUM_ROOTENT_PER_TBL 128
#define NUM_DIRENT_PER_TBL 128
#define NUM_PAGEENT_PER_TBL 64
/* unlike on x86, the root/dir/page table sizes differ from B_PAGE_SIZE,
 * so we have to fit more than one table per page to avoid wasting space.
 * We allocate a page-sized group of tables with the one we want inside,
 * starting at the aligned index needed, to make them easy to free.
 */
#define SIZ_ROOTTBL (128 * sizeof(page_root_entry))
#define SIZ_DIRTBL (128 * sizeof(page_directory_entry))
#define SIZ_PAGETBL (64 * sizeof(page_table_entry))
//#define NUM_ROOTTBL_PER_PAGE (B_PAGE_SIZE / SIZ_ROOTTBL)
#define NUM_DIRTBL_PER_PAGE (B_PAGE_SIZE / SIZ_DIRTBL)
#define NUM_PAGETBL_PER_PAGE (B_PAGE_SIZE / SIZ_PAGETBL)
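Working the numbers through, assuming B_PAGE_SIZE is 4096 (it is defined elsewhere): short root/directory entries are 4 bytes and long page table entries are 8 bytes, so each table is 512 bytes and eight directory or page tables fit per page. A hypothetical compile-time check:

/* hypothetical sanity checks, assuming B_PAGE_SIZE == 4096 */
typedef char _check_dirtbl[(NUM_DIRTBL_PER_PAGE == 8) ? 1 : -1];
typedef char _check_pagetbl[(NUM_PAGETBL_PER_PAGE == 8) ? 1 : -1];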
/* macros to get the physical page or table number and address of tables from
* descriptors */
#if 0
/* XXX:
suboptimal:
struct foo {
int a:2;
int b:30;
} v = {...};
*(int *)0 = (v.b) << 2;
generates:
sarl $2, %eax
sall $2, %eax
We use a cast + bitmasking, since all address fields are already shifted
*/
// from a root entry
#define PREA_TO_TA(a) ((a) << 4)
#define PREA_TO_PN(a) ((a) >> (12-4))
#define PREA_TO_PA(a) ((a) << 4)
#define TA_TO_PREA(a) ((a) >> 4)
//...
#endif
// TA: table address
// PN: page number
// PA: page address
// PO: page offset (offset of table in page)
// PI: page index (index of table relative to page start)
// from a root entry
#define PRE_TO_TA(a) ((*(uint32 *)(&(a))) & ~((1<<4)-1))
#define PRE_TO_PN(e) ((*(uint32 *)(&(e))) >> 12)
#define PRE_TO_PA(e) ((*(uint32 *)(&(e))) & ~((1<<12)-1))
#define PRE_TO_PO(e) ((*(uint32 *)(&(e))) & ((1<<12)-1))
#define PRE_TO_PI(e) (((*(uint32 *)(&(e))) & ((1<<12)-1)) / SIZ_DIRTBL)
#define TA_TO_PREA(a) ((a) >> 4)
// from a directory entry
#define PDE_TO_TA(a) ((*(uint32 *)(&(a))) & ~((1<<4)-1))
#define PDE_TO_PN(e) ((*(uint32 *)(&(e))) >> 12)
#define PDE_TO_PA(e) ((*(uint32 *)(&(e))) & ~((1<<12)-1))
#define PDE_TO_PO(e) ((*(uint32 *)(&(e))) & ((1<<12)-1))
#define PDE_TO_PI(e) (((*(uint32 *)(&(e))) & ((1<<12)-1)) / SIZ_PAGETBL)
#define TA_TO_PDEA(a) ((a) >> 4)
// from a table entry
#define PTE_TO_TA(a) ((((uint32 *)(&(a)))[1]) & ~((1<<8)-1))
#define PTE_TO_PN(e) ((((uint32 *)(&(e)))[1]) >> 12)
#define PTE_TO_PA(e) ((((uint32 *)(&(e)))[1]) & ~((1<<12)-1))
#define TA_TO_PTEA(a) ((a) >> 8)
// from an indirect entry
#define PIE_TO_TA(a) ((((uint32 *)(&(a)))[1]) & ~((1<<2)-1))
#define PIE_TO_PN(e) ((((uint32 *)(&(e)))[1]) >> 12)
#define PIE_TO_PA(e) ((((uint32 *)(&(e)))[1]) & ~((1<<12)-1))
#define TA_TO_PIEA(a) ((a) >> 2)
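To show how the converters chain together, here is a hypothetical walk from the root table down to a page table entry. It is not part of the patch: the 7/7/6-bit index split is inferred from the NUM_*ENT_PER_TBL values and 4 KB pages, and the physical-to-virtual mapping of table addresses is ignored for clarity:

/* hypothetical tree walk using the conversion macros above */
static page_table_entry *
lookup_pte(page_root_entry *root, addr_t va)
{
	page_root_entry *re = &root[va >> 25];	/* 128 root entries */
	if (re->type == DT_INVALID)
		return NULL;
	page_directory_entry *dir = (page_directory_entry *)PRE_TO_TA(*re);
	page_directory_entry *de = &dir[(va >> 18) & 0x7f];	/* 128 dir entries */
	if (de->type == DT_INVALID)
		return NULL;
	page_table_entry *tbl = (page_table_entry *)PDE_TO_TA(*de);
	return &tbl[(va >> 12) & 0x3f];	/* 64 page entries */
}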
#include "arch_vm_translation_map_impl.cpp"
struct m68k_vm_ops m68030_vm_ops = {
m68k_translation_map_get_pgdir,
arch_vm_translation_map_init_map,
arch_vm_translation_map_init_kernel_map_post_sem,
arch_vm_translation_map_init,
arch_vm_translation_map_init_post_area,
arch_vm_translation_map_init_post_sem,
arch_vm_translation_map_early_map,
arch_vm_translation_map_early_query,
#if 0
m68k_map_address_range,
m68k_unmap_address_range,
m68k_remap_address_range
#endif
};

View File

@ -14,183 +14,70 @@
// ToDo: fixme
FUNCTION(reboot):
reset
reset
/* void arch_int_enable_interrupts(void) */
FUNCTION(arch_int_enable_interrupts):
mfmsr %r3 // load msr
li %r4, 1
insrwi %r3, %r4, 1, 31 - MSR_EXCEPTIONS_ENABLED
// sets bit 15, EE
mtmsr %r3 // put it back into the msr
blr
andi #0xf8ff,%sr
rts
/* int arch_int_disable_interrupts(void)
* r3
*/
FUNCTION(arch_int_disable_interrupts):
mfmsr %r4 // load msr
mr %r3, %r4 // save old state
rlwinm %r4, %r4, 0, 32 - MSR_EXCEPTIONS_ENABLED, 30 - MSR_EXCEPTIONS_ENABLED
// clears bit 15, EE
mtmsr %r4 // put it back into the msr
blr
clr.l %d0
move %sr,%d0
move.l %d0,%d1
ori.w #0x0700,%d1
move %d1,%sr
// return value: previous IPM
lsr.l #8,%d0
andi.l #7,%d0
rts
/* void arch_int_restore_interrupts(int oldState)
* r3
*/
FUNCTION(arch_int_restore_interrupts):
mfmsr %r4
move.l (4,%a7),%d0
// make sure we only have IPM bits
andi.w #7,%d0
lsl.w #8,%d0
move %sr,%d1
andi.w #0xf8ff,%d1
or.w %d0,%d1
move %d1,%sr
rts
rlwimi %r4, %r3, 0, 31 - MSR_EXCEPTIONS_ENABLED, 31 - MSR_EXCEPTIONS_ENABLED
// clear or set bit 15, EE to the same state as in r3, oldState
mtmsr %r4
blr
/* bool arch_int_are_interrupts_enabled(void) */
FUNCTION(arch_int_are_interrupts_enabled):
mfmsr %r3 // load msr
extrwi %r3, %r3, 1, 31 - MSR_EXCEPTIONS_ENABLED
// mask out the EE bit
blr
clr.l %d0
move %sr,%d1
andi.w #0x0700,%d1
bne arch_int_are_interrupts_enabled_no
moveq.l #1,%d0
arch_int_are_interrupts_enabled_no:
rts
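Together these give the usual save/disable/restore pattern around critical sections; a short C-side sketch of the intended use:

/* sketch: protecting a critical section with the routines above */
int state = arch_int_disable_interrupts();	/* IPM to 7, returns old level */
/* ... touch data shared with interrupt handlers ... */
arch_int_restore_interrupts(state);	/* put the saved IPM back */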
// ToDo: fixme
FUNCTION(dbg_save_registers):
blr
rts
/* long long get_time_base(void) */
FUNCTION(get_time_base):
1:
mftbu %r3 // get the upper time base register
mftb %r4 // get the lower time base register
mftbu %r5 // get the upper again
cmpw %r5, %r3 // see if it changed while we were reading the lower
bne- 1b // if so, repeat
blr
#warning M68K: implement get_time_base!
clr.l %d0
clr.l %d1
//passed through a0 or d0:d1 ?
rts
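On the question in the comment: GCC on m68k conventionally returns 64-bit integers in the d0:d1 pair, which is what the stub already zeroes; treat that as an assumption until checked. Caller side:

/* caller-side sketch: the 64-bit result arrives in d0:d1 */
long long tb = get_time_base();	/* stub currently always returns 0 */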
/* void getibats(int bats[8]); */
FUNCTION(getibats):
mfibatu %r0,0
stw %r0,0(%r3)
mfibatl %r0,0
stwu %r0,4(%r3)
mfibatu %r0,1
stwu %r0,4(%r3)
mfibatl %r0,1
stwu %r0,4(%r3)
mfibatu %r0,2
stwu %r0,4(%r3)
mfibatl %r0,2
stwu %r0,4(%r3)
mfibatu %r0,3
stwu %r0,4(%r3)
mfibatl %r0,3
stwu %r0,4(%r3)
blr
// void setibats(int bats[8]);
FUNCTION(setibats):
lwz %r0,0(%r3)
mtibatu 0,%r0
isync
lwzu %r0,4(%r3)
mtibatl 0,%r0
isync
lwzu %r0,4(%r3)
mtibatu 1,%r0
isync
lwzu %r0,4(%r3)
mtibatl 1,%r0
isync
lwzu %r0,4(%r3)
mtibatu 2,%r0
isync
lwzu %r0,4(%r3)
mtibatl 2,%r0
isync
lwzu %r0,4(%r3)
mtibatu 3,%r0
isync
lwzu %r0,4(%r3)
mtibatl 3,%r0
isync
blr
// void getdbats(int bats[8]);
FUNCTION(getdbats):
mfdbatu %r0,0
stw %r0,0(%r3)
mfdbatl %r0,0
stwu %r0,4(%r3)
mfdbatu %r0,1
stwu %r0,4(%r3)
mfdbatl %r0,1
stwu %r0,4(%r3)
mfdbatu %r0,2
stwu %r0,4(%r3)
mfdbatl %r0,2
stwu %r0,4(%r3)
mfdbatu %r0,3
stwu %r0,4(%r3)
mfdbatl %r0,3
stwu %r0,4(%r3)
blr
// void setdbats(int bats[8]);
FUNCTION(setdbats):
lwz %r0,0(%r3)
mtdbatu 0,%r0
lwzu %r0,4(%r3)
mtdbatl 0,%r0
lwzu %r0,4(%r3)
mtdbatu 1,%r0
lwzu %r0,4(%r3)
mtdbatl 1,%r0
lwzu %r0,4(%r3)
mtdbatu 2,%r0
lwzu %r0,4(%r3)
mtdbatl 2,%r0
lwzu %r0,4(%r3)
mtdbatu 3,%r0
lwzu %r0,4(%r3)
mtdbatl 3,%r0
sync
blr
// unsigned int gethid0();
FUNCTION(gethid0):
mfspr %r3, 1008
blr
// void sethid0(unsigned int val);
FUNCTION(sethid0):
mtspr 1008, %r3
blr
// unsigned int getl2cr();
FUNCTION(getl2cr):
mfspr %r3, 1017
blr
// void setl2cr(unsigned int val);
FUNCTION(setl2cr):
mtspr 1017, %r3
blr
// void ppc_context_switch(addr_t *old_sp, addr_t new_sp);
FUNCTION(ppc_context_switch):
// void m68k_context_switch(addr_t *old_sp, addr_t new_sp);
FUNCTION(m68k_context_switch):
// regs to push on the stack: f13-f31, r13-r31, cr, r2, lr
@ -305,20 +192,20 @@ FUNCTION(ppc_context_switch):
blr
// void ppc_switch_stack_and_call(addr_t newKstack,
// void m68k_switch_stack_and_call(addr_t newKstack,
// void (*func)(void *), void *arg)
FUNCTION(ppc_switch_stack_and_call):
FUNCTION(m68k_switch_stack_and_call):
mr %r1, %r3 // set the new stack pointer
mtctr %r4 // move the target function into CTR
mr %r3, %r5 // move the arg to this func to the new arg
bctr
// ppc_kernel_thread_root(): parameters in r13-r15, the functions to call
// m68k_kernel_thread_root(): parameters in r13-r15, the functions to call
// (in that order). The function is used when spawning threads. It usually calls
// an initialization function, the actual thread function, and a function that
// destroys the thread.
FUNCTION(ppc_kernel_thread_root):
FUNCTION(m68k_kernel_thread_root):
mtlr %r13
blrl
mtlr %r14

View File

@ -1,4 +1,10 @@
/*
* Copyright 2007, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* François Revol <revol@free.fr>
*
* Copyright 2003, Marcus Overhagen. All rights reserved.
* Distributed under the terms of the OpenBeOS License.
*/

View File

@ -1,4 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -14,6 +17,16 @@
#include <arch/cpu.h>
#include <boot/kernel_args.h>
extern struct m68k_cpu_ops cpu_ops_030;
extern struct m68k_cpu_ops cpu_ops_040;
extern struct m68k_cpu_ops cpu_ops_060;
struct m68k_cpu_ops cpu_ops;
int cpu_type;
int fpu_type;
int mmu_type;
int platform;
status_t
arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
@ -32,6 +45,47 @@ arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu)
status_t
arch_cpu_init(kernel_args *args)
{
cpu_type = args->arch_args.cpu_type;
fpu_type = args->arch_args.fpu_type;
mmu_type = args->arch_args.mmu_type;
platform = args->arch_args.platform;
switch (cpu_type) {
case CPU_68020:
case CPU_68030:
cpu_ops.flush_insn_pipeline = cpu_ops_030.flush_insn_pipeline;
cpu_ops.flush_atc_all = cpu_ops_030.flush_atc_all;
cpu_ops.flush_atc_user = cpu_ops_030.flush_atc_user;
cpu_ops.flush_atc_addr = cpu_ops_030.flush_atc_addr;
cpu_ops.flush_cache_line = cpu_ops_030.flush_cache_line;
cpu_ops.idle = cpu_ops_030.idle; // NULL
//cpu_ops. = cpu_ops_030.;
break;
#ifdef SUPPORTS_040
case CPU_68040:
cpu_ops.flush_insn_pipeline = cpu_ops_040.flush_insn_pipeline;
cpu_ops.flush_atc_all = cpu_ops_040.flush_atc_all;
cpu_ops.flush_atc_user = cpu_ops_040.flush_atc_user;
cpu_ops.flush_atc_addr = cpu_ops_040.flush_atc_addr;
cpu_ops.flush_cache_line = cpu_ops_040.flush_cache_line;
cpu_ops.idle = cpu_ops_040.idle; // NULL
//cpu_ops. = cpu_ops_040.;
break;
#endif
#ifdef SUPPORTS_060
case CPU_68060:
cpu_ops.flush_insn_pipeline = cpu_ops_060.flush_insn_pipeline;
cpu_ops.flush_atc_all = cpu_ops_060.flush_atc_all;
cpu_ops.flush_atc_user = cpu_ops_060.flush_atc_user;
cpu_ops.flush_atc_addr = cpu_ops_060.flush_atc_addr;
cpu_ops.flush_cache_line = cpu_ops_060.flush_cache_line;
cpu_ops.idle = cpu_ops_060.idle;
//cpu_ops. = cpu_ops_060.;
break;
#endif
default:
panic("unknown cpu_type 0x%08lx\n", args->arch_args.cpu_type);
}
return B_OK;
}
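One note on the copy strategy: arch_cpu_sync_icache() below calls cpu_ops.flush_icache, but the switch above never assigns that member. Whole-struct assignment would copy every member and avoid such omissions; a sketch of that alternative (a suggestion, not what the patch does):

/* sketch: whole-struct assignment inside the switch above */
case CPU_68020:
case CPU_68030:
	cpu_ops = cpu_ops_030;	/* copies all members, including flush_icache */
	break;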
@ -48,53 +102,24 @@ arch_cpu_init_post_modules(kernel_args *args)
return B_OK;
}
#define CACHELINE 16
void
arch_cpu_sync_icache(void *address, size_t len)
{
int l, off;
char *p;
uint32 cacr;
off = (unsigned int)address & (CACHELINE - 1);
len += off;
l = len;
p = (char *)address - off;
asm volatile ("movec %%cacr,%0" : "=r"(cacr):);
cacr |= 0x00000004; /* ClearInstructionCacheEntry */
do {
/* the 030 invalidates only 1 long of the cache line */
//XXX: what about 040 and 060 ?
asm volatile ("movec %0,%%caar\n" \
"movec %1,%%cacr\n" \
"addq.l #4,%0\n" \
"movec %0,%%caar\n" \
"movec %1,%%cacr\n" \
"addq.l #4,%0\n" \
"movec %0,%%caar\n" \
"movec %1,%%cacr\n" \
"addq.l #4,%0\n" \
"movec %0,%%caar\n" \
"movec %1,%%cacr\n" \
:: "r"(p), "r"(cacr));
p += CACHELINE;
} while ((l -= CACHELINE) > 0);
m68k_nop();
cpu_ops.flush_icache(address, len);
}
void
arch_cpu_invalidate_TLB_range(addr_t start, addr_t end)
{
m68k_nop();
cpu_ops.flush_insn_pipeline();
while (start < end) {
pflush(start);
m68k_nop();
cpu_ops.flush_atc_addr(start);
cpu_ops.flush_insn_pipeline();
start += B_PAGE_SIZE;
}
m68k_nop();
cpu_ops.flush_insn_pipeline();
}
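A typical caller only needs to drop the ATC entries for the window it just changed, for example (hedged sketch):

/* sketch: invalidate translations for one remapped kernel window */
arch_cpu_invalidate_TLB_range(base, base + size);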
@ -103,29 +128,30 @@ arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages)
{
int i;
m68k_nop();
cpu_ops.flush_insn_pipeline();
for (i = 0; i < num_pages; i++) {
pflush(pages[i]);
m68k_nop();
cpu_ops.flush_atc_addr(pages[i]);
cpu_ops.flush_insn_pipeline();
}
m68k_nop();
cpu_ops.flush_insn_pipeline();
}
void
arch_cpu_global_TLB_invalidate(void)
{
m68k_nop();
pflusha();
m68k_nop();
cpu_ops.flush_insn_pipeline();
cpu_ops.flush_atc_all();
cpu_ops.flush_insn_pipeline();
}
void
arch_cpu_user_TLB_invalidate(void)
{
// pflushfd ?
arch_cpu_global_TLB_invalidate();
cpu_ops.flush_insn_pipeline();
cpu_ops.flush_atc_user();
cpu_ops.flush_insn_pipeline();
}
@ -224,6 +250,8 @@ arch_cpu_shutdown(bool reboot)
void
arch_cpu_idle(void)
{
if (cpu_ops.idle)
cpu_ops.idle();
#warning M68K: use LPSTOP ?
//asm volatile ("lpstop");
}

View File

@ -1,10 +1,11 @@
/*
* Copyright 2003-2006, Haiku Inc. All rights reserved.
* Copyright 2003-2007, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* Axel Dörfler <axeld@pinc-software.de>
* Ingo Weinhold <bonefish@cs.tu-berlin.de>
* François Revol <revol@free.fr>
*/
@ -56,7 +57,8 @@ static inline stack_frame *
get_current_stack_frame()
{
stack_frame *frame;
asm volatile("mr %0, %%r1" : "=r"(frame));
#warning M68K: a6 or a7 ?
asm volatile("move.l %%a6,%0" : "=r"(frame));
return frame;
}
@ -69,7 +71,7 @@ get_next_frame(addr_t framePointer, addr_t *next, addr_t *ip)
// set fault handler, so that we can safely access user stacks
if (thread) {
if (ppc_set_fault_handler(&thread->fault_handler, (addr_t)&&error))
if (m68k_set_fault_handler(&thread->fault_handler, (addr_t)&&error))
goto error;
}
@ -202,31 +204,23 @@ return 0;
if (frame) {
kprintf("iframe at %p\n", frame);
kprintf(" r0 0x%08lx r1 0x%08lx r2 0x%08lx r3 0x%08lx\n",
frame->r0, frame->r1, frame->r2, frame->r3);
kprintf(" r4 0x%08lx r5 0x%08lx r6 0x%08lx r7 0x%08lx\n",
frame->r4, frame->r5, frame->r6, frame->r7);
kprintf(" r8 0x%08lx r9 0x%08lx r10 0x%08lx r11 0x%08lx\n",
frame->r8, frame->r9, frame->r10, frame->r11);
kprintf(" r12 0x%08lx r13 0x%08lx r14 0x%08lx r15 0x%08lx\n",
frame->r12, frame->r13, frame->r14, frame->r15);
kprintf(" r16 0x%08lx r17 0x%08lx r18 0x%08lx r19 0x%08lx\n",
frame->r16, frame->r17, frame->r18, frame->r19);
kprintf(" r20 0x%08lx r21 0x%08lx r22 0x%08lx r23 0x%08lx\n",
frame->r20, frame->r21, frame->r22, frame->r23);
kprintf(" r24 0x%08lx r25 0x%08lx r26 0x%08lx r27 0x%08lx\n",
frame->r24, frame->r25, frame->r26, frame->r27);
kprintf(" r28 0x%08lx r29 0x%08lx r30 0x%08lx r31 0x%08lx\n",
frame->r28, frame->r29, frame->r30, frame->r31);
kprintf(" lr 0x%08lx cr 0x%08lx xer 0x%08lx ctr 0x%08lx\n",
frame->lr, frame->cr, frame->xer, frame->ctr);
kprintf("fpscr 0x%08lx\n", frame->fpscr);
kprintf(" srr0 0x%08lx srr1 0x%08lx dar 0x%08lx dsisr 0x%08lx\n",
frame->srr0, frame->srr1, frame->dar, frame->dsisr);
kprintf(" vector: 0x%lx\n", frame->vector);
kprintf(" d0 0x%08lx d1 0x%08lx d2 0x%08lx d3 0x%08lx\n",
frame->d0, frame->d1, frame->d2, frame->d3);
kprintf(" d4 0x%08lx d5 0x%08lx d6 0x%08lx d7 0x%08lx\n",
frame->d4, frame->d5, frame->d6, frame->d7);
kprintf(" a0 0x%08lx a1 0x%08lx a2 0x%08lx a3 0x%08lx\n",
frame->a0, frame->a1, frame->a2, frame->a3);
kprintf(" a4 0x%08lx a5 0x%08lx a6 0x%08lx a7 0x%08lx (sp)\n",
frame->a4, frame->a5, frame->a6, frame->a7);
print_stack_frame(thread, frame->srr0, framePointer, frame->r1);
framePointer = frame->r1;
/*kprintf(" pc 0x%08lx ccr 0x%02x\n",
frame->pc, frame->ccr);*/
kprintf(" pc 0x%08lx sr 0x%04x\n",
frame->pc, frame->sr);
#warning M68K: missing regs
print_stack_frame(thread, frame->pc, framePointer, frame->a6);
framePointer = frame->a6;
} else {
addr_t ip, nextFramePointer;

View File

@ -1,4 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*

View File

@ -1,4 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2005, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*
@ -7,6 +10,9 @@
* Distributed under the terms of the NewOS License.
*/
#ifdef _BOOT_MODE
#include <boot/arch.h>
#endif
#include <KernelExport.h>
@ -16,10 +22,15 @@
#define CHATTY 0
#ifdef _BOOT_MODE
status_t
boot_arch_elf_relocate_rel(struct preloaded_image *image,
struct Elf32_Rel *rel, int rel_len)
#else
int
arch_elf_relocate_rel(struct elf_image_info *image, const char *sym_prepend,
struct elf_image_info *resolve_image, struct Elf32_Rel *rel, int rel_len)
#endif
{
// there are no rel entries in PPC elf
return B_NO_ERROR;
@ -105,9 +116,15 @@ ha(Elf32_Word value)
}
#ifdef _BOOT_MODE
status_t
boot_arch_elf_relocate_rela(struct preloaded_image *image,
struct Elf32_Rela *rel, int rel_len)
#else
int
arch_elf_relocate_rela(struct elf_image_info *image, const char *sym_prepend,
struct elf_image_info *resolve_image, struct Elf32_Rela *rel, int rel_len)
#endif
{
int i;
struct Elf32_Sym *sym;
@ -128,7 +145,7 @@ arch_elf_relocate_rela(struct elf_image_info *image, const char *sym_prepend,
dprintf("arch_elf_relocate_rela(): Failed to get GOT address!\n"); \
return B_ERROR; \
}
// TODO: Get the PLT address!
#define REQUIRE_PLT \
if (L == 0) { \
@ -172,12 +189,16 @@ arch_elf_relocate_rela(struct elf_image_info *image, const char *sym_prepend,
case R_PPC_JMP_SLOT:
sym = SYMBOL(image, ELF32_R_SYM(rel[i].r_info));
#ifdef _BOOT_MODE
vlErr = boot_elf_resolve_symbol(image, sym, &S);
#else
vlErr = elf_resolve_symbol(image, sym, resolve_image,
sym_prepend, &S);
#endif
if (vlErr < 0) {
dprintf("arch_elf_relocate_rela(): Failed to relocate "
dprintf("%s(): Failed to relocate "
"entry index %d, rel type %d, offset 0x%lx, sym 0x%lx, "
"addend 0x%lx\n", i, ELF32_R_TYPE(rel[i].r_info),
"addend 0x%lx\n", __FUNCTION__, i, ELF32_R_TYPE(rel[i].r_info),
rel[i].r_offset, ELF32_R_SYM(rel[i].r_info),
rel[i].r_addend);
return vlErr;

View File

@ -5,6 +5,9 @@
* Authors:
* Axel Dörfler <axeld@pinc-software.de>
* Ingo Weinhold <bonefish@cs.tu-berlin.de>
* François Revol <revol@free.fr>
* Distributed under the terms of the MIT License.
*
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
@ -27,6 +30,7 @@
#include <vm_priv.h>
#include <string.h>
#warning M68K: writeme!
// defined in arch_exceptions.S
extern int __irqvec_start;
@ -77,18 +81,24 @@ static void
print_iframe(struct iframe *frame)
{
dprintf("iframe at %p:\n", frame);
dprintf("r0-r3: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r0, frame->r1, frame->r2, frame->r3);
dprintf("r4-r7: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r4, frame->r5, frame->r6, frame->r7);
dprintf("r8-r11: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r8, frame->r9, frame->r10, frame->r11);
dprintf("r12-r15: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r12, frame->r13, frame->r14, frame->r15);
dprintf("r16-r19: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r16, frame->r17, frame->r18, frame->r19);
dprintf("r20-r23: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r20, frame->r21, frame->r22, frame->r23);
dprintf("r24-r27: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r24, frame->r25, frame->r26, frame->r27);
dprintf("r28-r31: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r28, frame->r29, frame->r30, frame->r31);
dprintf(" ctr 0x%08lx xer 0x%08lx\n", frame->ctr, frame->xer);
dprintf(" cr 0x%08lx lr 0x%08lx\n", frame->cr, frame->lr);
dprintf(" dsisr 0x%08lx dar 0x%08lx\n", frame->dsisr, frame->dar);
dprintf(" srr1 0x%08lx srr0 0x%08lx\n", frame->srr1, frame->srr0);
dprintf(" d0 0x%08lx d1 0x%08lx d2 0x%08lx d3 0x%08lx\n",
frame->d0, frame->d1, frame->d2, frame->d3);
kprintf(" d4 0x%08lx d5 0x%08lx d6 0x%08lx d7 0x%08lx\n",
frame->d4, frame->d5, frame->d6, frame->d7);
kprintf(" a0 0x%08lx a1 0x%08lx a2 0x%08lx a3 0x%08lx\n",
frame->a0, frame->a1, frame->a2, frame->a3);
kprintf(" a4 0x%08lx a5 0x%08lx a6 0x%08lx a7 0x%08lx (sp)\n",
frame->d4, frame->d5, frame->d6, frame->d7);
/*kprintf(" pc 0x%08lx ccr 0x%02x\n",
frame->pc, frame->ccr);*/
kprintf(" pc 0x%08lx sr 0x%04x\n",
frame->pc, frame->sr);
dprintf("r0-r3: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->d0, frame->d1, frame->d2, frame->d3);
dprintf("r4-r7: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->d4, frame->d5, frame->d6, frame->d7);
dprintf("r8-r11: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->a0, frame->a1, frame->a2, frame->a3);
dprintf("r12-r15: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->a4, frame->a5, frame->a6, frame->a7);
dprintf(" pc 0x%08lx sr 0x%08lx\n", frame->pc, frame->sr);
}

View File

@ -1,34 +1,16 @@
/*
** Copyright 2003, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2003, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the OpenBeOS License.
*/
#include <arch_mmu.h>
#include <arch_cpu.h>
uint32
page_table_entry::PrimaryHash(uint32 virtualSegmentID, uint32 virtualAddress)
{
return (virtualSegmentID & 0x7ffff) ^ ((virtualAddress >> 12) & 0xffff);
}
uint32
page_table_entry::SecondaryHash(uint32 virtualSegmentID, uint32 virtualAddress)
{
return ~PrimaryHash(virtualSegmentID, virtualAddress);
}
uint32
page_table_entry::SecondaryHash(uint32 primaryHash)
{
return ~primaryHash;
}
void
m68k_get_page_table(page_table_entry_group **_pageTable, size_t *_size)
{

View File

@ -1,4 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*/

View File

@ -1,4 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*/

View File

@ -1,7 +1,13 @@
/*
** Copyright 2004, Axel Dörfler, axeld@pinc-software.de
** Distributed under the terms of the OpenBeOS License.
*/
* Copyright 2007, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* François Revol <revol@free.fr>
*
* Copyright 2004, Axel Dörfler, axeld@pinc-software.de
* Distributed under the terms of the OpenBeOS License.
*/
#include <KernelExport.h>

View File

@ -1,4 +1,10 @@
/*
* Copyright 2007, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* François Revol <revol@free.fr>
*
* Copyright 2006, Ingo Weinhold <bonefish@cs.tu-berlin.de>.
* All rights reserved. Distributed under the terms of the MIT License.
*/

View File

@ -5,6 +5,7 @@
* Authors:
* Axel Dörfler <axeld@pinc-software.de>
* Ingo Weinhold <bonefish@cs.tu-berlin.de>
* François Revol <revol@free.fr>
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
@ -22,19 +23,20 @@
#include <string.h>
#warning M68K: writeme!
// Valid initial arch_thread state. We just memcpy() it when initializing
// a new thread structure.
static struct arch_thread sInitialState;
// Helper function for thread creation, defined in arch_asm.S.
extern void ppc_kernel_thread_root();
extern void m68k_kernel_thread_root();
extern void ppc_switch_stack_and_call(addr_t newKstack, void (*func)(void *),
extern void m68k_switch_stack_and_call(addr_t newKstack, void (*func)(void *),
void *arg);
void
ppc_push_iframe(struct iframe_stack *stack, struct iframe *frame)
m68k_push_iframe(struct iframe_stack *stack, struct iframe *frame)
{
ASSERT(stack->index < IFRAME_TRACE_DEPTH);
stack->frames[stack->index++] = frame;
@ -42,7 +44,7 @@ ppc_push_iframe(struct iframe_stack *stack, struct iframe *frame)
void
ppc_pop_iframe(struct iframe_stack *stack)
m68k_pop_iframe(struct iframe_stack *stack)
{
ASSERT(stack->index > 0);
stack->index--;
@ -55,7 +57,7 @@ ppc_pop_iframe(struct iframe_stack *stack)
* from standard kernel threads.
*/
static struct iframe *
ppc_get_current_iframe(void)
m68k_get_current_iframe(void)
{
struct thread *thread = thread_get_current_thread();
@ -71,15 +73,15 @@ ppc_get_current_iframe(void)
* the thread is a kernel thread).
*/
struct iframe *
ppc_get_user_iframe(void)
m68k_get_user_iframe(void)
{
struct thread *thread = thread_get_current_thread();
int i;
for (i = thread->arch_info.iframes.index - 1; i >= 0; i--) {
struct iframe *frame = thread->arch_info.iframes.frames[i];
if (frame->srr1 & MSR_PRIVILEGE_LEVEL)
return frame;
// if (frame->srr1 & MSR_PRIVILEGE_LEVEL)
// return frame;
}
return NULL;
@ -141,13 +143,13 @@ arch_thread_init_kthread_stack(struct thread *t, int (*start_func)(void),
kstackTop -= 2;
kstackTop = (addr_t*)((addr_t)kstackTop & ~0xf);
// LR, CR, r2, r13-r31, f13-f31, as pushed by ppc_context_switch()
// LR, CR, r2, r13-r31, f13-f31, as pushed by m68k_context_switch()
kstackTop -= 22 + 2 * 19;
// let LR point to ppc_kernel_thread_root()
kstackTop[0] = (addr_t)&ppc_kernel_thread_root;
// let LR point to m68k_kernel_thread_root()
kstackTop[0] = (addr_t)&m68k_kernel_thread_root;
// the arguments of ppc_kernel_thread_root() are the functions to call,
// the arguments of m68k_kernel_thread_root() are the functions to call,
// provided in registers r13-r15
kstackTop[3] = (addr_t)entry_func;
kstackTop[4] = (addr_t)start_func;
@ -172,7 +174,7 @@ void
arch_thread_switch_kstack_and_call(struct thread *t, addr_t newKstack,
void (*func)(void *), void *arg)
{
ppc_switch_stack_and_call(newKstack, func, arg);
m68k_switch_stack_and_call(newKstack, func, arg);
}
@ -189,11 +191,11 @@ arch_thread_context_switch(struct thread *t_from, struct thread *t_to)
// the target thread has a user space
if (t_from->team != t_to->team) {
// switching to a new address space
ppc_translation_map_change_asid(&t_to->team->address_space->translation_map);
m68k_translation_map_change_asid(&t_to->team->address_space->translation_map);
}
}
ppc_context_switch(&t_from->arch_info.sp, t_to->arch_info.sp);
m68k_context_switch(&t_from->arch_info.sp, t_to->arch_info.sp);
}

View File

@ -1,7 +1,13 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2007, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Authors:
* François Revol <revol@free.fr>
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <boot/stage2.h>

View File

@ -1,4 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2005, Axel Dörfler, axeld@pinc-softare.de
* Distributed under the terms of the MIT License.
*/

View File

@ -1,4 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -37,58 +40,9 @@ arch_vm_init2(kernel_args *args)
// int bats[8];
// int i;
#if 0
// print out any bat mappings
getibats(bats);
dprintf("ibats:\n");
for(i = 0; i < 4; i++)
dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
getdbats(bats);
dprintf("dbats:\n");
for(i = 0; i < 4; i++)
dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
#endif
/**/
#warning M68K: disable TT0 and TT1, set up pmmu
#if 1
// turn off the first 2 BAT mappings (3 & 4 are used by the lower level code)
block_address_translation bat;
bat.Clear();
set_ibat0(&bat);
set_ibat1(&bat);
set_dbat0(&bat);
set_dbat1(&bat);
/* getibats(bats);
memset(bats, 0, 2 * 2);
setibats(bats);
getdbats(bats);
memset(bats, 0, 2 * 2);
setdbats(bats);
*/
#endif
#if 0
// just clear the first BAT mapping (0 - 256MB)
dprintf("msr 0x%x\n", getmsr());
{
unsigned int reg;
asm("mr %0,1" : "=r"(reg));
dprintf("sp 0x%x\n", reg);
}
dprintf("ka %p\n", ka);
getibats(bats);
dprintf("ibats:\n");
for(i = 0; i < 4; i++)
dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
bats[0] = bats[1] = 0;
setibats(bats);
getdbats(bats);
dprintf("dbats:\n");
for(i = 0; i < 4; i++)
dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]);
bats[0] = bats[1] = 0;
setdbats(bats);
#endif
return B_OK;
}

View File

@ -1,4 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -6,71 +9,6 @@
* Distributed under the terms of the NewOS License.
*/
/* (bonefish) Some explanatory words on how address translation is implemented
for the 32 bit PPC architecture.
I use the address type nomenclature as used in the PPC architecture
specs, i.e.
- effective address: An address as used by program instructions, i.e.
that's what elsewhere (e.g. in the VM implementation) is called
virtual address.
- virtual address: An intermediate address computed from the effective
address via the segment registers.
- physical address: An address referring to physical storage.
The hardware translates an effective address to a physical address using
either of two mechanisms: 1) Block Address Translation (BAT) or
2) segment + page translation. The first mechanism does this directly
using two sets (for data/instructions) of special purpose registers.
The latter mechanism is of more relevance here, though:
effective address (32 bit): [ 0 ESID 3 | 4 PIX 19 | 20 Byte 31 ]
| | |
(segment registers) | |
| | |
virtual address (52 bit): [ 0 VSID 23 | 24 PIX 39 | 40 Byte 51 ]
[ 0 VPN 39 | 40 Byte 51 ]
| |
(page table) |
| |
physical address (32 bit): [ 0 PPN 19 | 20 Byte 31 ]
ESID: Effective Segment ID
VSID: Virtual Segment ID
PIX: Page Index
VPN: Virtual Page Number
PPN: Physical Page Number
Unlike on x86 we can't just switch the context to another team by just
setting a register to another page directory, since we only have one
page table containing both kernel and user address mappings. Instead we
map the effective address space of kernel and *all* teams
non-intersectingly into the virtual address space (which fortunately is
20 bits wider), and use the segment registers to select the section of
the virtual address space for the current team. Half of the 16 segment
registers (8 - 15) map the kernel addresses, so they remain unchanged.
The range of the virtual address space a team's effective address space
is mapped to is defined by its vm_translation_map_arch_info::vsid_base,
which is the first of the 8 successive VSID values used for the team.
Which vsid_base values are already taken is defined by the set bits in
the bitmap sVSIDBaseBitmap.
TODO:
* If we want to continue to use the OF services, we would need to add
its address mappings to the kernel space. Unfortunately some stuff
(especially RAM) is mapped in an address range without the kernel
address space. We probably need to map those into each team's address
space as kernel read/write areas.
* The current locking scheme is insufficient. The page table is a resource
shared by all teams. We need to synchronize access to it. Probably via a
spinlock.
*/
#include <KernelExport.h>
#include <kernel.h>
#include <vm.h>
@ -85,396 +23,48 @@
#include "generic_vm_physical_page_mapper.h"
static struct page_table_entry_group *sPageTable;
static size_t sPageTableSize;
static uint32 sPageTableHashMask;
static area_id sPageTableArea;
/*
* Each mmu of the m68k family has its own tricks, registers and opcodes...
* so we use a function array to switch to the one we want.
*/
//extern struct m68k_vm_ops m68851_vm_ops;
extern struct m68k_vm_ops m68030_vm_ops;
//extern struct m68k_vm_ops m68040_vm_ops;
//extern struct m68k_vm_ops m68060_vm_ops;
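The m68k_vm_ops struct itself lives in a header this diff does not show; an illustrative sketch of its shape, inferred from the m68030_vm_ops initializer in arch_030_mmu.cpp and the wrappers below (member order and names are assumptions):

/* illustrative shape of the dispatch table (assumed, not from this diff) */
struct m68k_vm_ops {
	void *(*m68k_translation_map_get_pgdir)(vm_translation_map *map);
	status_t (*arch_vm_translation_map_init_map)(vm_translation_map *map,
		bool kernel);
	status_t (*arch_vm_translation_map_init_kernel_map_post_sem)(
		vm_translation_map *map);
	status_t (*arch_vm_translation_map_init)(kernel_args *args);
	/* ... one member per arch_vm_translation_map_*() wrapper below ... */
};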
// 64 MB of iospace
#define IOSPACE_SIZE (64*1024*1024)
// We only have small (4 KB) pages. The only reason for choosing greater chunk
// size is to keep the waste of memory limited, since the generic page mapper
// allocates structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
static addr_t sIOSpaceBase;
// The VSID is a 24 bit number. The lower three bits are defined by the
// (effective) segment number, which leaves us with a 21 bit space of
// VSID bases (= 2 * 1024 * 1024).
#define MAX_VSID_BASES (PAGE_SIZE * 8)
static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
static spinlock sVSIDBaseBitmapLock;
#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(map, vaddr) \
((map)->arch_data->vsid_base + ((vaddr) >> 28))
// vm_translation object stuff
typedef struct vm_translation_map_arch_info {
int vsid_base; // used VSIDs are vside_base ... vsid_base + 7
} vm_translation_map_arch_info;
void
ppc_translation_map_change_asid(vm_translation_map *map)
static m68k_vm_ops *get_vm_ops()
{
// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
int vsidBase = map->arch_data->vsid_base;
isync(); // synchronize context
asm("mtsr 0,%0" : : "g"(vsidBase));
asm("mtsr 1,%0" : : "g"(vsidBase + 1));
asm("mtsr 2,%0" : : "g"(vsidBase + 2));
asm("mtsr 3,%0" : : "g"(vsidBase + 3));
asm("mtsr 4,%0" : : "g"(vsidBase + 4));
asm("mtsr 5,%0" : : "g"(vsidBase + 5));
asm("mtsr 6,%0" : : "g"(vsidBase + 6));
asm("mtsr 7,%0" : : "g"(vsidBase + 7));
isync(); // synchronize context
}
static status_t
lock_tmap(vm_translation_map *map)
{
recursive_lock_lock(&map->lock);
return 0;
}
static status_t
unlock_tmap(vm_translation_map *map)
{
recursive_lock_unlock(&map->lock);
return 0;
}
static void
destroy_tmap(vm_translation_map *map)
{
if (map->map_count > 0) {
panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
map, map->map_count);
int cpu = arch_cpu_type;
cpu &= MMU_MASK;
switch (cpu) {
case MMU_NONE:
panic("Ugh, no mmu !?");
return NULL;
case MMU_68851:
panic("Not yet implemented (mmu)");
//return &m68851_vm_ops;
return NULL;
case MMU_68030:
return &m68030_vm_ops;
case MMU_68040:
//return &m68040_vm_ops;
case MMU_68060:
//return &m68060_vm_ops;
panic("Unimplemented yet (mmu)");
return NULL;
default:
panic("Invalid mmu type!");
return NULL;
}
// mark the vsid base not in use
int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT;
atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
~(1 << (baseBit % 32)));
free(map->arch_data);
recursive_lock_destroy(&map->lock);
}
static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
bool secondaryHash)
void *
m68k_translation_map_get_pgdir(vm_translation_map *map)
{
// lower 32 bit - set at once
entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
entry->_reserved0 = 0;
entry->referenced = false;
entry->changed = false;
entry->write_through = false;
entry->caching_inhibited = false;
entry->memory_coherent = false;
entry->guarded = false;
entry->_reserved1 = 0;
entry->page_protection = protection & 0x3;
eieio();
// we need to make sure that the lower 32 bit were
// already written when the entry becomes valid
// upper 32 bit
entry->virtual_segment_id = virtualSegmentID;
entry->secondary_hash = secondaryHash;
entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
entry->valid = true;
ppc_sync();
return get_vm_ops()->m68k_translation_map_get_pgdir(map);
}
static size_t
map_max_pages_need(vm_translation_map *map, addr_t start, addr_t end)
{
return 0;
}
static status_t
map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes)
{
// lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
uint32 protection = 0;
// ToDo: check this
// all kernel mappings are R/W to supervisor code
if (attributes & (B_READ_AREA | B_WRITE_AREA))
protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
// Search for a free page table slot using the primary hash value
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->valid)
continue;
fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
protection, false);
map->map_count++;
return B_OK;
}
// Didn't find one, try the secondary hash value
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->valid)
continue;
fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
protection, false);
map->map_count++;
return B_OK;
}
panic("vm_translation_map.map_tmap: hash table full\n");
return B_ERROR;
}
static page_table_entry *
lookup_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
{
// lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
// dprintf("vm_translation_map.lookup_page_table_entry: vsid %d, va 0x%lx\n", vsid, va);
// Search for the page table entry using the primary hash value
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->virtual_segment_id == virtualSegmentID
&& entry->secondary_hash == false
&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
return entry;
}
// Didn't find it, try the secondary hash value
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->virtual_segment_id == virtualSegmentID
&& entry->secondary_hash == true
&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
return entry;
}
return NULL;
}
static bool
remove_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
{
page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
if (entry) {
entry->valid = 0;
ppc_sync();
tlbie(virtualAddress);
eieio();
tlbsync();
ppc_sync();
}
return entry;
}
static status_t
unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
{
page_table_entry *entry;
start = ROUNDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
// dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
while (start < end) {
if (remove_page_table_entry(map, start))
map->map_count--;
start += B_PAGE_SIZE;
}
return B_OK;
}
static status_t
query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
{
page_table_entry *entry;
// default the flags to not present
*_outFlags = 0;
*_outPhysical = 0;
entry = lookup_page_table_entry(map, va);
if (entry == NULL)
return B_NO_ERROR;
// ToDo: check this!
if (IS_KERNEL_ADDRESS(va))
*_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
else
*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
*_outFlags |= entry->valid ? PAGE_PRESENT : 0;
*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
return B_OK;
}
static status_t
map_iospace_chunk(addr_t va, addr_t pa)
{
pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
// map the pages
return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}
static addr_t
get_mapped_size_tmap(vm_translation_map *map)
{
return map->map_count;
}
static status_t
protect_tmap(vm_translation_map *map, addr_t base, addr_t top, uint32 attributes)
{
// XXX finish
return B_ERROR;
}
static status_t
clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags)
{
page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
if (entry == NULL)
return B_NO_ERROR;
bool modified = false;
// clear the bits
if (flags & PAGE_MODIFIED && entry->changed) {
entry->changed = false;
modified = true;
}
if (flags & PAGE_ACCESSED && entry->referenced) {
entry->referenced = false;
modified = true;
}
// synchronize
if (modified) {
tlbie(virtualAddress);
eieio();
tlbsync();
ppc_sync();
}
return B_OK;
}
static void
flush_tmap(vm_translation_map *map)
{
// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
// even cut it here. We are supposed to invalidate all TLB entries for this
// map on all CPUs. We should loop over the virtual pages and invoke tlbie
// instead (which marks the entry invalid on all CPUs).
arch_cpu_global_TLB_invalidate();
}
static status_t
get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags)
{
return generic_get_physical_page(pa, va, flags);
}
static status_t
put_physical_page_tmap(addr_t va)
{
return generic_put_physical_page(va);
}
static vm_translation_map_ops tmap_ops = {
destroy_tmap,
lock_tmap,
unlock_tmap,
map_max_pages_need,
map_tmap,
unmap_tmap,
query_tmap,
query_tmap,
get_mapped_size_tmap,
protect_tmap,
clear_flags_tmap,
flush_tmap,
get_physical_page_tmap,
put_physical_page_tmap
};
// #pragma mark -
// VM API
@ -482,138 +72,35 @@ static vm_translation_map_ops tmap_ops = {
status_t
arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
{
// initialize the new object
map->ops = &tmap_ops;
map->map_count = 0;
if (!kernel) {
// During the boot process, there are no semaphores available at this
// point, so we only try to create the translation map lock if we're
// initializing a user translation map.
// vm_translation_map_init_kernel_map_post_sem() is used to complete
// the kernel translation map.
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
}
map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
if (map->arch_data == NULL) {
if (!kernel)
recursive_lock_destroy(&map->lock);
return B_NO_MEMORY;
}
cpu_status state = disable_interrupts();
acquire_spinlock(&sVSIDBaseBitmapLock);
// allocate a VSID base for this one
if (kernel) {
// The boot loader has set up the segment registers for identical
// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
// latter one for mapping the kernel address space (0x80000000...), the
// former one for the lower addresses required by the Open Firmware
// services.
map->arch_data->vsid_base = 0;
sVSIDBaseBitmap[0] |= 0x3;
} else {
int i = 0;
while (i < MAX_VSID_BASES) {
if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
i += 32;
continue;
}
if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
// we found it
sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
break;
}
i++;
}
if (i >= MAX_VSID_BASES)
panic("vm_translation_map_create: out of VSID bases\n");
map->arch_data->vsid_base = i << VSID_BASE_SHIFT;
}
release_spinlock(&sVSIDBaseBitmapLock);
restore_interrupts(state);
return B_OK;
return get_vm_ops()->arch_vm_translation_map_init_map(map, kernel);
}
status_t
arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
{
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
return B_OK;
return get_vm_ops()->arch_vm_translation_map_init_kernel_map_post_sem(map);
}
status_t
arch_vm_translation_map_init(kernel_args *args)
{
sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
sPageTableSize = args->arch_args.page_table.size;
sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;
// init physical page mapper
status_t error = generic_vm_physical_page_mapper_init(args,
map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
if (error != B_OK)
return error;
return B_OK;
return get_vm_ops()->arch_vm_translation_map_init(args);
}
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
// If the page table doesn't lie within the kernel address space, we
// remap it.
if (!IS_KERNEL_ADDRESS(sPageTable)) {
addr_t newAddress = (addr_t)sPageTable;
status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
false);
if (error != B_OK) {
panic("arch_vm_translation_map_init_post_area(): Failed to remap "
"the page table!");
return error;
}
// set the new page table address
addr_t oldVirtualBase = (addr_t)(sPageTable);
sPageTable = (page_table_entry_group*)newAddress;
// unmap the old pages
ppc_unmap_address_range(oldVirtualBase, sPageTableSize);
// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
}
// create an area to cover the page table
sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
// init physical page mapper
status_t error = generic_vm_physical_page_mapper_init_post_area(args);
if (error != B_OK)
return error;
return B_OK;
return get_vm_ops()->arch_vm_translation_map_init_post_area(args);
}
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
// init physical page mapper
return generic_vm_physical_page_mapper_init_post_sem(args);
return get_vm_ops()->arch_vm_translation_map_init_post_sem(args);
}
@ -627,32 +114,8 @@ status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
uint8 attributes, addr_t (*get_free_page)(kernel_args *))
{
uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
// 8 entries in a group
if (group->entry[i].valid)
continue;
fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, false);
return B_OK;
}
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
if (group->entry[i].valid)
continue;
fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, true);
return B_OK;
}
return B_ERROR;
return get_vm_ops()->arch_vm_translation_map_early_map(ka, virtualAddress, physicalAddress,
attributes, get_free_page);
}
@ -661,85 +124,32 @@ arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t
status_t
arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
{
//PANIC_UNIMPLEMENTED();
panic("vm_translation_map_quick_query(): not yet implemented\n");
return B_OK;
return get_vm_ops()->arch_vm_translation_map_early_query(va, out_physical);
}
// #pragma mark -
#if 0
status_t
ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
m68k_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
size_t size)
{
addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
physicalAddress = ROUNDOWN(physicalAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
// map the pages
for (; virtualAddress < virtualEnd;
virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
status_t error = map_tmap(&addressSpace->translation_map,
virtualAddress, physicalAddress,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (error != B_OK)
return error;
}
return B_OK;
return get_vm_ops()->m68k_map_address_range(virtualAddress, physicalAddress, size);
}
void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
m68k_unmap_address_range(addr_t virtualAddress, size_t size)
{
addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
for (0; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
remove_page_table_entry(&addressSpace->translation_map, virtualAddress);
get_vm_ops()->m68k_unmap_address_range(virtualAddress, size);
}
status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
m68k_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
addr_t virtualAddress = ROUNDOWN(*_virtualAddress, B_PAGE_SIZE);
size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
vm_address_space *addressSpace = vm_kernel_address_space();
// reserve space in the address space
void *newAddress = NULL;
status_t error = vm_reserve_address_range(addressSpace->id, &newAddress,
B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (error != B_OK)
return error;
// get the area's first physical page
page_table_entry *entry = lookup_page_table_entry(
&addressSpace->translation_map, virtualAddress);
if (!entry)
return B_ERROR;
addr_t physicalBase = entry->physical_page_number << 12;
// map the pages
error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
if (error != B_OK)
return error;
*_virtualAddress = (addr_t)newAddress;
// unmap the old pages
if (unmap)
ppc_unmap_address_range(virtualAddress, size);
return B_OK;
return get_vm_ops()->m68k_remap_address_range(_virtualAddress, size, unmap);
}
#endif

File diff suppressed because it is too large