From 4e44040df4d2763e5df092771c9464d4692fdaa3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fran=C3=A7ois=20Revol?= Date: Sat, 3 Nov 2007 21:04:42 +0000 Subject: [PATCH] Ditto. cleanup Less ppc, more m68k :) git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@22822 a95241bf-73f2-0310-859d-f6bbb57e9c96 --- src/system/kernel/arch/m68k/Jamfile | 22 + src/system/kernel/arch/m68k/arch_030_asm.S | 24 + src/system/kernel/arch/m68k/arch_030_cpu.cpp | 77 ++ src/system/kernel/arch/m68k/arch_030_mmu.cpp | 213 +++ src/system/kernel/arch/m68k/arch_asm.S | 195 +-- src/system/kernel/arch/m68k/arch_atomic.c | 6 + src/system/kernel/arch/m68k/arch_cpu.cpp | 114 +- src/system/kernel/arch/m68k/arch_debug.cpp | 48 +- .../kernel/arch/m68k/arch_debug_console.cpp | 3 + src/system/kernel/arch/m68k/arch_elf.cpp | 29 +- src/system/kernel/arch/m68k/arch_int.cpp | 34 +- src/system/kernel/arch/m68k/arch_mmu.cpp | 32 +- src/system/kernel/arch/m68k/arch_platform.cpp | 3 + .../kernel/arch/m68k/arch_real_time_clock.cpp | 3 + src/system/kernel/arch/m68k/arch_smp.c | 12 +- .../kernel/arch/m68k/arch_system_info.c | 6 + src/system/kernel/arch/m68k/arch_thread.c | 32 +- src/system/kernel/arch/m68k/arch_timer.c | 12 +- .../kernel/arch/m68k/arch_user_debugger.cpp | 3 + src/system/kernel/arch/m68k/arch_vm.cpp | 56 +- .../arch/m68k/arch_vm_translation_map.cpp | 694 +--------- .../m68k/arch_vm_translation_map_impl.cpp | 1231 +++++++++++++++++ 22 files changed, 1870 insertions(+), 979 deletions(-) create mode 100644 src/system/kernel/arch/m68k/arch_030_asm.S create mode 100644 src/system/kernel/arch/m68k/arch_030_cpu.cpp create mode 100644 src/system/kernel/arch/m68k/arch_030_mmu.cpp create mode 100644 src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp diff --git a/src/system/kernel/arch/m68k/Jamfile b/src/system/kernel/arch/m68k/Jamfile index b97c47d75f..bae5105719 100644 --- a/src/system/kernel/arch/m68k/Jamfile +++ b/src/system/kernel/arch/m68k/Jamfile @@ -8,6 +8,24 @@ UsePrivateHeaders kernel [ FDirName kernel arch $(TARGET_ARCH) ] SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ; +# cpu-specific stuff +KernelMergeObject arch_m68k_030.o : + arch_030_cpu.cpp + arch_030_mmu.cpp + arch_030_asm.S + : $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused -m68030 +; + +KernelMergeObject arch_m68k_040.o : + arch_040.cpp + : $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused -m68040 +; + +KernelMergeObject arch_m68k_060.o : + arch_060.cpp + : $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused -m68060 +; + KernelStaticLibrary libm68k : arch_atomic.c arch_cpu.cpp @@ -30,6 +48,10 @@ KernelStaticLibrary libm68k : arch_asm.S generic_vm_physical_page_mapper.cpp + + arch_m68k_030.o +# arch_m68k_040.a +# arch_m68k_060.a : $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused ; diff --git a/src/system/kernel/arch/m68k/arch_030_asm.S b/src/system/kernel/arch/m68k/arch_030_asm.S new file mode 100644 index 0000000000..3ac764ec58 --- /dev/null +++ b/src/system/kernel/arch/m68k/arch_030_asm.S @@ -0,0 +1,24 @@ + +#define FUNCTION(x) .global x; .type x,@function; x + +.text + + + /* that one can be inlined */ +FUNCTION(flush_insn_pipeline_030): + nop + rts + + /* flush all ATC entries */ +FUNCTION(flush_atc_all_030): + pflusha + rts + + /* flush all ATC entries */ +FUNCTION(flush_atc_addr_030): + move.l (4,%a7),%a0 + pflush #0,#0,(%a0) + rts + + + diff --git a/src/system/kernel/arch/m68k/arch_030_cpu.cpp b/src/system/kernel/arch/m68k/arch_030_cpu.cpp new file mode 100644 index 0000000000..4fa7e11c0c --- /dev/null +++ b/src/system/kernel/arch/m68k/arch_030_cpu.cpp @@ -0,0 +1,77 @@ +/* 
+ * Copyright 2003-2007, Haiku Inc. All rights reserved. + * Distributed under the terms of the MIT License. + * + * Authors: + * François Revol + */ + +#include + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/* from arch_030_asm.S */ +extern void flush_insn_pipeline_030(void); +extern void flush_atc_all_030(void); +extern void flush_atc_addr_030(void *addr); + +#ifdef __cplusplus +} +#endif + + + +#define CACHELINE 16 + +static void +sync_icache_030(void *address, size_t len) +{ + int l, off; + char *p; + uint32 cacr; + + off = (unsigned int)address & (CACHELINE - 1); + len += off; + + l = len; + p = (char *)address - off; + asm volatile ("nop"); + asm volatile ("movec %%cacr,%0" : "=r"(cacr):); + cacr |= 0x00000004; /* ClearInstructionCacheEntry */ + do { + /* the 030 invalidates only 1 long of the cache line */ + //XXX: what about 040 and 060 ? + asm volatile ("movec %0,%%caar\n" \ + "movec %1,%%cacr\n" \ + "addq.l #4,%0\n" \ + "movec %0,%%caar\n" \ + "movec %1,%%cacr\n" \ + "addq.l #4,%0\n" \ + "movec %0,%%caar\n" \ + "movec %1,%%cacr\n" \ + "addq.l #4,%0\n" \ + "movec %0,%%caar\n" \ + "movec %1,%%cacr\n" \ + :: "r"(p), "r"(cacr)); + p += CACHELINE; + } while ((l -= CACHELINE) > 0); + asm volatile ("nop"); +} + + +struct m68k_cpu_ops cpu_ops_030 = { + &flush_insn_pipeline_030, + &flush_atc_all_030, + &flush_atc_all_030, // no global flag, so no useronly flushing + &flush_atc_addr_030, + &sync_icache_030, // dcache is the same + &sync_icache_030, + NULL // idle +}; diff --git a/src/system/kernel/arch/m68k/arch_030_mmu.cpp b/src/system/kernel/arch/m68k/arch_030_mmu.cpp new file mode 100644 index 0000000000..47282f9f7b --- /dev/null +++ b/src/system/kernel/arch/m68k/arch_030_mmu.cpp @@ -0,0 +1,213 @@ +/* + * Copyright 2007, Haiku Inc. All rights reserved. + * Distributed under the terms of the MIT License. 
+ * + * Authors: + * François Revol + */ + +#include + + +#define ARCH_M68K_MMU_TYPE MMU_68030 + +enum descriptor_types { + DT_INVALID = 0, // invalid entry + DT_PAGE, // page descriptor + DT_VALID_4, // short page table descriptor + DT_VALID_8, // long page table descriptor +}; + + + // = names in MC user's manual + // or comments +struct short_page_directory_entry { + // upper 32 bits + uint32 type : 2; // DT_* + uint32 write_protect : 1; + uint32 accessed : 1; // = used + uint32 addr : 28; // address +}; + +struct long_page_directory_entry { + // upper 32 bits + uint32 type : 2; + uint32 write_protect : 1; + uint32 accessed : 1; // = used + uint32 _zero1 : 4; + uint32 supervisor : 1; + uint32 _zero2 : 1; + uint32 _ones : 6; + uint32 limit : 15; + uint32 low_up : 1; // limit is lower(1)/upper(0) + // lower 32 bits + uint32 unused : 4; // + uint32 addr : 28; // address +}; + +struct short_page_table_entry { + uint32 type : 2; + uint32 write_protect : 1; + uint32 accessed : 1; // = used + uint32 dirty : 1; // = modified + uint32 _zero1 : 1; + uint32 cache_disabled : 1; // = cache_inhibit + uint32 _zero2 : 1; + uint32 addr : 24; // address +}; + +struct long_page_table_entry { + // upper 32 bits + uint32 type : 2; + uint32 write_protect : 1; + uint32 accessed : 1; // = used + uint32 dirty : 1; // = modified + uint32 _zero1 : 1; + uint32 cache_disabled : 1; // = cache_inhibit + uint32 _zero2 : 1; + uint32 supervisor : 1; + uint32 _zero3 : 1; + uint32 _ones : 6; + // limit only used on early table terminators, else unused + uint32 limit : 15; + uint32 low_up : 1; // limit is lower(1)/upper(0) + // lower 32 bits + uint32 unused : 8; // + uint32 addr : 24; // address +}; + +/* rarely used */ +struct short_indirect_entry { + // upper 32 bits + uint32 type : 2; // DT_* + uint32 addr : 30; // address +}; + +struct long_indirect_entry { + // upper 32 bits + uint32 type : 2; + uint32 unused1 : 30; + // lower 32 bits + uint32 unused2 : 2; // + uint32 addr : 30; // address +}; + +/* for clarity: + - the top level page directory will be called "page root", (root or rtdir) + - the 2nd level will be "page directory" like on x86, (pgdir) + - the 3rd level is a "page table" as on x86. (pgtbl) +*/ + +typedef struct short_page_directory_entry page_root_entry; +typedef struct short_page_directory_entry page_directory_entry; +typedef struct long_page_table_entry page_table_entry; +typedef struct long_indirect_entry page_indirect_entry; + +/* scalar storage type that maps them */ +typedef uint32 page_root_entry_scalar; +typedef uint32 page_directory_entry_scalar; +typedef uint64 page_table_entry_scalar; +typedef uint64 page_indirect_entry_scalar; + +#define DT_ROOT DT_VALID_4 +#define DT_DIR DT_VALID_8 +//#define DT_PAGE DT_PAGE :) +#define DT_INDIRECT DT_VALID_8 + +/* default scalar values for entries */ +#define DFL_ROOTENT_VAL 0x00000000 +#define DFL_DIRENT_VAL 0x00000000 +// limit disabled, 6bits at 1 +// (limit isn't used on that level, but just in case) +#define DFL_PAGEENT_VAL 0x7FFFFC0000000000LL + +#define NUM_ROOTENT_PER_TBL 128 +#define NUM_DIRENT_PER_TBL 128 +#define NUM_PAGEENT_PER_TBL 64 + +/* unlike x86, the root/dir/page table sizes are different than B_PAGE_SIZE + * so we will have to fit more than one on a page to avoid wasting space. + * We will allocate a group of tables with the one we want inside, and + * add them from the aligned index needed, to make it easy to free them. 
+ */ + +#define SIZ_ROOTTBL (128 * sizeof(page_root_entry)) +#define SIZ_DIRTBL (128 * sizeof(page_directory_entry)) +#define SIZ_PAGETBL (64 * sizeof(page_table_entry)) + +//#define NUM_ROOTTBL_PER_PAGE (B_PAGE_SIZE / SIZ_ROOTTBL) +#define NUM_DIRTBL_PER_PAGE (B_PAGE_SIZE / SIZ_DIRTBL) +#define NUM_PAGETBL_PER_PAGE (B_PAGE_SIZE / SIZ_PAGETBL) + +/* macros to get the physical page or table number and address of tables from + * descriptors */ +#if 0 +/* XXX: + suboptimal: + struct foo { + int a:2; + int b:30; + } v = {...}; + *(int *)0 = (v.b) << 2; + generates: + sarl $2, %eax + sall $2, %eax + We use a cast + bitmasking, since all address fields are already shifted +*/ +// from a root entry +#define PREA_TO_TA(a) ((a) << 4) +#define PREA_TO_PN(a) ((a) >> (12-4)) +#define PREA_TO_PA(a) ((a) << 4) +#define TA_TO_PREA(a) ((a) >> 4) +//... +#endif + +// TA: table address +// PN: page number +// PA: page address +// PO: page offset (offset of table in page) +// PI: page index (index of table relative to page start) + +// from a root entry +#define PRE_TO_TA(a) ((*(uint32 *)(&(a))) & ~((1<<4)-1)) +#define PRE_TO_PN(e) ((*(uint32 *)(&(e))) >> 12) +#define PRE_TO_PA(e) ((*(uint32 *)(&(e))) & ~((1<<12)-1)) +#define PRE_TO_PO(e) ((*(uint32 *)(&(e))) & ((1<<12)-1)) +#define PRE_TO_PI(e) (((*(uint32 *)(&(e))) & ((1<<12)-1)) / SIZ_DIRTBL) +#define TA_TO_PREA(a) ((a) >> 4) +// from a directory entry +#define PDE_TO_TA(a) ((*(uint32 *)(&(a))) & ~((1<<4)-1)) +#define PDE_TO_PN(e) ((*(uint32 *)(&(e))) >> 12) +#define PDE_TO_PA(e) ((*(uint32 *)(&(e))) & ~((1<<12)-1)) +#define PDE_TO_PO(e) ((*(uint32 *)(&(e))) & ((1<<12)-1)) +#define PDE_TO_PI(e) (((*(uint32 *)(&(e))) & ((1<<12)-1)) / SIZ_PAGETBL) +#define TA_TO_PDEA(a) ((a) >> 4) +// from a table entry +#define PTE_TO_TA(a) ((((uint32 *)(&(a)))[1]) & ~((1<<8)-1)) +#define PTE_TO_PN(e) ((((uint32 *)(&(e)))[1]) >> 12) +#define PTE_TO_PA(e) ((((uint32 *)(&(e)))[1]) & ~((1<<12)-1)) +#define TA_TO_PTEA(a) ((a) >> 8) +// from an indirect entry +#define PIE_TO_TA(a) ((((uint32 *)(&(a)))[1]) & ~((1<<2)-1)) +#define PIE_TO_PN(e) ((((uint32 *)(&(e)))[1]) >> 12) +#define PIE_TO_PA(e) ((((uint32 *)(&(e)))[1]) & ~((1<<12)-1)) +#define TA_TO_PIEA(a) ((a) >> 2) + + +#include "arch_vm_translation_map_impl.cpp" + +struct m68k_vm_ops m68030_vm_ops = { + m68k_translation_map_get_pgdir, + arch_vm_translation_map_init_map, + arch_vm_translation_map_init_kernel_map_post_sem, + arch_vm_translation_map_init, + arch_vm_translation_map_init_post_area, + arch_vm_translation_map_init_post_sem, + arch_vm_translation_map_early_map, + arch_vm_translation_map_early_query, +#if 0 + m68k_map_address_range, + m68k_unmap_address_range, + m68k_remap_address_range +#endif +}; diff --git a/src/system/kernel/arch/m68k/arch_asm.S b/src/system/kernel/arch/m68k/arch_asm.S index fe43f229dc..3e5b21f840 100644 --- a/src/system/kernel/arch/m68k/arch_asm.S +++ b/src/system/kernel/arch/m68k/arch_asm.S @@ -14,183 +14,70 @@ // ToDo: fixme FUNCTION(reboot): - reset + reset /* void arch_int_enable_interrupts(void) */ FUNCTION(arch_int_enable_interrupts): - mfmsr %r3 // load msr - - li %r4, 1 - insrwi %r3, %r4, 1, 31 - MSR_EXCEPTIONS_ENABLED - // sets bit 15, EE - - mtmsr %r3 // put it back into the msr - blr + andi #0xf8ff,%sr + rts /* int arch_int_disable_interrupts(void) - * r3 */ FUNCTION(arch_int_disable_interrupts): - mfmsr %r4 // load msr - - mr %r3, %r4 // save old state - rlwinm %r4, %r4, 0, 32 - MSR_EXCEPTIONS_ENABLED, 30 - MSR_EXCEPTIONS_ENABLED - // clears bit 15, EE - - mtmsr %r4 // put it back 
into the msr - blr + clr.l %d0 + move %sr,%d0 + move.l %d0,%d1 + ori.w #%x0700,%d1 + move %d1,%sr + // return value: previous IPM + lsr.l #8,%d0 + andi.l #7,%d0 + rts /* void arch_int_restore_interrupts(int oldState) - * r3 */ FUNCTION(arch_int_restore_interrupts): - mfmsr %r4 + move.l (4,%a7),%d0 + // make sure we only have IPM bits + andi.w #7,%d0 + lsl.w #8,%d0 + move %sr,%d1 + andi.w #0xf8ff,%d1 + or.w %d0,%d1 + move %d1,%sr + rts - rlwimi %r4, %r3, 0, 31 - MSR_EXCEPTIONS_ENABLED, 31 - MSR_EXCEPTIONS_ENABLED - // clear or set bit 15, EE to the same state as in r3, oldState - - mtmsr %r4 - blr /* bool arch_int_are_interrupts_enabled(void) */ FUNCTION(arch_int_are_interrupts_enabled): - mfmsr %r3 // load msr - extrwi %r3, %r3, 1, 31 - MSR_EXCEPTIONS_ENABLED - // mask out the EE bit - blr + clr.l %d0 + move %sr,%d1 + andi.w 0x0700,%d1 + bne arch_int_are_interrupts_enabled_no + moveq.l #1,%d0 +arch_int_are_interrupts_enabled_no: + rts // ToDo: fixme FUNCTION(dbg_save_registers): - blr + rts + /* long long get_time_base(void) */ FUNCTION(get_time_base): -1: - mftbu %r3 // get the upper time base register - mftb %r4 // get the lower time base register - mftbu %r5 // get the upper again - cmpw %r5, %r3 // see if it changed while we were reading the lower - bne- 1b // if so, repeat - blr +#warning M68K: implement get_time_base! + clr.l %d0 + clr.l %d1 + //passed through a0 or d0:d1 ? + rts -/* void getibats(int bats[8]); */ -FUNCTION(getibats): - mfibatu %r0,0 - stw %r0,0(%r3) - mfibatl %r0,0 - stwu %r0,4(%r3) - mfibatu %r0,1 - stwu %r0,4(%r3) - mfibatl %r0,1 - stwu %r0,4(%r3) - mfibatu %r0,2 - stwu %r0,4(%r3) - mfibatl %r0,2 - stwu %r0,4(%r3) - mfibatu %r0,3 - stwu %r0,4(%r3) - mfibatl %r0,3 - stwu %r0,4(%r3) - blr -// void setibats(int bats[8]); -FUNCTION(setibats): - lwz %r0,0(%r3) - mtibatu 0,%r0 - isync - lwzu %r0,4(%r3) - mtibatl 0,%r0 - isync - lwzu %r0,4(%r3) - mtibatu 1,%r0 - isync - lwzu %r0,4(%r3) - mtibatl 1,%r0 - isync - lwzu %r0,4(%r3) - mtibatu 2,%r0 - isync - lwzu %r0,4(%r3) - mtibatl 2,%r0 - isync - lwzu %r0,4(%r3) - mtibatu 3,%r0 - isync - lwzu %r0,4(%r3) - mtibatl 3,%r0 - isync - - blr - -// void getdbats(int bats[8]); -FUNCTION(getdbats): - mfdbatu %r0,0 - stw %r0,0(%r3) - mfdbatl %r0,0 - stwu %r0,4(%r3) - mfdbatu %r0,1 - stwu %r0,4(%r3) - mfdbatl %r0,1 - stwu %r0,4(%r3) - mfdbatu %r0,2 - stwu %r0,4(%r3) - mfdbatl %r0,2 - stwu %r0,4(%r3) - mfdbatu %r0,3 - stwu %r0,4(%r3) - mfdbatl %r0,3 - stwu %r0,4(%r3) - blr - -// void setdbats(int bats[8]); -FUNCTION(setdbats): - lwz %r0,0(%r3) - mtdbatu 0,%r0 - lwzu %r0,4(%r3) - mtdbatl 0,%r0 - lwzu %r0,4(%r3) - mtdbatu 1,%r0 - lwzu %r0,4(%r3) - mtdbatl 1,%r0 - lwzu %r0,4(%r3) - mtdbatu 2,%r0 - lwzu %r0,4(%r3) - mtdbatl 2,%r0 - lwzu %r0,4(%r3) - mtdbatu 3,%r0 - lwzu %r0,4(%r3) - mtdbatl 3,%r0 - sync - - blr - -// unsigned int gethid0(); -FUNCTION(gethid0): - mfspr %r3, 1008 - blr - -// void sethid0(unsigned int val); -FUNCTION(sethid0): - mtspr 1008, %r3 - blr - -// unsigned int getl2cr(); -FUNCTION(getl2cr): - mfspr %r3, 1017 - blr - -// void setl2cr(unsigned int val); -FUNCTION(setl2cr): - mtspr 1017, %r3 - blr - - -// void ppc_context_switch(addr_t *old_sp, addr_t new_sp); -FUNCTION(ppc_context_switch): +// void m68k_context_switch(addr_t *old_sp, addr_t new_sp); +FUNCTION(m68k_context_switch): // regs to push on the stack: f13-f31, r13-r31, cr, r2, lr @@ -305,20 +192,20 @@ FUNCTION(ppc_context_switch): blr -// void ppc_switch_stack_and_call(addr_t newKstack, +// void m68k_switch_stack_and_call(addr_t newKstack, // void (*func)(void 
*), void *arg) -FUNCTION(ppc_switch_stack_and_call): +FUNCTION(m68k_switch_stack_and_call): mr %r1, %r3 // set the new stack pointer mtctr %r4 // move the target function into CTR mr %r3, %r5 // move the arg to this func to the new arg bctr -// ppc_kernel_thread_root(): parameters in r13-r15, the functions to call +// m68k_kernel_thread_root(): parameters in r13-r15, the functions to call // (in that order). The function is used when spawing threads. It usually calls // an initialization function, the actual thread function, and a function that // destroys the thread. -FUNCTION(ppc_kernel_thread_root): +FUNCTION(m68k_kernel_thread_root): mtlr %r13 blrl mtlr %r14 diff --git a/src/system/kernel/arch/m68k/arch_atomic.c b/src/system/kernel/arch/m68k/arch_atomic.c index 9b62202f71..23a30d3292 100644 --- a/src/system/kernel/arch/m68k/arch_atomic.c +++ b/src/system/kernel/arch/m68k/arch_atomic.c @@ -1,4 +1,10 @@ /* + * Copyright 2007, Haiku Inc. All rights reserved. + * Distributed under the terms of the MIT License. + * + * Authors: + * François Revol + * * Copyright 2003, Marcus Overhagen. All rights reserved. * Distributed under the terms of the OpenBeOS License. */ diff --git a/src/system/kernel/arch/m68k/arch_cpu.cpp b/src/system/kernel/arch/m68k/arch_cpu.cpp index 57a824f6ed..628b4e86ac 100644 --- a/src/system/kernel/arch/m68k/arch_cpu.cpp +++ b/src/system/kernel/arch/m68k/arch_cpu.cpp @@ -1,4 +1,7 @@ /* + * Copyright 2007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de. * Distributed under the terms of the MIT License. * @@ -14,6 +17,16 @@ #include #include +extern struct m68k_cpu_ops cpu_ops_030; +extern struct m68k_cpu_ops cpu_ops_040; +extern struct m68k_cpu_ops cpu_ops_060; + +struct m68k_cpu_ops cpu_ops; + +int cpu_type; +int fpu_type; +int mmu_type; +int platform; status_t arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu) @@ -32,6 +45,47 @@ arch_cpu_preboot_init_percpu(kernel_args *args, int curr_cpu) status_t arch_cpu_init(kernel_args *args) { + cpu_type = args->arch_args.cpu_type; + fpu_type = args->arch_args.fpu_type; + mmu_type = args->arch_args.mmu_type; + platform = args->arch_args.platform; + + switch (cpu_type) { + case CPU_68020: + case CPU_68030: + cpu_ops.flush_insn_pipeline = cpu_ops_030.flush_insn_pipeline; + cpu_ops.flush_atc_all = cpu_ops_030.flush_atc_all; + cpu_ops.flush_atc_user = cpu_ops_030.flush_atc_user; + cpu_ops.flush_atc_addr = cpu_ops_030.flush_atc_addr; + cpu_ops.flush_cache_line = cpu_ops_030.flush_cache_line; + cpu_ops.idle = cpu_ops_030.idle; // NULL + //cpu_ops. = cpu_ops_030.; + break; +#ifdef SUPPORTS_040 + case CPU_68040: + cpu_ops.flush_insn_pipeline = cpu_ops_040.flush_insn_pipeline; + cpu_ops.flush_atc_all = cpu_ops_040.flush_atc_all; + cpu_ops.flush_atc_user = cpu_ops_040.flush_atc_user; + cpu_ops.flush_atc_addr = cpu_ops_040.flush_atc_addr; + cpu_ops.flush_cache_line = cpu_ops_040.flush_cache_line; + cpu_ops.idle = cpu_ops_040.idle; // NULL + //cpu_ops. = cpu_ops_040.; + break; +#endif +#ifdef SUPPORTS_060 + case CPU_68060: + cpu_ops.flush_insn_pipeline = cpu_ops_060.flush_insn_pipeline; + cpu_ops.flush_atc_all = cpu_ops_060.flush_atc_all; + cpu_ops.flush_atc_user = cpu_ops_060.flush_atc_user; + cpu_ops.flush_atc_addr = cpu_ops_060.flush_atc_addr; + cpu_ops.flush_cache_line = cpu_ops_060.flush_cache_line; + cpu_ops.idle = cpu_ops_060.idle; + //cpu_ops. 
= cpu_ops_060.; + break; +#endif + default: + panic("unknown cpu_type 0x%08lx\n", args->arch_args.cpu_type); + } return B_OK; } @@ -48,53 +102,24 @@ arch_cpu_init_post_modules(kernel_args *args) return B_OK; } -#define CACHELINE 16 void arch_cpu_sync_icache(void *address, size_t len) { - int l, off; - char *p; - uint32 cacr; - - off = (unsigned int)address & (CACHELINE - 1); - len += off; - - l = len; - p = (char *)address - off; - asm volatile ("movec %%cacr,%0" : "=r"(cacr):); - cacr |= 0x00000004; /* ClearInstructionCacheEntry */ - do { - /* the 030 invalidates only 1 long of the cache line */ - //XXX: what about 040 and 060 ? - asm volatile ("movec %0,%%caar\n" \ - "movec %1,%%cacr\n" \ - "addq.l #4,%0\n" \ - "movec %0,%%caar\n" \ - "movec %1,%%cacr\n" \ - "addq.l #4,%0\n" \ - "movec %0,%%caar\n" \ - "movec %1,%%cacr\n" \ - "addq.l #4,%0\n" \ - "movec %0,%%caar\n" \ - "movec %1,%%cacr\n" \ - :: "r"(p), "r"(cacr)); - p += CACHELINE; - } while ((l -= CACHELINE) > 0); - m68k_nop(); + cpu_ops.flush_icache(address, len); } void arch_cpu_invalidate_TLB_range(addr_t start, addr_t end) { - m68k_nop(); + cpu_ops.flush_insn_pipeline(); while (start < end) { - pflush(start); - m68k_nop(); + cpu_ops.flush_atc_addr(start); + cpu_ops.flush_insn_pipeline(); start += B_PAGE_SIZE; } - m68k_nop(); + cpu_ops.flush_insn_pipeline(); } @@ -103,29 +128,30 @@ arch_cpu_invalidate_TLB_list(addr_t pages[], int num_pages) { int i; - m68k_nop(); + cpu_ops.flush_insn_pipeline(); for (i = 0; i < num_pages; i++) { - pflush(pages[i]); - m68k_nop(); + cpu_ops.flush_atc_addr(pages[i]); + cpu_ops.flush_insn_pipeline(); } - m68k_nop(); + cpu_ops.flush_insn_pipeline(); } void arch_cpu_global_TLB_invalidate(void) { - m68k_nop(); - pflusha(); - m68k_nop(); + cpu_ops.flush_insn_pipeline(); + cpu_ops.flush_atc_all(); + cpu_ops.flush_insn_pipeline(); } void arch_cpu_user_TLB_invalidate(void) { - // pflushfd ? - arch_cpu_global_TLB_invalidate(); + cpu_ops.flush_insn_pipeline(); + cpu_ops.flush_atc_user(); + cpu_ops.flush_insn_pipeline(); } @@ -224,6 +250,8 @@ arch_cpu_shutdown(bool reboot) void arch_cpu_idle(void) { + if (cpu_ops.idle) + cpu_ops.idle(); #warning M68K: use LPSTOP ? //asm volatile ("lpstop"); } diff --git a/src/system/kernel/arch/m68k/arch_debug.cpp b/src/system/kernel/arch/m68k/arch_debug.cpp index 6b5dd896a2..8b37c950ca 100644 --- a/src/system/kernel/arch/m68k/arch_debug.cpp +++ b/src/system/kernel/arch/m68k/arch_debug.cpp @@ -1,10 +1,11 @@ /* - * Copyright 2003-2006, Haiku Inc. All rights reserved. + * Copyright 2003-2007, Haiku Inc. All rights reserved. * Distributed under the terms of the MIT License. * * Authors: * Axel Dörfler * Ingo Weinhold + * François Revol */ @@ -56,7 +57,8 @@ static inline stack_frame * get_current_stack_frame() { stack_frame *frame; - asm volatile("mr %0, %%r1" : "=r"(frame)); +#warning M68K: a6 or a7 ? 
+ asm volatile("move.l %%a6,%0" : "=r"(frame)); return frame; } @@ -69,7 +71,7 @@ get_next_frame(addr_t framePointer, addr_t *next, addr_t *ip) // set fault handler, so that we can safely access user stacks if (thread) { - if (ppc_set_fault_handler(&thread->fault_handler, (addr_t)&&error)) + if (m68k_set_fault_handler(&thread->fault_handler, (addr_t)&&error)) goto error; } @@ -202,31 +204,23 @@ return 0; if (frame) { kprintf("iframe at %p\n", frame); - kprintf(" r0 0x%08lx r1 0x%08lx r2 0x%08lx r3 0x%08lx\n", - frame->r0, frame->r1, frame->r2, frame->r3); - kprintf(" r4 0x%08lx r5 0x%08lx r6 0x%08lx r7 0x%08lx\n", - frame->r4, frame->r5, frame->r6, frame->r7); - kprintf(" r8 0x%08lx r9 0x%08lx r10 0x%08lx r11 0x%08lx\n", - frame->r8, frame->r9, frame->r10, frame->r11); - kprintf(" r12 0x%08lx r13 0x%08lx r14 0x%08lx r15 0x%08lx\n", - frame->r12, frame->r13, frame->r14, frame->r15); - kprintf(" r16 0x%08lx r17 0x%08lx r18 0x%08lx r19 0x%08lx\n", - frame->r16, frame->r17, frame->r18, frame->r19); - kprintf(" r20 0x%08lx r21 0x%08lx r22 0x%08lx r23 0x%08lx\n", - frame->r20, frame->r21, frame->r22, frame->r23); - kprintf(" r24 0x%08lx r25 0x%08lx r26 0x%08lx r27 0x%08lx\n", - frame->r24, frame->r25, frame->r26, frame->r27); - kprintf(" r28 0x%08lx r29 0x%08lx r30 0x%08lx r31 0x%08lx\n", - frame->r28, frame->r29, frame->r30, frame->r31); - kprintf(" lr 0x%08lx cr 0x%08lx xer 0x%08lx ctr 0x%08lx\n", - frame->lr, frame->cr, frame->xer, frame->ctr); - kprintf("fpscr 0x%08lx\n", frame->fpscr); - kprintf(" srr0 0x%08lx srr1 0x%08lx dar 0x%08lx dsisr 0x%08lx\n", - frame->srr0, frame->srr1, frame->dar, frame->dsisr); - kprintf(" vector: 0x%lx\n", frame->vector); + kprintf(" d0 0x%08lx d1 0x%08lx d2 0x%08lx d3 0x%08lx\n", + frame->d0, frame->d1, frame->d2, frame->d3); + kprintf(" d4 0x%08lx d5 0x%08lx d6 0x%08lx d7 0x%08lx\n", + frame->d4, frame->d5, frame->d6, frame->d7); + kprintf(" a0 0x%08lx a1 0x%08lx a2 0x%08lx a3 0x%08lx\n", + frame->a0, frame->a1, frame->a2, frame->a3); + kprintf(" a4 0x%08lx a5 0x%08lx a6 0x%08lx a7 0x%08lx (sp)\n", + frame->a4, frame->a5, frame->a6, frame->a7); - print_stack_frame(thread, frame->srr0, framePointer, frame->r1); - framePointer = frame->r1; + /*kprintf(" pc 0x%08lx ccr 0x%02x\n", + frame->pc, frame->ccr);*/ + kprintf(" pc 0x%08lx sr 0x%04x\n", + frame->pc, frame->sr); +#warning M68K: missing regs + + print_stack_frame(thread, frame->pc, framePointer, frame->a6); + framePointer = frame->a6; } else { addr_t ip, nextFramePointer; diff --git a/src/system/kernel/arch/m68k/arch_debug_console.cpp b/src/system/kernel/arch/m68k/arch_debug_console.cpp index f0728a707d..d37b5f4215 100644 --- a/src/system/kernel/arch/m68k/arch_debug_console.cpp +++ b/src/system/kernel/arch/m68k/arch_debug_console.cpp @@ -1,4 +1,7 @@ /* + * Copyright 2007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * * Copyright 2003-2006, Axel Dörfler, axeld@pinc-software.de. * Distributed under the terms of the MIT License. * diff --git a/src/system/kernel/arch/m68k/arch_elf.cpp b/src/system/kernel/arch/m68k/arch_elf.cpp index e45492de24..24ed3bc527 100644 --- a/src/system/kernel/arch/m68k/arch_elf.cpp +++ b/src/system/kernel/arch/m68k/arch_elf.cpp @@ -1,4 +1,7 @@ /* + * Copyright 2007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * * Copyright 2005, Ingo Weinhold . * All rights reserved. Distributed under the terms of the MIT License. * @@ -7,6 +10,9 @@ * Distributed under the terms of the NewOS License. 
*/ +#ifdef _BOOT_MODE +#include +#endif #include @@ -16,10 +22,15 @@ #define CHATTY 0 - +#ifdef _BOOT_MODE +status_t +boot_arch_elf_relocate_rel(struct preloaded_image *image, + struct Elf32_Rel *rel, int rel_len) +#else int arch_elf_relocate_rel(struct elf_image_info *image, const char *sym_prepend, struct elf_image_info *resolve_image, struct Elf32_Rel *rel, int rel_len) +#endif { // there are no rel entries in PPC elf return B_NO_ERROR; @@ -105,9 +116,15 @@ ha(Elf32_Word value) } +#ifdef _BOOT_MODE +status_t +boot_arch_elf_relocate_rela(struct preloaded_image *image, + struct Elf32_Rela *rel, int rel_len) +#else int arch_elf_relocate_rela(struct elf_image_info *image, const char *sym_prepend, struct elf_image_info *resolve_image, struct Elf32_Rela *rel, int rel_len) +#endif { int i; struct Elf32_Sym *sym; @@ -128,7 +145,7 @@ arch_elf_relocate_rela(struct elf_image_info *image, const char *sym_prepend, dprintf("arch_elf_relocate_rela(): Failed to get GOT address!\n"); \ return B_ERROR; \ } - + // TODO: Get the PLT address! #define REQUIRE_PLT \ if (L == 0) { \ @@ -172,12 +189,16 @@ arch_elf_relocate_rela(struct elf_image_info *image, const char *sym_prepend, case R_PPC_JMP_SLOT: sym = SYMBOL(image, ELF32_R_SYM(rel[i].r_info)); +#ifdef _BOOT_MODE + vlErr = boot_elf_resolve_symbol(image, sym, &S); +#else vlErr = elf_resolve_symbol(image, sym, resolve_image, sym_prepend, &S); +#endif if (vlErr < 0) { - dprintf("arch_elf_relocate_rela(): Failed to relocate " + dprintf("%s(): Failed to relocate " "entry index %d, rel type %d, offset 0x%lx, sym 0x%lx, " - "addend 0x%lx\n", i, ELF32_R_TYPE(rel[i].r_info), + "addend 0x%lx\n", __FUNCTION__, i, ELF32_R_TYPE(rel[i].r_info), rel[i].r_offset, ELF32_R_SYM(rel[i].r_info), rel[i].r_addend); return vlErr; diff --git a/src/system/kernel/arch/m68k/arch_int.cpp b/src/system/kernel/arch/m68k/arch_int.cpp index 03dac1b848..21ae690678 100644 --- a/src/system/kernel/arch/m68k/arch_int.cpp +++ b/src/system/kernel/arch/m68k/arch_int.cpp @@ -5,6 +5,9 @@ * Authors: * Axel Dörfler * Ingo Weinhold + * François Revol + * Distributed under the terms of the MIT License. + * * * Copyright 2001, Travis Geiselbrecht. All rights reserved. * Distributed under the terms of the NewOS License. @@ -27,6 +30,7 @@ #include #include +#warning M68K: writeme! 
// defined in arch_exceptions.S extern int __irqvec_start; @@ -77,18 +81,24 @@ static void print_iframe(struct iframe *frame) { dprintf("iframe at %p:\n", frame); - dprintf("r0-r3: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r0, frame->r1, frame->r2, frame->r3); - dprintf("r4-r7: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r4, frame->r5, frame->r6, frame->r7); - dprintf("r8-r11: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r8, frame->r9, frame->r10, frame->r11); - dprintf("r12-r15: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r12, frame->r13, frame->r14, frame->r15); - dprintf("r16-r19: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r16, frame->r17, frame->r18, frame->r19); - dprintf("r20-r23: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r20, frame->r21, frame->r22, frame->r23); - dprintf("r24-r27: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r24, frame->r25, frame->r26, frame->r27); - dprintf("r28-r31: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->r28, frame->r29, frame->r30, frame->r31); - dprintf(" ctr 0x%08lx xer 0x%08lx\n", frame->ctr, frame->xer); - dprintf(" cr 0x%08lx lr 0x%08lx\n", frame->cr, frame->lr); - dprintf(" dsisr 0x%08lx dar 0x%08lx\n", frame->dsisr, frame->dar); - dprintf(" srr1 0x%08lx srr0 0x%08lx\n", frame->srr1, frame->srr0); + dprintf(" d0 0x%08lx d1 0x%08lx d2 0x%08lx d3 0x%08lx\n", + frame->d0, frame->d1, frame->d2, frame->d3); + kprintf(" d4 0x%08lx d5 0x%08lx d6 0x%08lx d7 0x%08lx\n", + frame->d4, frame->d5, frame->d6, frame->d7); + kprintf(" a0 0x%08lx a1 0x%08lx a2 0x%08lx a3 0x%08lx\n", + frame->a0, frame->a1, frame->a2, frame->a3); + kprintf(" a4 0x%08lx a5 0x%08lx a6 0x%08lx a7 0x%08lx (sp)\n", + frame->d4, frame->d5, frame->d6, frame->d7); + + /*kprintf(" pc 0x%08lx ccr 0x%02x\n", + frame->pc, frame->ccr);*/ + kprintf(" pc 0x%08lx sr 0x%04x\n", + frame->pc, frame->sr); + dprintf("r0-r3: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->d0, frame->d1, frame->d2, frame->d3); + dprintf("r4-r7: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->d4, frame->d5, frame->d6, frame->d7); + dprintf("r8-r11: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->a0, frame->a1, frame->a2, frame->a3); + dprintf("r12-r15: 0x%08lx 0x%08lx 0x%08lx 0x%08lx\n", frame->a4, frame->a5, frame->a6, frame->a7); + dprintf(" pc 0x%08lx sr 0x%08lx\n", frame->pc, frame->sr); } diff --git a/src/system/kernel/arch/m68k/arch_mmu.cpp b/src/system/kernel/arch/m68k/arch_mmu.cpp index a7cdc753a2..50096ed88c 100644 --- a/src/system/kernel/arch/m68k/arch_mmu.cpp +++ b/src/system/kernel/arch/m68k/arch_mmu.cpp @@ -1,34 +1,16 @@ /* -** Copyright 2003, Axel Dörfler, axeld@pinc-software.de. All rights reserved. -** Distributed under the terms of the OpenBeOS License. -*/ + * Copyright 2007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * + * Copyright 2003, Axel Dörfler, axeld@pinc-software.de. All rights reserved. + * Distributed under the terms of the OpenBeOS License. 
+ */ #include #include - -uint32 -page_table_entry::PrimaryHash(uint32 virtualSegmentID, uint32 virtualAddress) -{ - return (virtualSegmentID & 0x7ffff) ^ ((virtualAddress >> 12) & 0xffff); -} - - -uint32 -page_table_entry::SecondaryHash(uint32 virtualSegmentID, uint32 virtualAddress) -{ - return ~PrimaryHash(virtualSegmentID, virtualAddress); -} - - -uint32 -page_table_entry::SecondaryHash(uint32 primaryHash) -{ - return ~primaryHash; -} - - +w void m68k_get_page_table(page_table_entry_group **_pageTable, size_t *_size) { diff --git a/src/system/kernel/arch/m68k/arch_platform.cpp b/src/system/kernel/arch/m68k/arch_platform.cpp index 76e1dd6e6e..ba2d327789 100644 --- a/src/system/kernel/arch/m68k/arch_platform.cpp +++ b/src/system/kernel/arch/m68k/arch_platform.cpp @@ -1,4 +1,7 @@ /* + * Copyright 2007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * * Copyright 2006, Ingo Weinhold . * All rights reserved. Distributed under the terms of the MIT License. */ diff --git a/src/system/kernel/arch/m68k/arch_real_time_clock.cpp b/src/system/kernel/arch/m68k/arch_real_time_clock.cpp index e0d614467a..4b834555c9 100644 --- a/src/system/kernel/arch/m68k/arch_real_time_clock.cpp +++ b/src/system/kernel/arch/m68k/arch_real_time_clock.cpp @@ -1,4 +1,7 @@ /* + * Copyright 2007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * * Copyright 2006, Ingo Weinhold . * All rights reserved. Distributed under the terms of the MIT License. */ diff --git a/src/system/kernel/arch/m68k/arch_smp.c b/src/system/kernel/arch/m68k/arch_smp.c index dd590b8a98..3a14e963d1 100644 --- a/src/system/kernel/arch/m68k/arch_smp.c +++ b/src/system/kernel/arch/m68k/arch_smp.c @@ -1,7 +1,13 @@ /* -** Copyright 2004, Axel Dörfler, axeld@pinc-software.de -** Distributed under the terms of the OpenBeOS License. -*/ + * Copyright 2007, Haiku Inc. All rights reserved. + * Distributed under the terms of the MIT License. + * + * Authors: + * François Revol + * + * Copyright 2004, Axel Dörfler, axeld@pinc-software.de + * Distributed under the terms of the OpenBeOS License. + */ #include diff --git a/src/system/kernel/arch/m68k/arch_system_info.c b/src/system/kernel/arch/m68k/arch_system_info.c index 624691a2ba..adf55869a3 100644 --- a/src/system/kernel/arch/m68k/arch_system_info.c +++ b/src/system/kernel/arch/m68k/arch_system_info.c @@ -1,4 +1,10 @@ /* + * Copyright 2007, Haiku Inc. All rights reserved. + * Distributed under the terms of the MIT License. + * + * Authors: + * François Revol + * * Copyright 2006, Ingo Weinhold . * All rights reserved. Distributed under the terms of the MIT License. */ diff --git a/src/system/kernel/arch/m68k/arch_thread.c b/src/system/kernel/arch/m68k/arch_thread.c index 6b3163d807..579594a0c3 100644 --- a/src/system/kernel/arch/m68k/arch_thread.c +++ b/src/system/kernel/arch/m68k/arch_thread.c @@ -5,6 +5,7 @@ * Authors: * Axel Dörfler * Ingo Weinhold + * François Revol * * Copyright 2001, Travis Geiselbrecht. All rights reserved. * Distributed under the terms of the NewOS License. @@ -22,19 +23,20 @@ #include +#warning M68K: writeme! // Valid initial arch_thread state. We just memcpy() it when initializing // a new thread structure. static struct arch_thread sInitialState; // Helper function for thread creation, defined in arch_asm.S. 
-extern void ppc_kernel_thread_root(); +extern void m68k_kernel_thread_root(); -extern void ppc_switch_stack_and_call(addr_t newKstack, void (*func)(void *), +extern void m68k_switch_stack_and_call(addr_t newKstack, void (*func)(void *), void *arg); void -ppc_push_iframe(struct iframe_stack *stack, struct iframe *frame) +m68k_push_iframe(struct iframe_stack *stack, struct iframe *frame) { ASSERT(stack->index < IFRAME_TRACE_DEPTH); stack->frames[stack->index++] = frame; @@ -42,7 +44,7 @@ ppc_push_iframe(struct iframe_stack *stack, struct iframe *frame) void -ppc_pop_iframe(struct iframe_stack *stack) +m68k_pop_iframe(struct iframe_stack *stack) { ASSERT(stack->index > 0); stack->index--; @@ -55,7 +57,7 @@ ppc_pop_iframe(struct iframe_stack *stack) * from standard kernel threads. */ static struct iframe * -ppc_get_current_iframe(void) +m68k_get_current_iframe(void) { struct thread *thread = thread_get_current_thread(); @@ -71,15 +73,15 @@ ppc_get_current_iframe(void) * the thread is a kernel thread). */ struct iframe * -ppc_get_user_iframe(void) +m68k_get_user_iframe(void) { struct thread *thread = thread_get_current_thread(); int i; for (i = thread->arch_info.iframes.index - 1; i >= 0; i--) { struct iframe *frame = thread->arch_info.iframes.frames[i]; - if (frame->srr1 & MSR_PRIVILEGE_LEVEL) - return frame; +// if (frame->srr1 & MSR_PRIVILEGE_LEVEL) +// return frame; } return NULL; @@ -141,13 +143,13 @@ arch_thread_init_kthread_stack(struct thread *t, int (*start_func)(void), kstackTop -= 2; kstackTop = (addr_t*)((addr_t)kstackTop & ~0xf); - // LR, CR, r2, r13-r31, f13-f31, as pushed by ppc_context_switch() + // LR, CR, r2, r13-r31, f13-f31, as pushed by m68k_context_switch() kstackTop -= 22 + 2 * 19; - // let LR point to ppc_kernel_thread_root() - kstackTop[0] = (addr_t)&ppc_kernel_thread_root; + // let LR point to m68k_kernel_thread_root() + kstackTop[0] = (addr_t)&m68k_kernel_thread_root; - // the arguments of ppc_kernel_thread_root() are the functions to call, + // the arguments of m68k_kernel_thread_root() are the functions to call, // provided in registers r13-r15 kstackTop[3] = (addr_t)entry_func; kstackTop[4] = (addr_t)start_func; @@ -172,7 +174,7 @@ void arch_thread_switch_kstack_and_call(struct thread *t, addr_t newKstack, void (*func)(void *), void *arg) { - ppc_switch_stack_and_call(newKstack, func, arg); + m68k_switch_stack_and_call(newKstack, func, arg); } @@ -189,11 +191,11 @@ arch_thread_context_switch(struct thread *t_from, struct thread *t_to) // the target thread has is user space if (t_from->team != t_to->team) { // switching to a new address space - ppc_translation_map_change_asid(&t_to->team->address_space->translation_map); + m68k_translation_map_change_asid(&t_to->team->address_space->translation_map); } } - ppc_context_switch(&t_from->arch_info.sp, t_to->arch_info.sp); + m68k_context_switch(&t_from->arch_info.sp, t_to->arch_info.sp); } diff --git a/src/system/kernel/arch/m68k/arch_timer.c b/src/system/kernel/arch/m68k/arch_timer.c index d8bc17fc0d..a687c65d49 100644 --- a/src/system/kernel/arch/m68k/arch_timer.c +++ b/src/system/kernel/arch/m68k/arch_timer.c @@ -1,7 +1,13 @@ /* -** Copyright 2001, Travis Geiselbrecht. All rights reserved. -** Distributed under the terms of the NewOS License. -*/ + * Copyright 2007, Haiku Inc. All rights reserved. + * Distributed under the terms of the MIT License. + * + * Authors: + * François Revol + * + * Copyright 2001, Travis Geiselbrecht. All rights reserved. + * Distributed under the terms of the NewOS License. 
+ */ #include diff --git a/src/system/kernel/arch/m68k/arch_user_debugger.cpp b/src/system/kernel/arch/m68k/arch_user_debugger.cpp index 7cad26e8e9..52b12079ca 100644 --- a/src/system/kernel/arch/m68k/arch_user_debugger.cpp +++ b/src/system/kernel/arch/m68k/arch_user_debugger.cpp @@ -1,4 +1,7 @@ /* + * Copyright 2007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * * Copyright 2005, Axel Dörfler, axeld@pinc-softare.de * Distributed under the terms of the MIT License. */ diff --git a/src/system/kernel/arch/m68k/arch_vm.cpp b/src/system/kernel/arch/m68k/arch_vm.cpp index a1c6469a87..5414630bf0 100644 --- a/src/system/kernel/arch/m68k/arch_vm.cpp +++ b/src/system/kernel/arch/m68k/arch_vm.cpp @@ -1,4 +1,7 @@ /* + * Copyright 2007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * * Copyright 2003-2005, Axel Dörfler, axeld@pinc-software.de. * Distributed under the terms of the MIT License. * @@ -37,58 +40,9 @@ arch_vm_init2(kernel_args *args) // int bats[8]; // int i; -#if 0 - // print out any bat mappings - getibats(bats); - dprintf("ibats:\n"); - for(i = 0; i < 4; i++) - dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]); - getdbats(bats); - dprintf("dbats:\n"); - for(i = 0; i < 4; i++) - dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]); -#endif + /**/ +#warning M68K: disable TT0 and TT1, set up pmmu -#if 1 - // turn off the first 2 BAT mappings (3 & 4 are used by the lower level code) - block_address_translation bat; - bat.Clear(); - - set_ibat0(&bat); - set_ibat1(&bat); - set_dbat0(&bat); - set_dbat1(&bat); -/* getibats(bats); - memset(bats, 0, 2 * 2); - setibats(bats); - getdbats(bats); - memset(bats, 0, 2 * 2); - setdbats(bats); -*/ -#endif -#if 0 - // just clear the first BAT mapping (0 - 256MB) - dprintf("msr 0x%x\n", getmsr()); - { - unsigned int reg; - asm("mr %0,1" : "=r"(reg)); - dprintf("sp 0x%x\n", reg); - } - dprintf("ka %p\n", ka); - - getibats(bats); - dprintf("ibats:\n"); - for(i = 0; i < 4; i++) - dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]); - bats[0] = bats[1] = 0; - setibats(bats); - getdbats(bats); - dprintf("dbats:\n"); - for(i = 0; i < 4; i++) - dprintf("0x%x 0x%x\n", bats[i*2], bats[i*2+1]); - bats[0] = bats[1] = 0; - setdbats(bats); -#endif return B_OK; } diff --git a/src/system/kernel/arch/m68k/arch_vm_translation_map.cpp b/src/system/kernel/arch/m68k/arch_vm_translation_map.cpp index 5be8348e69..2eab1dca47 100644 --- a/src/system/kernel/arch/m68k/arch_vm_translation_map.cpp +++ b/src/system/kernel/arch/m68k/arch_vm_translation_map.cpp @@ -1,4 +1,7 @@ /* + * Copyright 1007, François Revol, revol@free.fr. + * Distributed under the terms of the MIT License. + * * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de. * Distributed under the terms of the MIT License. * @@ -6,71 +9,6 @@ * Distributed under the terms of the NewOS License. */ -/* (bonefish) Some explanatory words on how address translation is implemented - for the 32 bit PPC architecture. - - I use the address type nomenclature as used in the PPC architecture - specs, i.e. - - effective address: An address as used by program instructions, i.e. - that's what elsewhere (e.g. in the VM implementation) is called - virtual address. - - virtual address: An intermediate address computed from the effective - address via the segment registers. - - physical address: An address referring to physical storage. 
- - The hardware translates an effective address to a physical address using - either of two mechanisms: 1) Block Address Translation (BAT) or - 2) segment + page translation. The first mechanism does this directly - using two sets (for data/instructions) of special purpose registers. - The latter mechanism is of more relevance here, though: - - effective address (32 bit): [ 0 ESID 3 | 4 PIX 19 | 20 Byte 31 ] - | | | - (segment registers) | | - | | | - virtual address (52 bit): [ 0 VSID 23 | 24 PIX 39 | 40 Byte 51 ] - [ 0 VPN 39 | 40 Byte 51 ] - | | - (page table) | - | | - physical address (32 bit): [ 0 PPN 19 | 20 Byte 31 ] - - - ESID: Effective Segment ID - VSID: Virtual Segment ID - PIX: Page Index - VPN: Virtual Page Number - PPN: Physical Page Number - - - Unlike on x86 we can't just switch the context to another team by just - setting a register to another page directory, since we only have one - page table containing both kernel and user address mappings. Instead we - map the effective address space of kernel and *all* teams - non-intersectingly into the virtual address space (which fortunately is - 20 bits wider), and use the segment registers to select the section of - the virtual address space for the current team. Half of the 16 segment - registers (8 - 15) map the kernel addresses, so they remain unchanged. - - The range of the virtual address space a team's effective address space - is mapped to is defined by its vm_translation_map_arch_info::vsid_base, - which is the first of the 8 successive VSID values used for the team. - - Which vsid_base values are already taken is defined by the set bits in - the bitmap sVSIDBaseBitmap. - - - TODO: - * If we want to continue to use the OF services, we would need to add - its address mappings to the kernel space. Unfortunately some stuff - (especially RAM) is mapped in an address range without the kernel - address space. We probably need to map those into each team's address - space as kernel read/write areas. - * The current locking scheme is insufficient. The page table is a resource - shared by all teams. We need to synchronize access to it. Probably via a - spinlock. - */ - #include #include #include @@ -85,396 +23,48 @@ #include "generic_vm_physical_page_mapper.h" -static struct page_table_entry_group *sPageTable; -static size_t sPageTableSize; -static uint32 sPageTableHashMask; -static area_id sPageTableArea; +/* + * Each mmu of the m68k family has its own tricks, registers and opcodes... + * so we use a function array to switch to the one we want. + */ +//extern struct m68k_vm_ops m68851_vm_ops; +extern struct m68k_vm_ops m68030_vm_ops; +//extern struct m68k_vm_ops m68030_vm_ops; +//extern struct m68k_vm_ops m68030_vm_ops; -// 64 MB of iospace -#define IOSPACE_SIZE (64*1024*1024) -// We only have small (4 KB) pages. The only reason for choosing greater chunk -// size is to keep the waste of memory limited, since the generic page mapper -// allocates structures per physical/virtual chunk. -// TODO: Implement a page mapper more suitable for small pages! -#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE) - -static addr_t sIOSpaceBase; - - -// The VSID is a 24 bit number. The lower three bits are defined by the -// (effective) segment number, which leaves us with a 21 bit space of -// VSID bases (= 2 * 1024 * 1024). 
-#define MAX_VSID_BASES (PAGE_SIZE * 8) -static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)]; -static spinlock sVSIDBaseBitmapLock; - -#define VSID_BASE_SHIFT 3 -#define VADDR_TO_VSID(map, vaddr) \ - ((map)->arch_data->vsid_base + ((vaddr) >> 28)) - -// vm_translation object stuff -typedef struct vm_translation_map_arch_info { - int vsid_base; // used VSIDs are vside_base ... vsid_base + 7 -} vm_translation_map_arch_info; - - -void -ppc_translation_map_change_asid(vm_translation_map *map) +static m68k_vm_ops *get_vm_ops() { -// this code depends on the kernel being at 0x80000000, fix if we change that -#if KERNEL_BASE != 0x80000000 -#error fix me -#endif - int vsidBase = map->arch_data->vsid_base; - - isync(); // synchronize context - asm("mtsr 0,%0" : : "g"(vsidBase)); - asm("mtsr 1,%0" : : "g"(vsidBase + 1)); - asm("mtsr 2,%0" : : "g"(vsidBase + 2)); - asm("mtsr 3,%0" : : "g"(vsidBase + 3)); - asm("mtsr 4,%0" : : "g"(vsidBase + 4)); - asm("mtsr 5,%0" : : "g"(vsidBase + 5)); - asm("mtsr 6,%0" : : "g"(vsidBase + 6)); - asm("mtsr 7,%0" : : "g"(vsidBase + 7)); - isync(); // synchronize context -} - - -static status_t -lock_tmap(vm_translation_map *map) -{ - recursive_lock_lock(&map->lock); - return 0; -} - - -static status_t -unlock_tmap(vm_translation_map *map) -{ - recursive_lock_unlock(&map->lock); - return 0; -} - - -static void -destroy_tmap(vm_translation_map *map) -{ - if (map->map_count > 0) { - panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n", - map, map->map_count); + int cpu = arch_cpu_type; + cpu &= MMU_MASK; + switch (cpu) { + case MMU_NONE: + panic("Ugh, no mmu !?"); + return NULL; + case MMU_68551: + panic("Unimplemented yet (mmu)"); + //return &m68851_vm_ops; + return NULL; + case MMU_68030: + return &m68030_vm_ops; + case MMU_68040: + //return &m68040_vm_ops; + case MMU_68060: + //return &m68060_vm_ops; + panic("Unimplemented yet (mmu)"); + return NULL; + default: + panic("Invalid mmu type!"); + return NULL; } - - // mark the vsid base not in use - int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT; - atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32], - ~(1 << (baseBit % 32))); - - free(map->arch_data); - recursive_lock_destroy(&map->lock); } - -static void -fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID, - addr_t virtualAddress, addr_t physicalAddress, uint8 protection, - bool secondaryHash) +void * +m68k_translation_map_get_pgdir(vm_translation_map *map) { - // lower 32 bit - set at once - entry->physical_page_number = physicalAddress / B_PAGE_SIZE; - entry->_reserved0 = 0; - entry->referenced = false; - entry->changed = false; - entry->write_through = false; - entry->caching_inhibited = false; - entry->memory_coherent = false; - entry->guarded = false; - entry->_reserved1 = 0; - entry->page_protection = protection & 0x3; - eieio(); - // we need to make sure that the lower 32 bit were - // already written when the entry becomes valid - - // upper 32 bit - entry->virtual_segment_id = virtualSegmentID; - entry->secondary_hash = secondaryHash; - entry->abbr_page_index = (virtualAddress >> 22) & 0x3f; - entry->valid = true; - - ppc_sync(); + return get_vm_ops()->m68k_translation_map_get_pgdir(map); } - -static size_t -map_max_pages_need(vm_translation_map *map, addr_t start, addr_t end) -{ - return 0; -} - - -static status_t -map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes) -{ - // lookup the vsid based off the va - uint32 virtualSegmentID = 
VADDR_TO_VSID(map, virtualAddress); - uint32 protection = 0; - - // ToDo: check this - // all kernel mappings are R/W to supervisor code - if (attributes & (B_READ_AREA | B_WRITE_AREA)) - protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY; - - //dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va); - - // Search for a free page table slot using the primary hash value - - uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress); - page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask]; - - for (int i = 0; i < 8; i++) { - page_table_entry *entry = &group->entry[i]; - - if (entry->valid) - continue; - - fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress, - protection, false); - map->map_count++; - return B_OK; - } - - // Didn't found one, try the secondary hash value - - hash = page_table_entry::SecondaryHash(hash); - group = &sPageTable[hash & sPageTableHashMask]; - - for (int i = 0; i < 8; i++) { - page_table_entry *entry = &group->entry[i]; - - if (entry->valid) - continue; - - fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress, - protection, false); - map->map_count++; - return B_OK; - } - - panic("vm_translation_map.map_tmap: hash table full\n"); - return B_ERROR; -} - - -static page_table_entry * -lookup_page_table_entry(vm_translation_map *map, addr_t virtualAddress) -{ - // lookup the vsid based off the va - uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress); - -// dprintf("vm_translation_map.lookup_page_table_entry: vsid %d, va 0x%lx\n", vsid, va); - - - // Search for the page table entry using the primary hash value - - uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress); - page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask]; - - for (int i = 0; i < 8; i++) { - page_table_entry *entry = &group->entry[i]; - - if (entry->virtual_segment_id == virtualSegmentID - && entry->secondary_hash == false - && entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f)) - return entry; - } - - // Didn't found it, try the secondary hash value - - hash = page_table_entry::SecondaryHash(hash); - group = &sPageTable[hash & sPageTableHashMask]; - - for (int i = 0; i < 8; i++) { - page_table_entry *entry = &group->entry[i]; - - if (entry->virtual_segment_id == virtualSegmentID - && entry->secondary_hash == true - && entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f)) - return entry; - } - - return NULL; -} - - -static bool -remove_page_table_entry(vm_translation_map *map, addr_t virtualAddress) -{ - page_table_entry *entry = lookup_page_table_entry(map, virtualAddress); - if (entry) { - entry->valid = 0; - ppc_sync(); - tlbie(virtualAddress); - eieio(); - tlbsync(); - ppc_sync(); - } - - return entry; -} - - -static status_t -unmap_tmap(vm_translation_map *map, addr_t start, addr_t end) -{ - page_table_entry *entry; - - start = ROUNDOWN(start, B_PAGE_SIZE); - end = ROUNDUP(end, B_PAGE_SIZE); - -// dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end); - - while (start < end) { - if (remove_page_table_entry(map, start)) - map->map_count--; - - start += B_PAGE_SIZE; - } - - return B_OK; -} - - -static status_t -query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_outFlags) -{ - page_table_entry *entry; - - // default the flags to not present - *_outFlags = 0; - *_outPhysical = 0; - - entry = lookup_page_table_entry(map, va); - if (entry == NULL) - 
return B_NO_ERROR; - - // ToDo: check this! - if (IS_KERNEL_ADDRESS(va)) - *_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA); - else - *_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA); - - *_outFlags |= entry->changed ? PAGE_MODIFIED : 0; - *_outFlags |= entry->referenced ? PAGE_ACCESSED : 0; - *_outFlags |= entry->valid ? PAGE_PRESENT : 0; - - *_outPhysical = entry->physical_page_number * B_PAGE_SIZE; - - return B_OK; -} - - -static status_t -map_iospace_chunk(addr_t va, addr_t pa) -{ - pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned - va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned - if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE)) - panic("map_iospace_chunk: passed invalid va 0x%lx\n", va); - - // map the pages - return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE); -} - - -static addr_t -get_mapped_size_tmap(vm_translation_map *map) -{ - return map->map_count; -} - - -static status_t -protect_tmap(vm_translation_map *map, addr_t base, addr_t top, uint32 attributes) -{ - // XXX finish - return B_ERROR; -} - - -static status_t -clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags) -{ - page_table_entry *entry = lookup_page_table_entry(map, virtualAddress); - if (entry == NULL) - return B_NO_ERROR; - - bool modified = false; - - // clear the bits - if (flags & PAGE_MODIFIED && entry->changed) { - entry->changed = false; - modified = true; - } - if (flags & PAGE_ACCESSED && entry->referenced) { - entry->referenced = false; - modified = true; - } - - // synchronize - if (modified) { - tlbie(virtualAddress); - eieio(); - tlbsync(); - ppc_sync(); - } - - return B_OK; -} - - -static void -flush_tmap(vm_translation_map *map) -{ -// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't -// even cut it here. We are supposed to invalidate all TLB entries for this -// map on all CPUs. We should loop over the virtual pages and invoke tlbie -// instead (which marks the entry invalid on all CPUs). - arch_cpu_global_TLB_invalidate(); -} - - -static status_t -get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags) -{ - return generic_get_physical_page(pa, va, flags); -} - - -static status_t -put_physical_page_tmap(addr_t va) -{ - return generic_put_physical_page(va); -} - - -static vm_translation_map_ops tmap_ops = { - destroy_tmap, - lock_tmap, - unlock_tmap, - map_max_pages_need, - map_tmap, - unmap_tmap, - query_tmap, - query_tmap, - get_mapped_size_tmap, - protect_tmap, - clear_flags_tmap, - flush_tmap, - get_physical_page_tmap, - put_physical_page_tmap -}; - - // #pragma mark - // VM API @@ -482,138 +72,35 @@ static vm_translation_map_ops tmap_ops = { status_t arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel) { - // initialize the new object - map->ops = &tmap_ops; - map->map_count = 0; - - if (!kernel) { - // During the boot process, there are no semaphores available at this - // point, so we only try to create the translation map lock if we're - // initialize a user translation map. - // vm_translation_map_init_kernel_map_post_sem() is used to complete - // the kernel translation map. 
- if (recursive_lock_init(&map->lock, "translation map") < B_OK) - return map->lock.sem; - } - - map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info)); - if (map->arch_data == NULL) { - if (!kernel) - recursive_lock_destroy(&map->lock); - return B_NO_MEMORY; - } - - cpu_status state = disable_interrupts(); - acquire_spinlock(&sVSIDBaseBitmapLock); - - // allocate a VSID base for this one - if (kernel) { - // The boot loader has set up the segment registers for identical - // mapping. Two VSID bases are reserved for the kernel: 0 and 8. The - // latter one for mapping the kernel address space (0x80000000...), the - // former one for the lower addresses required by the Open Firmware - // services. - map->arch_data->vsid_base = 0; - sVSIDBaseBitmap[0] |= 0x3; - } else { - int i = 0; - - while (i < MAX_VSID_BASES) { - if (sVSIDBaseBitmap[i / 32] == 0xffffffff) { - i += 32; - continue; - } - if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) { - // we found it - sVSIDBaseBitmap[i / 32] |= 1 << (i % 32); - break; - } - i++; - } - if (i >= MAX_VSID_BASES) - panic("vm_translation_map_create: out of VSID bases\n"); - map->arch_data->vsid_base = i << VSID_BASE_SHIFT; - } - - release_spinlock(&sVSIDBaseBitmapLock); - restore_interrupts(state); - - return B_OK; + return get_vm_ops()->arch_vm_translation_map_init_map(map, kernel); } status_t arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map) { - if (recursive_lock_init(&map->lock, "translation map") < B_OK) - return map->lock.sem; - - return B_OK; + return get_vm_ops()->arch_vm_translation_map_init_kernel_map_post_sem(map); } status_t arch_vm_translation_map_init(kernel_args *args) { - sPageTable = (page_table_entry_group *)args->arch_args.page_table.start; - sPageTableSize = args->arch_args.page_table.size; - sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1; - - // init physical page mapper - status_t error = generic_vm_physical_page_mapper_init(args, - map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE); - if (error != B_OK) - return error; - - return B_OK; + return get_vm_ops()->arch_vm_translation_map_init(args); } status_t arch_vm_translation_map_init_post_area(kernel_args *args) { - // If the page table doesn't lie within the kernel address space, we - // remap it. - if (!IS_KERNEL_ADDRESS(sPageTable)) { - addr_t newAddress = (addr_t)sPageTable; - status_t error = ppc_remap_address_range(&newAddress, sPageTableSize, - false); - if (error != B_OK) { - panic("arch_vm_translation_map_init_post_area(): Failed to remap " - "the page table!"); - return error; - } - - // set the new page table address - addr_t oldVirtualBase = (addr_t)(sPageTable); - sPageTable = (page_table_entry_group*)newAddress; - - // unmap the old pages - ppc_unmap_address_range(oldVirtualBase, sPageTableSize); - -// TODO: We should probably map the page table via BAT. It is relatively large, -// and due to being a hash table the access patterns might look sporadic, which -// certainly isn't to the liking of the TLB. 
- } - - // create an area to cover the page table - sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS, - sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); - - // init physical page mapper - status_t error = generic_vm_physical_page_mapper_init_post_area(args); - if (error != B_OK) - return error; - - return B_OK; + return get_vm_ops()->arch_vm_translation_map_init_post_area(args); } status_t arch_vm_translation_map_init_post_sem(kernel_args *args) { - // init physical page mapper - return generic_vm_physical_page_mapper_init_post_sem(args); + return get_vm_ops()->arch_vm_translation_map_init_post_sem(args); } @@ -627,32 +114,8 @@ status_t arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress, uint8 attributes, addr_t (*get_free_page)(kernel_args *)) { - uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff; - - uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress); - page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask]; - - for (int32 i = 0; i < 8; i++) { - // 8 entries in a group - if (group->entry[i].valid) - continue; - - fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, false); - return B_OK; - } - - hash = page_table_entry::SecondaryHash(hash); - group = &sPageTable[hash & sPageTableHashMask]; - - for (int32 i = 0; i < 8; i++) { - if (group->entry[i].valid) - continue; - - fill_page_table_entry(&group->entry[i], virtualSegmentID, virtualAddress, physicalAddress, PTE_READ_WRITE, true); - return B_OK; - } - - return B_ERROR; + return get_vm_ops()->arch_vm_translation_map_early_map(ka, virtualAddress, physicalAddress, + attributes, get_free_page); } @@ -661,85 +124,32 @@ arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t status_t arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical) { - //PANIC_UNIMPLEMENTED(); - panic("vm_translation_map_quick_query(): not yet implemented\n"); - return B_OK; + return get_vm_ops()->arch_vm_translation_map_early_query(va, out_physical); } // #pragma mark - - +#if 0 status_t -ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress, +m68k_map_address_range(addr_t virtualAddress, addr_t physicalAddress, size_t size) { - addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE); - virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE); - physicalAddress = ROUNDOWN(physicalAddress, B_PAGE_SIZE); - - vm_address_space *addressSpace = vm_kernel_address_space(); - - // map the pages - for (; virtualAddress < virtualEnd; - virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) { - status_t error = map_tmap(&addressSpace->translation_map, - virtualAddress, physicalAddress, - B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); - if (error != B_OK) - return error; - } - - return B_OK; + return get_vm_ops()->m68k_map_address_range(virtualAddress, physicalAddress, size); } void -ppc_unmap_address_range(addr_t virtualAddress, size_t size) +m68k_unmap_address_range(addr_t virtualAddress, size_t size) { - addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE); - virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE); - - vm_address_space *addressSpace = vm_kernel_address_space(); - - for (0; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE) - remove_page_table_entry(&addressSpace->translation_map, virtualAddress); + get_vm_ops()->m68k_unmap_address_range(virtualAddress, size); 
} status_t -ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap) +m68k_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap) { - addr_t virtualAddress = ROUNDOWN(*_virtualAddress, B_PAGE_SIZE); - size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE); - - vm_address_space *addressSpace = vm_kernel_address_space(); - - // reserve space in the address space - void *newAddress = NULL; - status_t error = vm_reserve_address_range(addressSpace->id, &newAddress, - B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); - if (error != B_OK) - return error; - - // get the area's first physical page - page_table_entry *entry = lookup_page_table_entry( - &addressSpace->translation_map, virtualAddress); - if (!entry) - return B_ERROR; - addr_t physicalBase = entry->physical_page_number << 12; - - // map the pages - error = ppc_map_address_range((addr_t)newAddress, physicalBase, size); - if (error != B_OK) - return error; - - *_virtualAddress = (addr_t)newAddress; - - // unmap the old pages - if (unmap) - ppc_unmap_address_range(virtualAddress, size); - - return B_OK; + return get_vm_ops()->m68k_remap_address_range(_virtualAddress, size, unmap); } +#endif diff --git a/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp b/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp new file mode 100644 index 0000000000..78cffb33db --- /dev/null +++ b/src/system/kernel/arch/m68k/arch_vm_translation_map_impl.cpp @@ -0,0 +1,1231 @@ +/* + * Copyright 2007, Haiku Inc. All rights reserved. + * Distributed under the terms of the MIT License. + * + * Authors: + * François Revol + * + * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de. + * Distributed under the terms of the MIT License. + * + * Copyright 2001, Travis Geiselbrecht. All rights reserved. + * Distributed under the terms of the NewOS License. + */ + +#ifndef ARCH_M68K_MMU_TYPE +#error This file is included from arch_*_mmu.cpp +#endif + +/* (mmu_man) Implementation details on 68030 and others: + + Unlike on x86 we can't just switch the context to another team by just + setting a register to another page directory, since we only have one + page table containing both kernel and user address mappings. + The 030 supports arbitrary layout of the page directory tree, including + a 1-bit first level (2 entries top level table) that would map kernel + and user land at a single place. But 040 and later only support a fixed + splitting of 7/7/6 for 4K pages. + + Since 68k SMP hardware is rare enough we don't want to support them, we + can take some shortcuts. + + As we don't want a separate user and kernel space, we'll use a single + table. With the 7/7/6 split the 2nd level would require 32KB of tables, + which is small enough to not want to use the list hack from x86. + XXX: we use the hack for now, check later + + Since page directories/tables don't fit exactly a page, we stuff more + than one per page, and allocate them all at once, and add them at the + same time to the tree. So we guarantee all higher-level entries modulo + the number of tables/page are either invalid or present. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "generic_vm_physical_page_mapper.h" + + + +//#define TRACE_VM_TMAP +#ifdef TRACE_VM_TMAP +# define TRACE(x) dprintf x +#else +# define TRACE(x) ; +#endif + +//XXX: that's platform specific! 
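/*
 * Editor's illustration (not part of the original commit): with 4 KB pages,
 * the 7/7/6 split described in the header comment above, and implemented by
 * the VADDR_TO_PRENT/PDENT/PTENT macros further down, decomposes a 32-bit
 * virtual address into 7 bits of root index, 7 bits of directory index,
 * 6 bits of page-table index and 12 bits of page offset. The helper name is
 * hypothetical.
 */
static inline void
split_virtual_address_example(addr_t va, uint32 *root, uint32 *dir, uint32 *page)
{
	*root = (va >> 25) & 0x7f;	// 128 root table entries
	*dir = (va >> 18) & 0x7f;	// 128 entries per directory table
	*page = (va >> 12) & 0x3f;	// 64 entries per page table
	// e.g. va 0x80003000 -> root 64, dir 0, page 3, offset 0
}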
+// 14 MB of iospace +#define IOSPACE_SIZE (14*1024*1024) +// 4 MB chunks, to optimize for 4 MB pages +// XXX: no such thing on 68k (060 ?) +// 256K +#define IOSPACE_CHUNK_SIZE (256*1024) + +static page_table_entry *iospace_pgtables = NULL; + +#define PAGE_INVALIDATE_CACHE_SIZE 64 + +// vm_translation object stuff +typedef struct vm_translation_map_arch_info { + page_root_entry *rtdir_virt; + page_root_entry *rtdir_phys; + int num_invalidate_pages; + addr_t pages_to_invalidate[PAGE_INVALIDATE_CACHE_SIZE]; +} vm_translation_map_arch_info; + +#if 0 +static page_table_entry *page_hole = NULL; +static page_directory_entry *page_hole_pgdir = NULL; +#endif +static page_root_entry *sKernelPhysicalPageDirectory = NULL; +static page_root_entry *sKernelVirtualPageDirectory = NULL; +static addr_t sQueryPage = NULL; +//static page_table_entry *sQueryPageTable; +//static page_directory_entry *sQueryPageDir; +// MUST be aligned +static page_table_entry sQueryDesc __attribute__ (( aligned (4) )); + +static vm_translation_map *tmap_list; +static spinlock tmap_list_lock; + +static addr_t sIOSpaceBase; + +#define CHATTY_TMAP 0 + +#if 0 +// use P*E_TO_* and TA_TO_P*EA ! +#define ADDR_SHIFT(x) ((x)>>12) +#define ADDR_REVERSE_SHIFT(x) ((x)<<12) +#endif + +/* 7/7/6 split */ +#define VADDR_TO_PRENT(va) (((va) / B_PAGE_SIZE) / (64*128)) +#define VADDR_TO_PDENT(va) ((((va) / B_PAGE_SIZE) / 64) % 128) +#define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 64) + +#define FIRST_USER_PGROOT_ENT (VADDR_TO_PRENT(USER_BASE)) +#define FIRST_USER_PGDIR_ENT (VADDR_TO_PDENT(USER_BASE)) +#define NUM_USER_PGROOT_ENTS (VADDR_TO_PRENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64 * 128))) +#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64))) +#define FIRST_KERNEL_PGROOT_ENT (VADDR_TO_PRENT(KERNEL_BASE)) +#define FIRST_KERNEL_PGDIR_ENT (VADDR_TO_PDENT(KERNEL_BASE)) +#define NUM_KERNEL_PGROOT_ENTS (VADDR_TO_PRENT(KERNEL_SIZE)) +#define NUM_KERNEL_PGDIR_ENTS (VADDR_TO_PDENT(KERNEL_SIZE)) +#define IS_KERNEL_MAP(map) (map->arch_data->rtdir_phys == sKernelPhysicalPageRoot) + +static status_t early_query(addr_t va, addr_t *out_physical); +static status_t get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags); +static status_t put_physical_page_tmap(addr_t va); + +static void flush_tmap(vm_translation_map *map); + + +static void * +m68k_translation_map_get_pgdir(vm_translation_map *map) +{ + return map->arch_data->rtdir_phys; +} + + +static inline void +init_page_root_entry(page_root_entry *entry) +{ + // DT_INVALID is 0 + *(page_root_entry_scalar *)entry = DFL_ROOTENT_VAL; +} + + +static inline void +update_page_root_entry(page_root_entry *entry, page_root_entry *with) +{ + // update page directory entry atomically + *(page_root_entry_scalar *)entry = *(page_root_entry_scalar *)with; +} + + +static inline void +init_page_directory_entry(page_directory_entry *entry) +{ + *(page_directory_entry_scalar *)entry = DFL_DIRENT_VAL; +} + + +static inline void +update_page_directory_entry(page_directory_entry *entry, page_directory_entry *with) +{ + // update page directory entry atomically + *(page_directory_entry_scalar *)entry = *(page_directory_entry_scalar *)with; +} + + +static inline void +init_page_table_entry(page_table_entry *entry) +{ + *(page_table_entry_scalar *)entry = DFL_PAGEENT_VAL; +} + + +static inline void +update_page_table_entry(page_table_entry *entry, page_table_entry *with) +{ + // update page table entry atomically + // XXX: is it ?? (long desc?) 
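	// Editor's note on the XXX above (hedged, not verified against the
	// descriptor layout used here): a single scalar store like the one below
	// is only atomic if the descriptor fits in one 32-bit word; the 030 also
	// supports long-format (8-byte) descriptors, which would need two stores
	// and thus would not be updated atomically.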
+ *(page_table_entry_scalar *)entry = *(page_table_entry_scalar *)with; +} + + +static void +_update_all_pgdirs(int index, page_root_entry e) +{ + vm_translation_map *entry; + unsigned int state = disable_interrupts(); + + acquire_spinlock(&tmap_list_lock); + + for(entry = tmap_list; entry != NULL; entry = entry->next) + entry->arch_data->rtdir_virt[index] = e; + + release_spinlock(&tmap_list_lock); + restore_interrupts(state); +} + + +/*! Acquires the map's recursive lock, and resets the invalidate pages counter + in case it's the first locking recursion. +*/ +static status_t +lock_tmap(vm_translation_map *map) +{ + TRACE(("lock_tmap: map %p\n", map)); + + recursive_lock_lock(&map->lock); + if (recursive_lock_get_recursion(&map->lock) == 1) { + // we were the first one to grab the lock + TRACE(("clearing invalidated page count\n")); + map->arch_data->num_invalidate_pages = 0; + } + + return B_OK; +} + + +/*! Unlocks the map, and, if we'll actually losing the recursive lock, + flush all pending changes of this map (ie. flush TLB caches as + needed). +*/ +static status_t +unlock_tmap(vm_translation_map *map) +{ + TRACE(("unlock_tmap: map %p\n", map)); + + if (recursive_lock_get_recursion(&map->lock) == 1) { + // we're about to release it for the last time + flush_tmap(map); + } + + recursive_lock_unlock(&map->lock); + return B_OK; +} + + +static void +destroy_tmap(vm_translation_map *map) +{ + int state; + vm_translation_map *entry; + vm_translation_map *last = NULL; + unsigned int i, j; + + if (map == NULL) + return; + + // remove it from the tmap list + state = disable_interrupts(); + acquire_spinlock(&tmap_list_lock); + + entry = tmap_list; + while (entry != NULL) { + if (entry == map) { + if (last != NULL) + last->next = entry->next; + else + tmap_list = entry->next; + + break; + } + last = entry; + entry = entry->next; + } + + release_spinlock(&tmap_list_lock); + restore_interrupts(state); + + if (map->arch_data->rtdir_virt != NULL) { + // cycle through and free all of the user space pgtables + // since the size of tables don't match B_PAEG_SIZE, + // we alloc several at once, based on modulos, + // we make sure they are either all in the tree or none. 
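		// Editor's summary of the loop below: walk every user-space root
		// entry, free the page-table pages referenced by the corresponding
		// directory table, and free the directory page itself once the last
		// root entry sharing that physical page (NUM_DIRTBL_PER_PAGE of them)
		// has been processed.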
+ for (i = VADDR_TO_PRENT(USER_BASE); i <= VADDR_TO_PRENT(USER_BASE + (USER_SIZE - 1)); i++) { + addr_t pgdir_pn; + page_directory_entry *pgdir; + vm_page *dirpage; + + if (map->arch_data->rtdir_virt[i].type == DT_INVALID) + continue; + if (map->arch_data->rtdir_virt[i].type != DT_ROOT) { + panic("rtdir[%d]: buggy descriptor type", i); + return; + } + // suboptimal (done 8 times) + pgdir_pn = PRE_TO_PA(map->arch_data->rtdir_virt[i]); + dirpage = vm_lookup_page(pgdir_pn); + pgdir = &(((page_directory_entry *)dirpage)[i%NUM_DIRTBL_PER_PAGE]); + + for (j = 0; j <= NUM_DIRENT_PER_TBL; j+=NUM_PAGETBL_PER_PAGE) { + addr_t pgtbl_pn; + page_table_entry *pgtbl; + vm_page *page; + if (pgdir[j].type == DT_INVALID) + continue; + if (pgdir[j].type != DT_DIR) { + panic("rtdir[%d][%d]: buggy descriptor type", i, j); + return; + } + pgtbl_pn = PDE_TO_PN(pgdir[j]); + page = vm_lookup_page(pgtbl_pn); + pgtbl = (page_table_entry *)page; + + if (!page) { + panic("destroy_tmap: didn't find pgtable page\n"); + return; + } + vm_page_set_state(page, PAGE_STATE_FREE); + } + if (((i+1)%NUM_DIRTBL_PER_PAGE) == 0) + vm_page_set_state(dirpage, PAGE_STATE_FREE); + } + free(map->arch_data->rtdir_virt); + } + + free(map->arch_data); + recursive_lock_destroy(&map->lock); +} + + +static void +put_pgdir_in_pgroot(page_root_entry *entry, + addr_t pgdir_phys, uint32 attributes) +{ + page_root_entry dir; + // put it in the pgdir + init_page_root_entry(&dir); + dir.addr = TA_TO_PREA(pgdir_phys); + + // ToDo: we ignore the attributes of the page table - for compatibility + // with BeOS we allow having user accessible areas in the kernel address + // space. This is currently being used by some drivers, mainly for the + // frame buffer. Our current real time data implementation makes use of + // this fact, too. + // We might want to get rid of this possibility one day, especially if + // we intend to port it to a platform that does not support this. + //dir.user = 1; + //dir.rw = 1; + dir.type = DT_ROOT; + update_page_root_entry(entry, &dir); +} + + +static void +put_pgtable_in_pgdir(page_directory_entry *entry, + addr_t pgtable_phys, uint32 attributes) +{ + page_directory_entry table; + // put it in the pgdir + init_page_directory_entry(&table); + table.addr = TA_TO_PDEA(pgtable_phys); + + // ToDo: we ignore the attributes of the page table - for compatibility + // with BeOS we allow having user accessible areas in the kernel address + // space. This is currently being used by some drivers, mainly for the + // frame buffer. Our current real time data implementation makes use of + // this fact, too. + // We might want to get rid of this possibility one day, especially if + // we intend to port it to a platform that does not support this. 
+ //table.user = 1; + //table.rw = 1; + table.type = DT_DIR; + update_page_directory_entry(entry, &table); +} + + +static void +put_page_table_entry_in_pgtable(page_table_entry *entry, + addr_t physicalAddress, uint32 attributes, bool globalPage) +{ + page_table_entry page; + init_page_table_entry(&page); + + page.addr = TA_TO_PTEA(physicalAddress); + + // if the page is user accessible, it's automatically + // accessible in kernel space, too (but with the same + // protection) + page.supervisor = (attributes & B_USER_PROTECTION) == 0; + if (page.supervisor) + page.write_protect = (attributes & B_KERNEL_WRITE_AREA) == 0; + else + page.write_protect = (attributes & B_WRITE_AREA) == 0; + page.type = DT_PAGE; + +#ifdef PAGE_HAS_GLOBAL_BIT + if (globalPage) + page.global = 1; +#endif + + // put it in the page table + update_page_table_entry(entry, &page); +} + + +static size_t +map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end) +{ + size_t need; + size_t pgdirs = VADDR_TO_PRENT(end) + 1 - VADDR_TO_PRENT(start); + // how much for page directories + need = (pgdirs + NUM_DIRTBL_PER_PAGE - 1) / NUM_DIRTBL_PER_PAGE; + // and page tables themselves + need = ((pgdirs * NUM_DIRENT_PER_TBL) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE; + + // better rounding when only 1 pgdir + // XXX: do better for other cases + if (pgdirs == 1) { + need = 1; + need += (VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start) + NUM_PAGETBL_PER_PAGE - 1) / NUM_PAGETBL_PER_PAGE; + } + + return need; +} + + +static status_t +map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes) +{ + page_root_entry *pr; + page_directory_entry *pd; + page_table_entry *pt; + addr_t pd_pg, pt_pg; + unsigned int rindex, dindex, pindex; + int err; + + TRACE(("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va)); + +/* + dprintf("pgdir at 0x%x\n", pgdir); + dprintf("index is %d\n", va / B_PAGE_SIZE / 1024); + dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]); + dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]); + dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present); + dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr); +*/ + pr = map->arch_data->rtdir_virt; + + // check to see if a page directory exists for this range + rindex = VADDR_TO_PRENT(va); + if (pr[rindex].type != DT_ROOT) { + addr_t pgdir; + vm_page *page; + unsigned int i; + + // we need to allocate a pgtable + page = vm_page_allocate_page(PAGE_STATE_CLEAR, true); + + // mark the page WIRED + vm_page_set_state(page, PAGE_STATE_WIRED); + + pgdir = page->physical_page_number * B_PAGE_SIZE; + + TRACE(("map_tmap: asked for free page for pgdir. 0x%lx\n", pgdir)); + + // for each pgdir on the allocated page: + for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) { + unsigned aindex = rindex & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */ + page_root_entry *apr = &pr[aindex + i]; + + // put in the pgdir + put_pgdir_in_pgroot(apr, pgdir, attributes + | (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA)); + + // update any other page directories, if it maps kernel space + //XXX: suboptimal, should batch them + if ((aindex+i) >= FIRST_KERNEL_PGDIR_ENT + && (aindex+i) < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) + _update_all_pgdirs((aindex+i), pr[aindex+i]); + + pgdir += SIZ_DIRTBL; + } +#warning M68K: really mean map_count++ ?? 
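		// Editor's note on the #warning above: map_count is also what
		// get_mapped_size_tmap() reports as the mapped size, so counting this
		// page-directory allocation here as well inflates that figure.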
+ map->map_count++; + } + // now, fill in the pentry + do { + err = get_physical_page_tmap(PRE_TO_PA(pr[rindex]), + &pd_pg, PHYSICAL_PAGE_NO_WAIT); + } while (err < 0); + pd = (page_directory_entry *)pd_pg; + // we want the table at rindex, not at rindex%(tbl/page) + pd += rindex % NUM_DIRTBL_PER_PAGE; + + // check to see if a page table exists for this range + dindex = VADDR_TO_PDENT(va); + if (pd[dindex].type != DT_DIR) { + addr_t pgtable; + vm_page *page; + unsigned int i; + + // we need to allocate a pgtable + page = vm_page_allocate_page(PAGE_STATE_CLEAR, true); + + // mark the page WIRED + vm_page_set_state(page, PAGE_STATE_WIRED); + + pgtable = page->physical_page_number * B_PAGE_SIZE; + + TRACE(("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable)); + + // for each pgtable on the allocated page: + for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) { + unsigned aindex = dindex & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */ + page_directory_entry *apd = &pd[aindex + i]; + + // put in the pgdir + put_pgtable_in_pgdir(apd, pgtable, attributes + | (attributes & B_USER_PROTECTION ? B_WRITE_AREA : B_KERNEL_WRITE_AREA)); + + // no need to update other page directories for kernel space; + // the root-level already point to us. + + pgtable += SIZ_PAGETBL; + } + +#warning M68K: really mean map_count++ ?? + map->map_count++; + } + // now, fill in the pentry + do { + err = get_physical_page_tmap(PDE_TO_PA(pd[dindex]), + &pt_pg, PHYSICAL_PAGE_NO_WAIT); + } while (err < 0); + pt = (page_table_entry *)pt_pg; + // we want the table at rindex, not at rindex%(tbl/page) + pt += dindex % NUM_PAGETBL_PER_PAGE; + + pindex = VADDR_TO_PTENT(va); + + put_page_table_entry_in_pgtable(&pt[pindex], pa, attributes, + IS_KERNEL_MAP(map)); + + put_physical_page_tmap(pt_pg); + put_physical_page_tmap(pd_pg); + + if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) + map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va; + + map->arch_data->num_invalidate_pages++; + + map->map_count++; + + return 0; +} + + +static status_t +unmap_tmap(vm_translation_map *map, addr_t start, addr_t end) +{ + page_table_entry *pt; + page_directory_entry *pd; + page_root_entry *pr = map->arch_data->rtdir_virt; + addr_t pd_pg, pt_pg; + status_t status; + int index; + + start = ROUNDOWN(start, B_PAGE_SIZE); + end = ROUNDUP(end, B_PAGE_SIZE); + + TRACE(("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end)); + +restart: + if (start >= end) + return B_OK; + + index = VADDR_TO_PRENT(start); + if (pr[index].type != DT_ROOT) { + // no pagedir here, move the start up to access the next page table + start = ROUNDUP(start + 1, B_PAGE_SIZE); + goto restart; + } + + do { + status = get_physical_page_tmap(PRE_TO_PA(pr[index]), + &pd_pg, PHYSICAL_PAGE_NO_WAIT); + } while (status < B_OK); + pd = (page_directory_entry *)pd_pg; + pd += index % NUM_DIRTBL_PER_PAGE; + + index = VADDR_TO_PDENT(start); + if (pd[index].type != DT_DIR) { + // no pagetable here, move the start up to access the next page table + start = ROUNDUP(start + 1, B_PAGE_SIZE); + put_physical_page_tmap(pd_pg); + goto restart; + } + + do { + status = get_physical_page_tmap(PDE_TO_PA(pd[index]), + &pt_pg, PHYSICAL_PAGE_NO_WAIT); + } while (status < B_OK); + pt = (page_table_entry *)pt_pg; + pt += index % NUM_PAGETBL_PER_PAGE; + + for (index = VADDR_TO_PTENT(start); (index < NUM_PAGEENT_PER_TBL) && (start < end); + index++, start += B_PAGE_SIZE) { + if (pt[index].type != DT_PAGE && pt[index].type != DT_INDIRECT) { + // page mapping not 
valid + continue; + } + + TRACE(("unmap_tmap: removing page 0x%lx\n", start)); + + pt[index].type = DT_INVALID; + map->map_count--; + + if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) + map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start; + + map->arch_data->num_invalidate_pages++; + } + + put_physical_page_tmap(pt_pg); + put_physical_page_tmap(pd_pg); + + goto restart; +} + +// XXX: 040 should be able to do that with PTEST (but not 030 or 060) +static status_t +query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical, + uint32 *_flags) +{ + page_root_entry *pr = map->arch_data->rtdir_virt; + page_directory_entry *pd; + page_table_entry *pt; + addr_t physicalPageTable; + int32 cpu = smp_get_current_cpu(); + int32 index; + int level + + *_physical = 0; + + for (level = 0; level < 4; level++) { + + index = VADDR_TO_PDENT(va); + if (pd[index].type != 0) { + // no pagetable here + return B_ERROR; + } + + // map page table entry using our per CPU mapping page + + physicalPageTable = ADDR_REVERSE_SHIFT(pd[index].addr); + pt = (page_table_entry *)(sQueryPage/* + cpu * SIZ_DIRTBL*/); + index = VADDR_TO_PDENT((addr_t)pt); + if (pd[index].present == 0) { + // no page table here + return B_ERROR; + } + + index = VADDR_TO_PTENT((addr_t)pt); + put_page_table_entry_in_pgtable(&sQueryPageTable[index], physicalPageTable, + B_KERNEL_READ_AREA, false); + invalidate_TLB(pt); + + index = VADDR_TO_PTENT(va); + + switch (level) { + case 0: // root table + case 1: // directory table + case 2: // page table + if (.type == DT_INDIRECT) { + continue; + } + // FALLTHROUGH + case 3: // indirect desc + } + } + *_physical = ADDR_REVERSE_SHIFT(pt[index].addr); + + *_flags |= ((pt[index].rw ? B_KERNEL_WRITE_AREA : 0) | B_KERNEL_READ_AREA) + | (pt[index].dirty ? PAGE_MODIFIED : 0) + | (pt[index].accessed ? PAGE_ACCESSED : 0) + | (pt[index].present ? PAGE_PRESENT : 0); + + return B_OK; +} + + +static status_t +query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical, uint32 *_flags) +{ + page_table_entry *pt; + page_directory_entry *pd = map->arch_data->rtdir_virt; + status_t status; + int32 index; + + // default the flags to not present + *_flags = 0; + *_physical = 0; + + index = VADDR_TO_PDENT(va); + if (pd[index].present == 0) { + // no pagetable here + return B_NO_ERROR; + } + + do { + status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), + (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT); + } while (status < B_OK); + index = VADDR_TO_PTENT(va); + + *_physical = ADDR_REVERSE_SHIFT(pt[index].addr); + + // read in the page state flags + if (pt[index].user) + *_flags |= (pt[index].rw ? B_WRITE_AREA : 0) | B_READ_AREA; + + *_flags |= ((pt[index].rw ? B_KERNEL_WRITE_AREA : 0) | B_KERNEL_READ_AREA) + | (pt[index].dirty ? PAGE_MODIFIED : 0) + | (pt[index].accessed ? PAGE_ACCESSED : 0) + | (pt[index].present ? 
PAGE_PRESENT : 0); + + put_physical_page_tmap((addr_t)pt); + + TRACE(("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va)); + + return B_OK; +} + + +static addr_t +get_mapped_size_tmap(vm_translation_map *map) +{ + return map->map_count; +} + + +static status_t +protect_tmap(vm_translation_map *map, addr_t start, addr_t end, uint32 attributes) +{ + page_table_entry *pt; + page_directory_entry *pd = map->arch_data->rtdir_virt; + status_t status; + int index; + + start = ROUNDOWN(start, B_PAGE_SIZE); + end = ROUNDUP(end, B_PAGE_SIZE); + + TRACE(("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end, attributes)); + +restart: + if (start >= end) + return B_OK; + + index = VADDR_TO_PDENT(start); + if (pd[index].present == 0) { + // no pagetable here, move the start up to access the next page table + start = ROUNDUP(start + 1, B_PAGE_SIZE); + goto restart; + } + + do { + status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), + (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT); + } while (status < B_OK); + + for (index = VADDR_TO_PTENT(start); index < 1024 && start < end; index++, start += B_PAGE_SIZE) { + if (pt[index].present == 0) { + // page mapping not valid + continue; + } + + TRACE(("protect_tmap: protect page 0x%lx\n", start)); + + pt[index].user = (attributes & B_USER_PROTECTION) != 0; + if ((attributes & B_USER_PROTECTION) != 0) + pt[index].rw = (attributes & B_WRITE_AREA) != 0; + else + pt[index].rw = (attributes & B_KERNEL_WRITE_AREA) != 0; + + if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) + map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = start; + + map->arch_data->num_invalidate_pages++; + } + + put_physical_page_tmap((addr_t)pt); + + goto restart; +} + + +static status_t +clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags) +{ + page_table_entry *pt; + page_directory_entry *pd = map->arch_data->rtdir_virt; + status_t status; + int index; + int tlb_flush = false; + + index = VADDR_TO_PDENT(va); + if (pd[index].present == 0) { + // no pagetable here + return B_OK; + } + + do { + status = get_physical_page_tmap(ADDR_REVERSE_SHIFT(pd[index].addr), + (addr_t *)&pt, PHYSICAL_PAGE_NO_WAIT); + } while (status < B_OK); + index = VADDR_TO_PTENT(va); + + // clear out the flags we've been requested to clear + if (flags & PAGE_MODIFIED) { + pt[index].dirty = 0; + tlb_flush = true; + } + if (flags & PAGE_ACCESSED) { + pt[index].accessed = 0; + tlb_flush = true; + } + + put_physical_page_tmap((addr_t)pt); + + if (tlb_flush) { + if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) + map->arch_data->pages_to_invalidate[map->arch_data->num_invalidate_pages] = va; + + map->arch_data->num_invalidate_pages++; + } + + return B_OK; +} + + +static void +flush_tmap(vm_translation_map *map) +{ + cpu_status state; + + if (map->arch_data->num_invalidate_pages <= 0) + return; + + state = disable_interrupts(); + + if (map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) { + // invalidate all pages + TRACE(("flush_tmap: %d pages to invalidate, invalidate all\n", + map->arch_data->num_invalidate_pages)); + + if (IS_KERNEL_MAP(map)) { + arch_cpu_global_TLB_invalidate(); + smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0, NULL, + SMP_MSG_FLAG_SYNC); + } else { + arch_cpu_user_TLB_invalidate(); + smp_send_broadcast_ici(SMP_MSG_USER_INVALIDATE_PAGES, 0, 0, 0, NULL, + SMP_MSG_FLAG_SYNC); + } + } else { + TRACE(("flush_tmap: %d pages to invalidate, invalidate list\n", + 
map->arch_data->num_invalidate_pages)); + + arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate, + map->arch_data->num_invalidate_pages); + smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST, + (uint32)map->arch_data->pages_to_invalidate, + map->arch_data->num_invalidate_pages, 0, NULL, + SMP_MSG_FLAG_SYNC); + } + map->arch_data->num_invalidate_pages = 0; + + restore_interrupts(state); +} + + +static status_t +map_iospace_chunk(addr_t va, addr_t pa) +{ + int i; + page_table_entry *pt; + addr_t ppn; + int state; + + pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned + va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned + if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE)) + panic("map_iospace_chunk: passed invalid va 0x%lx\n", va); + + ppn = ADDR_SHIFT(pa); + pt = &iospace_pgtables[(va - sIOSpaceBase) / B_PAGE_SIZE]; + for (i = 0; i < 1024; i++) { + init_page_table_entry(&pt[i]); + pt[i].addr = ppn + i; + pt[i].user = 0; + pt[i].rw = 1; + pt[i].present = 1; + pt[i].global = 1; + } + + state = disable_interrupts(); + arch_cpu_invalidate_TLB_range(va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE)); + smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_RANGE, + va, va + (IOSPACE_CHUNK_SIZE - B_PAGE_SIZE), 0, + NULL, SMP_MSG_FLAG_SYNC); + restore_interrupts(state); + + return B_OK; +} + + +static status_t +get_physical_page_tmap(addr_t pa, addr_t *va, uint32 flags) +{ + return generic_get_physical_page(pa, va, flags); +} + + +static status_t +put_physical_page_tmap(addr_t va) +{ + return generic_put_physical_page(va); +} + + +static vm_translation_map_ops tmap_ops = { + destroy_tmap, + lock_tmap, + unlock_tmap, + map_max_pages_need, + map_tmap, + unmap_tmap, + query_tmap, + query_tmap_interrupt, + get_mapped_size_tmap, + protect_tmap, + clear_flags_tmap, + flush_tmap, + get_physical_page_tmap, + put_physical_page_tmap +}; + + +// #pragma mark - +// VM API + + +static status_t +arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel) +{ + if (map == NULL) + return B_BAD_VALUE; + + TRACE(("vm_translation_map_create\n")); + + // initialize the new object + map->ops = &tmap_ops; + map->map_count = 0; + + if (!kernel) { + // During the boot process, there are no semaphores available at this + // point, so we only try to create the translation map lock if we're + // initialize a user translation map. + // vm_translation_map_init_kernel_map_post_sem() is used to complete + // the kernel translation map. 
+ if (recursive_lock_init(&map->lock, "translation map") < B_OK) + return map->lock.sem; + } + + map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info)); + if (map == NULL) { + recursive_lock_destroy(&map->lock); + return B_NO_MEMORY; + } + + map->arch_data->num_invalidate_pages = 0; + + if (!kernel) { + // user + // allocate a pgdir + map->arch_data->rtdir_virt = (page_directory_entry *)memalign( + B_PAGE_SIZE, B_PAGE_SIZE); + if (map->arch_data->rtdir_virt == NULL) { + free(map->arch_data); + recursive_lock_destroy(&map->lock); + return B_NO_MEMORY; + } + vm_get_page_mapping(vm_kernel_address_space_id(), + (addr_t)map->arch_data->rtdir_virt, (addr_t *)&map->arch_data->rtdir_phys); + } else { + // kernel + // we already know the kernel pgdir mapping + map->arch_data->rtdir_virt = sKernelVirtualPageDirectory; + map->arch_data->rtdir_phys = sKernelPhysicalPageDirectory; + } + + // zero out the bottom portion of the new pgdir + memset(map->arch_data->rtdir_virt + FIRST_USER_PGDIR_ENT, 0, + NUM_USER_PGDIR_ENTS * sizeof(page_directory_entry)); + + // insert this new map into the map list + { + int state = disable_interrupts(); + acquire_spinlock(&tmap_list_lock); + + // copy the top portion of the pgdir from the current one + memcpy(map->arch_data->rtdir_virt + FIRST_KERNEL_PGDIR_ENT, + sKernelVirtualPageDirectory + FIRST_KERNEL_PGDIR_ENT, + NUM_KERNEL_PGDIR_ENTS * sizeof(page_directory_entry)); + + map->next = tmap_list; + tmap_list = map; + + release_spinlock(&tmap_list_lock); + restore_interrupts(state); + } + + return B_OK; +} + + +static status_t +arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map) +{ + if (recursive_lock_init(&map->lock, "translation map") < B_OK) + return map->lock.sem; + + return B_OK; +} + + +static status_t +arch_vm_translation_map_init(kernel_args *args) +{ + status_t error; + + TRACE(("vm_translation_map_init: entry\n")); +#if 0 + // page hole set up in stage2 + page_hole = (page_table_entry *)args->arch_args.page_hole; + // calculate where the pgdir would be + page_hole_pgdir = (page_directory_entry *)(((unsigned int)args->arch_args.page_hole) + (B_PAGE_SIZE * 1024 - B_PAGE_SIZE)); + // clear out the bottom 2 GB, unmap everything + memset(page_hole_pgdir + FIRST_USER_PGDIR_ENT, 0, sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS); +#endif + + sKernelPhysicalPageDirectory = (page_directory_entry *)args->arch_args.phys_pgdir; + sKernelVirtualPageDirectory = (page_directory_entry *)args->arch_args.vir_pgdir; + + sQueryDesc.type = DT_INVALID; + + tmap_list_lock = 0; + tmap_list = NULL; + + // allocate some space to hold physical page mapping info + iospace_pgtables = (page_table_entry *)vm_allocate_early(args, + B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)), ~0L, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); + + TRACE(("iospace_pgtables %p\n", iospace_pgtables)); + + // init physical page mapper + error = generic_vm_physical_page_mapper_init(args, map_iospace_chunk, + &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE); + if (error != B_OK) + return error; + + // initialize our data structures + memset(iospace_pgtables, 0, B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024))); + + TRACE(("mapping iospace_pgtables\n")); + + // put the array of pgtables directly into the kernel pagedir + // these will be wired and kept mapped into virtual space to be easy to get to + { + addr_t phys_pgtable; + addr_t virt_pgtable; + page_directory_entry *e; + int i; + + virt_pgtable = (addr_t)iospace_pgtables; + for (i = 0; 
i < (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)); i++, virt_pgtable += B_PAGE_SIZE) { + early_query(virt_pgtable, &phys_pgtable); + e = &page_hole_pgdir[(sIOSpaceBase / (B_PAGE_SIZE * 1024)) + i]; + put_pgtable_in_pgdir(e, phys_pgtable, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); + } + } + + // enable global page feature if available + if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) { + // this prevents kernel pages from being flushed from TLB on context-switch + x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES); + } + + TRACE(("vm_translation_map_init: done\n")); + + return B_OK; +} + + +static status_t +arch_vm_translation_map_init_post_sem(kernel_args *args) +{ + return generic_vm_physical_page_mapper_init_post_sem(args); +} + + +static status_t +arch_vm_translation_map_init_post_area(kernel_args *args) +{ + // now that the vm is initialized, create a region that represents + // the page hole + void *temp; + status_t error; + area_id area; + + TRACE(("vm_translation_map_init_post_area: entry\n")); + + // unmap the page hole hack we were using before + sKernelVirtualPageDirectory[1023].present = 0; + page_hole_pgdir = NULL; + page_hole = NULL; + + temp = (void *)sKernelVirtualPageDirectory; + area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE, + B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); + if (area < B_OK) + return area; + + temp = (void *)iospace_pgtables; + area = create_area("iospace_pgtables", &temp, B_EXACT_ADDRESS, + B_PAGE_SIZE * (IOSPACE_SIZE / (B_PAGE_SIZE * 1024)), + B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA); + if (area < B_OK) + return area; + + error = generic_vm_physical_page_mapper_init_post_area(args); + if (error != B_OK) + return error; + + // this area is used for query_tmap_interrupt() + // TODO: Note, this only works as long as all pages belong to the same + // page table, which is not yet enforced (or even tested)! + // Note we don't support SMP which makes things simpler. + + area = vm_create_null_area(vm_kernel_address_space_id(), + "interrupt query pages", (void **)&sQueryPage, B_ANY_ADDRESS, + B_PAGE_SIZE); + if (area < B_OK) + return area; + + // insert the indirect descriptor in the tree so we can map the page we want from it. + + { + page_directory_entry *pageDirEntry; + page_indirect_entry *pageTableEntry; + addr_t physicalPageTable; + int32 index; + + sQueryPageTable = (page_indirect_entry *)(sQueryPage); + + index = VADDR_TO_PRENT((addr_t)sQueryPageTable); + physicalPageTable = ADDR_REVERSE_SHIFT(sKernelVirtualPageDirectory[index].addr); + + get_physical_page_tmap(physicalPageTable, + (addr_t *)&pageTableEntry, PHYSICAL_PAGE_NO_WAIT); + + sQueryPageTable = (page_table_entry *)(sQueryPages); + + index = VADDR_TO_PDENT((addr_t)sQueryPageTable); + physicalPageTable = ADDR_REVERSE_SHIFT(sKernelVirtualPageDirectory[index].addr); + + get_physical_page_tmap(physicalPageTable, + (addr_t *)&pageTableEntry, PHYSICAL_PAGE_NO_WAIT); + + index = VADDR_TO_PTENT((addr_t)sQueryPageTable); + put_page_table_entry_in_pgtable(&pageTableEntry[index], physicalPageTable, + B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false); + + put_physical_page_tmap((addr_t)pageTableEntry); + put_physical_page_tmap((addr_t)pageDirEntry); + //invalidate_TLB(sQueryPageTable); + } + + TRACE(("vm_translation_map_init_post_area: done\n")); + return B_OK; +} + + +// XXX horrible back door to map a page quickly regardless of translation map object, etc. +// used only during VM setup. +// uses a 'page hole' set up in the stage 2 bootloader. 
The page hole is created by pointing one of
+// the pgdir entries back at itself, effectively mapping the contents of all of the 4 MB of pagetables
+// into a 4 MB region. It's only used here, and is later unmapped.
+
+static status_t
+arch_vm_translation_map_early_map(kernel_args *args, addr_t va, addr_t pa,
+	uint8 attributes, addr_t (*get_free_page)(kernel_args *))
+{
+	int index;
+
+	TRACE(("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
+
+	// check to see if a page table exists for this range
+	index = VADDR_TO_PDENT(va);
+	if (page_hole_pgdir[index].present == 0) {
+		addr_t pgtable;
+		page_directory_entry *e;
+		// we need to allocate a pgtable
+		pgtable = get_free_page(args);
+		// pgtable is in pages, convert to physical address
+		pgtable *= B_PAGE_SIZE;
+
+		TRACE(("early_map: asked for free page for pgtable. 0x%lx\n", pgtable));
+
+		// put it in the pgdir
+		e = &page_hole_pgdir[index];
+		put_pgtable_in_pgdir(e, pgtable, attributes);
+
+		// zero it out in its new mapping
+		memset((unsigned int *)((unsigned int)page_hole + (va / B_PAGE_SIZE / 1024) * B_PAGE_SIZE), 0, B_PAGE_SIZE);
+	}
+
+	// now, fill in the pentry
+	put_page_table_entry_in_pgtable(page_hole + va / B_PAGE_SIZE, pa, attributes,
+		IS_KERNEL_ADDRESS(va));
+
+	arch_cpu_invalidate_TLB_range(va, va);
+
+	return B_OK;
+}
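/*
 * Editor's sketch (not part of the original commit): how early-boot code is
 * expected to drive arch_vm_translation_map_early_map() -- one call per page,
 * passing along the kernel_args and the callback that hands out free physical
 * pages for any page tables that still have to be allocated. The helper name
 * is made up for illustration.
 */
static status_t
early_map_range_example(kernel_args *args, addr_t va, addr_t pa, size_t size,
	addr_t (*get_free_page)(kernel_args *))
{
	addr_t offset;

	for (offset = 0; offset < size; offset += B_PAGE_SIZE) {
		status_t status = arch_vm_translation_map_early_map(args, va + offset,
			pa + offset, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
			get_free_page);
		if (status != B_OK)
			return status;
	}

	return B_OK;
}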