Updated the PPC code to match recent changes in other parts of the kernel.

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@9798 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2004-11-04 16:41:32 +00:00
parent 848b55ddd3
commit bd00cbc6a9
6 changed files with 222 additions and 154 deletions

View File

@ -1,7 +1,10 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2003-2004, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <KernelExport.h>
@ -9,24 +12,24 @@
#include <boot/kernel_args.h>
int
arch_cpu_preboot_init(kernel_args *ka)
status_t
arch_cpu_preboot_init(kernel_args *args)
{
return 0;
return B_OK;
}
int
arch_cpu_init(kernel_args *ka)
status_t
arch_cpu_init(kernel_args *args)
{
return 0;
return B_OK;
}
int
arch_cpu_init2(kernel_args *ka)
status_t
arch_cpu_init_post_vm(kernel_args *args)
{
return 0;
return B_OK;
}
#define CACHELINE 32
@ -121,13 +124,13 @@ system_time(void)
#endif
int
arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr *fault_handler)
status_t
arch_cpu_user_memcpy(void *to, const void *from, size_t size, addr_t *fault_handler)
{
char *tmp = (char *)to;
char *s = (char *)from;
*fault_handler = (addr)&&error;
*fault_handler = (addr_t)&&error;
while (size--)
*tmp++ = *s++;
@ -151,12 +154,12 @@ error:
* \return strlen(\a from).
*/
int
arch_cpu_user_strlcpy(char *to, const char *from, size_t size, addr *faultHandler)
ssize_t
arch_cpu_user_strlcpy(char *to, const char *from, size_t size, addr_t *faultHandler)
{
int from_length = 0;
*faultHandler = (addr)&&error;
*faultHandler = (addr_t)&&error;
if (size > 0) {
to[--size] = '\0';
@ -179,12 +182,12 @@ error:
}
int
arch_cpu_user_memset(void *s, char c, size_t count, addr *fault_handler)
status_t
arch_cpu_user_memset(void *s, char c, size_t count, addr_t *fault_handler)
{
char *xs = (char *)s;
*fault_handler = (addr)&&error;
*fault_handler = (addr_t)&&error;
while (count--)
*xs++ = c;

View File

@ -1,7 +1,10 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2003-2004, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <kernel.h>
@ -210,34 +213,18 @@ arch_dbg_putchar(char c)
#endif
int
arch_dbg_con_init(kernel_args *ka)
void
arch_dbg_con_early_boot_message(const char *string)
{
#if FRAMEBUFFER_DBG_CONSOLE
framebuffer = (unsigned char *)ka->fb.mapping.start;
screen_size_x = ka->fb.x_size;
screen_size_y = ka->fb.y_size;
back_color = 0x0;
draw_color = 0xff;
char_x = 0;
char_y = ka->cons_line;
screen_depth = ka->fb.bit_depth;
num_cols = screen_size_x / CHAR_WIDTH;
num_rows = screen_size_y / CHAR_HEIGHT;
#endif
return B_NO_ERROR;
// this function will only be called in fatal situations
}
int
arch_dbg_con_init2(kernel_args *ka)
status_t
arch_dbg_con_init(kernel_args *args)
{
#if 0
#if FRAMEBUFFER_DBG_CONSOLE
#if 0
region_id fb_region;
void *new_framebuffer;
@ -249,38 +236,50 @@ arch_dbg_con_init2(kernel_args *ka)
dprintf("framebuffer now at %p, phys addr 0x%x\n", new_framebuffer, ka->fb.phys_addr.start);
dprintf("compare %d\n", memcmp(framebuffer, new_framebuffer, ka->fb.phys_addr.size));
#endif
// ToDo: this has to be mapped before it can be used!
framebuffer = (unsigned char *)args->frame_buffer.physical_buffer.start;
screen_size_x = args->frame_buffer.width;
screen_size_y = args->frame_buffer.height;
back_color = 0x0;
draw_color = 0xff;
char_x = 0;
char_y = args->cons_line;
screen_depth = args->frame_buffer.depth;
num_cols = screen_size_x / CHAR_WIDTH;
num_rows = screen_size_y / CHAR_HEIGHT;
#endif
#endif
return B_NO_ERROR;
}
char arch_dbg_con_read()
char
arch_dbg_con_read()
{
for(;;);
for (;;);
return 0;
}
char arch_dbg_con_putch(const char c)
char
arch_dbg_con_putch(const char c)
{
	// There is no real debug console output path on PPC yet; simply echo
	// the character back so callers can treat the write as successful.
	const char echoed = c;
	return echoed;
}
void arch_dbg_con_puts(const char *str)
void
arch_dbg_con_puts(const char *str)
{
while(*str) {
while (*str) {
arch_dbg_putchar(*str);
str++;
}
}
/** Writes \a len bytes from \a buf to the debug console, one character
 *	at a time via arch_dbg_putchar().
 *	Returns the number of bytes written — previously this always returned
 *	0, even though all \a len bytes had been emitted, which misreports the
 *	write size to callers that check it.
 */
ssize_t arch_dbg_con_write(const void *buf, ssize_t len)
{
	const char *bytes = (const char *)buf;
	ssize_t i;

	for (i = 0; i < len; i++)
		arch_dbg_putchar(bytes[i]);

	// report how many bytes actually went out (i == len here)
	return i;
}

View File

@ -1,7 +1,10 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2003-2004, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <boot/kernel_args.h>
@ -150,23 +153,23 @@ ppc_exception_entry(int vector, struct iframe *iframe)
}
int
arch_int_init(kernel_args *ka)
status_t
arch_int_init(kernel_args *args)
{
return 0;
return B_OK;
}
int
arch_int_init2(kernel_args *ka)
status_t
arch_int_init_post_vm(kernel_args *args)
{
region_id exception_region;
void *handlers;
// create a region to map the irq vector code into (physical address 0x0)
handlers = (void *)ka->arch_args.exception_handlers.start;
handlers = (void *)args->arch_args.exception_handlers.start;
exception_region = vm_create_anonymous_region(vm_get_kernel_aspace_id(), "exception_handlers",
&handlers, B_EXACT_ADDRESS, ka->arch_args.exception_handlers.size,
&handlers, B_EXACT_ADDRESS, args->arch_args.exception_handlers.size,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (exception_region < 0)
panic("arch_int_init2: could not create exception handler region\n");
@ -174,9 +177,9 @@ arch_int_init2(kernel_args *ka)
dprintf("exception handlers at %p\n", handlers);
// copy the handlers into this area
memcpy(handlers, &__irqvec_start, ka->arch_args.exception_handlers.size);
memcpy(handlers, &__irqvec_start, args->arch_args.exception_handlers.size);
arch_cpu_sync_icache(0, 0x1000);
return 0;
return B_OK;
}

View File

@ -1,7 +1,10 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
/*
* Copyright 2003-2004, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <kernel.h>
@ -11,24 +14,31 @@
#include <string.h>
int
arch_team_init_team_struct(struct team *team, bool kernel)
status_t
arch_thread_init(struct kernel_args *args)
{
return 0;
return B_OK;
}
int
/** Initializes the architecture-specific portion of a team structure.
 *	Currently a no-op on PPC — there is no PPC-specific team state to set
 *	up yet. ("kernel" presumably distinguishes the kernel team from user
 *	teams — TODO confirm against callers.)
 */
status_t
arch_team_init_team_struct(struct team *team, bool kernel)
{
	return B_OK;
}
status_t
arch_thread_init_thread_struct(struct thread *t)
{
// set up an initial state (stack & fpu)
memset(&t->arch_info, 0, sizeof(t->arch_info));
return 0;
return B_OK;
}
int
status_t
arch_thread_init_kthread_stack(struct thread *t, int (*start_func)(void), void (*entry_func)(void), void (*exit_func)(void))
{
addr_t *kstack = (addr_t *)t->kernel_stack_base;
@ -45,7 +55,7 @@ arch_thread_init_kthread_stack(struct thread *t, int (*start_func)(void), void (
// save this stack position
t->arch_info.sp = (void *)kstack_top;
return 0;
return B_OK;
}
@ -81,9 +91,9 @@ arch_thread_context_switch(struct thread *t_from, struct thread *t_to)
asm("mtear %0" :: "g"(t_to->kernel_stack_base + KSTACK_SIZE - 8));
// switch the asids if we need to
if (t_to->team->_aspace_id >= 0) {
if (t_to->team->aspace != NULL) {
// the target thread has is user space
if (t_from->team->_aspace_id != t_to->team->_aspace_id) {
if (t_from->team != t_to->team) {
// switching to a new address space
ppc_translation_map_change_asid(&t_to->team->aspace->translation_map);
}
@ -103,7 +113,7 @@ arch_thread_dump_info(void *info)
void
arch_thread_enter_uspace(struct thread *thread, addr entry, void *arg1, void *arg2)
arch_thread_enter_uspace(struct thread *thread, addr_t entry, void *arg1, void *arg2)
{
panic("arch_thread_enter_uspace(): not yet implemented\n");
}
@ -129,3 +139,27 @@ arch_check_syscall_restart(struct thread *thread)
}
/** Saves everything needed to restore the frame in the child fork in the
 * arch_fork_arg structure to be passed to arch_restore_fork_frame().
 * Also makes sure to return the right value.
 */
void
arch_store_fork_frame(struct arch_fork_arg *arg)
{
	// TODO: not yet implemented for PPC — nothing is stored in "arg",
	// so fork() presumably cannot work on this architecture yet.
}
/** Restores the frame from a forked team as specified by the provided
 * arch_fork_arg structure.
 * Needs to be called from within the child team, ie. instead of
 * arch_thread_enter_uspace() as thread "starter".
 * This function does not return to the caller, but will enter userland
 * in the child team at the same position where the parent team left of.
 */
void
arch_restore_fork_frame(struct arch_fork_arg *arg)
{
	// TODO: not yet implemented for PPC (see arch_store_fork_frame()).
}

View File

@ -1,7 +1,10 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2003-2004, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <kernel.h>
@ -11,15 +14,15 @@
#include <arch_mmu.h>
int
arch_vm_init(kernel_args *ka)
status_t
arch_vm_init(kernel_args *args)
{
return 0;
return B_OK;
}
int
arch_vm_init2(kernel_args *ka)
status_t
arch_vm_init2(kernel_args *args)
{
// int bats[8];
// int i;
@ -76,27 +79,24 @@ arch_vm_init2(kernel_args *ka)
bats[0] = bats[1] = 0;
setdbats(bats);
#endif
return 0;
return B_OK;
}
int
arch_vm_init_existing_maps(kernel_args *ka)
status_t
arch_vm_init_post_area(kernel_args *args)
{
void *temp = (void *)ka->fb.mapping.start;
// create a region for the framebuffer
vm_create_anonymous_region(vm_get_kernel_aspace_id(), "framebuffer", &temp, B_EXACT_ADDRESS,
ka->fb.mapping.size, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
return B_NO_ERROR;
return B_OK;
}
int
arch_vm_init_endvm(kernel_args *ka)
status_t
arch_vm_init_end(kernel_args *args)
{
return B_NO_ERROR;
// throw away anything in the kernel_args.pgtable[] that's not yet mapped
//vm_free_unused_boot_loader_range(KERNEL_BASE, 0x400000 * args->arch_args.num_pgtables);
return B_OK;
}

View File

@ -1,7 +1,10 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2003-2004, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <KernelExport.h>
@ -35,11 +38,31 @@ spinlock asid_bitmap_lock;
(((map)->arch_data->asid_base << ASID_SHIFT) + ((vaddr) / 0x10000000))
// vm_translation object stuff
typedef struct vm_translation_map_arch_info_struct {
typedef struct vm_translation_map_arch_info {
int asid_base; // shift left by ASID_SHIFT to get the base asid to use
} vm_translation_map_arch_info;
/** Activates the given translation map's address space ID by loading the
 *	user segment registers SR0-SR7 (which cover the address range below
 *	KERNEL_BASE) with consecutive values starting at the map's asid_base.
 *	The kernel segments are deliberately left untouched — which is only
 *	correct while the kernel lives at 0x80000000, hence the compile-time
 *	guard below.
 */
void
ppc_translation_map_change_asid(vm_translation_map *map)
{
	// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
	int asid_base = map->arch_data->asid_base;

	// mtsr = Move To Segment Register; one 256 MB segment per register
	asm("mtsr 0,%0" : : "g"(asid_base));
	asm("mtsr 1,%0" : : "g"(asid_base + 1));
	asm("mtsr 2,%0" : : "g"(asid_base + 2));
	asm("mtsr 3,%0" : : "g"(asid_base + 3));
	asm("mtsr 4,%0" : : "g"(asid_base + 4));
	asm("mtsr 5,%0" : : "g"(asid_base + 5));
	asm("mtsr 6,%0" : : "g"(asid_base + 6));
	asm("mtsr 7,%0" : : "g"(asid_base + 7));
}
static status_t
lock_tmap(vm_translation_map *map)
{
@ -331,25 +354,40 @@ static vm_translation_map_ops tmap_ops = {
};
int
vm_translation_map_create(vm_translation_map *new_map, bool kernel)
// #pragma mark -
// VM API
status_t
arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
{
// initialize the new object
new_map->ops = &tmap_ops;
new_map->map_count = 0;
if (recursive_lock_init(&new_map->lock, "map lock") < B_OK)
return B_NO_MEMORY;
map->ops = &tmap_ops;
map->map_count = 0;
new_map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
if (new_map->arch_data == NULL)
if (!kernel) {
// During the boot process, there are no semaphores available at this
// point, so we only try to create the translation map lock if we're
// initialize a user translation map.
// vm_translation_map_init_kernel_map_post_sem() is used to complete
// the kernel translation map.
if (recursive_lock_init(&map->lock, "translation map") < B_OK)
return map->lock.sem;
}
map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
if (map->arch_data == NULL) {
if (!kernel)
recursive_lock_destroy(&map->lock);
return B_NO_MEMORY;
}
cpu_status state = disable_interrupts();
acquire_spinlock(&asid_bitmap_lock);
// allocate a ASID base for this one
if (kernel) {
new_map->arch_data->asid_base = 0; // set up by the bootloader
map->arch_data->asid_base = 0; // set up by the bootloader
asid_bitmap[0] |= 0x1;
} else {
int i = 0;
@ -368,18 +406,28 @@ vm_translation_map_create(vm_translation_map *new_map, bool kernel)
}
if (i >= MAX_ASIDS)
panic("vm_translation_map_create: out of ASIDs\n");
new_map->arch_data->asid_base = i;
map->arch_data->asid_base = i;
}
release_spinlock(&asid_bitmap_lock);
restore_interrupts(state);
return 0;
return B_OK;
}
int
vm_translation_map_module_init(kernel_args *args)
/** Completes the kernel translation map initialization once semaphores
 *	are available: creates the recursive lock that could not be created
 *	during early boot, when no semaphores existed yet.
 */
status_t
arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
{
	// on failure, lock.sem presumably holds the error code from
	// recursive_lock_init() — TODO confirm against its implementation
	if (recursive_lock_init(&map->lock, "translation map") < B_OK)
		return map->lock.sem;
	return B_OK;
}
status_t
arch_vm_translation_map_init(kernel_args *args)
{
sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
sPageTableSize = args->arch_args.page_table.size;
@ -389,14 +437,15 @@ vm_translation_map_module_init(kernel_args *args)
}
void
vm_translation_map_module_init_post_sem(kernel_args *ka)
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
return B_OK;
}
int
vm_translation_map_module_init2(kernel_args *ka)
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
// create a region to cover the page table
sPageTableRegion = vm_create_anonymous_region(vm_get_kernel_aspace_id(),
@ -457,7 +506,7 @@ vm_translation_map_module_init2(kernel_args *ka)
*/
status_t
vm_translation_map_quick_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
uint8 attributes, addr_t (*get_free_page)(kernel_args *))
{
uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
@ -491,31 +540,11 @@ vm_translation_map_quick_map(kernel_args *ka, addr_t virtualAddress, addr_t phys
// XXX currently assumes this translation map is active
int
vm_translation_map_quick_query(addr_t va, addr_t *out_physical)
status_t
arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
{
//PANIC_UNIMPLEMENTED();
panic("vm_translation_map_quick_query(): not yet implemented\n");
return 0;
}
void
ppc_translation_map_change_asid(vm_translation_map *map)
{
// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
int asid_base = map->arch_data->asid_base;
asm("mtsr 0,%0" :: "g"(asid_base));
asm("mtsr 1,%0" :: "g"(asid_base+1));
asm("mtsr 2,%0" :: "g"(asid_base+2));
asm("mtsr 3,%0" :: "g"(asid_base+3));
asm("mtsr 4,%0" :: "g"(asid_base+4));
asm("mtsr 5,%0" :: "g"(asid_base+5));
asm("mtsr 6,%0" :: "g"(asid_base+6));
asm("mtsr 7,%0" :: "g"(asid_base+7));
return B_OK;
}