Merged branch haiku/branches/developer/bonefish/optimization revision
23139 into trunk, with roughly the following changes (for details, svn
log the branch):
* The int 99 syscall handler is now fully in assembly.
* Added a sysenter/sysexit handler and use it on Pentiums that support
  it (via commpage).
* Got rid of i386_handle_trap(). A bit of functionality was moved into
  the assembly handler which now uses a jump table to call C functions
  handling the respective interrupt.
* Some optimizations to get user debugger support code out of the
  interrupt handling path.
* Introduced a thread::flags field which allows skipping handling of
  rare events (signals, user debug enabling/disabling) on the
  common interrupt handling path.
* Got rid of the explicit iframe stack. The iframes can still be
  retrieved by iterating through the stack frames.
* Made the commpage an architecture independent feature. It's used for
  the real time data stuff (instead of creating a separate area).
* The x86 CPU modules can now provide processor optimized versions for
  common functions (currently memcpy() only). They are used in the
  kernel and are provided to the userland via commpage entries.
* Introduced build system feature allowing easy use of C structure
  member offsets in assembly code.

Changes after merging:
* Fixed merge conflict in src/system/kernel/arch/x86/arch_debug.cpp
  (caused by refactoring and introduction of "call" debugger command).



git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23370 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2008-01-11 00:36:44 +00:00
parent 5a69bb2730
commit 34b3b26b3b
50 changed files with 1641 additions and 722 deletions

@ -193,9 +193,10 @@ HAIKU_CCFLAGS += -Wno-multichar ;
HAIKU_C++FLAGS += -Wno-multichar ;
HAIKU_KERNEL_CCFLAGS += -finline -fno-builtin -Wno-multichar
-DBOCHS_DEBUG_HACK=$(BOCHS_DEBUG_HACK) -D_KERNEL_MODE ;
-DBOCHS_DEBUG_HACK=$(BOCHS_DEBUG_HACK) ;
HAIKU_KERNEL_C++FLAGS += -finline -fno-builtin -fno-exceptions -Wno-multichar
-DBOCHS_DEBUG_HACK=$(BOCHS_DEBUG_HACK) -D_KERNEL_MODE ;
-DBOCHS_DEBUG_HACK=$(BOCHS_DEBUG_HACK) ;
HAIKU_KERNEL_DEFINES += _KERNEL_MODE ;
if $(HAIKU_GCC_VERSION[1]) >= 3 {
HAIKU_KERNEL_C++FLAGS += -fno-use-cxa-atexit ;
@ -432,6 +433,7 @@ HOST_KERNEL_CCFLAGS += $(HOST_GCC_BASE_FLAGS) -finline -fno-builtin
-DBOCHS_DEBUG_HACK=$(BOCHS_DEBUG_HACK) -D_KERNEL_MODE ;
HOST_KERNEL_C++FLAGS += $(HOST_GCC_BASE_FLAGS) -finline -fno-builtin
-fno-exceptions -DBOCHS_DEBUG_HACK=$(BOCHS_DEBUG_HACK) -D_KERNEL_MODE ;
HOST_KERNEL_DEFINES += _KERNEL_MODE ;
HOST_KERNEL_PIC_CCFLAGS = -fno-pic ;
HOST_KERNEL_PIC_LINKFLAGS = ;
@ -647,6 +649,7 @@ local buildVars =
HDRS CPPFLAGS CCFLAGS C++FLAGS LDFLAGS LINK LINKFLAGS DEFINES
ARFLAGS UNARFLAGS
KERNEL_DEFINES
KERNEL_CCFLAGS KERNEL_C++FLAGS
KERNEL_PIC_CCFLAGS KERNEL_PIC_LINKFLAGS

@ -20,6 +20,7 @@ rule SetupKernel
# add kernel flags for the object
ObjectCcFlags $(object) : $(TARGET_KERNEL_CCFLAGS) $(2) ;
ObjectC++Flags $(object) : $(TARGET_KERNEL_C++FLAGS) $(2) ;
ObjectDefines $(object) : $(TARGET_KERNEL_DEFINES) ;
# override warning flags
TARGET_WARNING_CCFLAGS on $(object) = $(TARGET_KERNEL_WARNING_CCFLAGS) ;

@ -212,6 +212,118 @@ actions Ld
$(LINK) $(LINKFLAGS) -o "$(1)" "$(2)" "$(NEEDLIBS)" $(LINKLIBS)
}
# CreateAsmStructOffsetsHeader <header> : <source>
#
# Generates <header> containing "#define" lines with C/C++ structure member
# offsets, by compiling <source> as C++ for the current PLATFORM and
# post-processing the compiler output (see the CreateAsmStructOffsetsHeader1
# actions). The generated header can then be included from assembly code.
# Compiler, flags, defines, and include paths are gathered from the same
# variables an ordinary C++ compilation would use.
rule CreateAsmStructOffsetsHeader header : source
{
# CreateAsmStructOffsetsHeader header : source
#
# Grist will be added to both header and source.
header = [ FGristFiles $(header) ] ;
source = [ FGristFiles $(source) ] ;
# find out which headers, defines, etc. to use
local headers ;
local sysHeaders ;
local defines ;
local flags ;
local includesSeparator ;
local localIncludesOption ;
local systemIncludesOption ;
on $(header) { # use on $(1) variable values
if ! $(PLATFORM) in $(SUPPORTED_PLATFORMS) {
# unsupported platform -- nothing to do
return ;
}
# headers and defines
headers = $(SEARCH_SOURCE) $(SUBDIRHDRS) $(HDRS) ;
sysHeaders = $(SUBDIRSYSHDRS) $(SYSHDRS) ;
defines = $(DEFINES) ;
if $(PLATFORM) = host {
sysHeaders += $(HOST_HDRS) ;
defines += $(HOST_DEFINES) ;
if $(USES_BE_API) {
sysHeaders += $(HOST_BE_API_HEADERS) ;
}
} else {
sysHeaders += $(TARGET_HDRS) ;
defines += $(TARGET_DEFINES) ;
}
# optimization flags
if $(DEBUG) = 0 {
flags += $(OPTIM) ;
} else {
flags += -O0 ;
}
# pick host vs. target compiler, flags, and include syntax
if $(PLATFORM) = host {
# warning flags
if $(WARNINGS) != 0 {
flags += $(HOST_WARNING_C++FLAGS) ;
}
# debug and other flags
flags += $(HOST_C++FLAGS) $(HOST_DEBUG_$(DEBUG)_C++FLAGS)
$(SUBDIRC++FLAGS) $(C++FLAGS) ;
if $(USES_BE_API) {
flags += $(HOST_BE_API_C++FLAGS) ;
}
C++ on $(header) = $(HOST_C++) ;
includesSeparator = $(HOST_INCLUDES_SEPARATOR) ;
localIncludesOption = $(HOST_LOCAL_INCLUDES_OPTION) ;
systemIncludesOption = $(HOST_SYSTEM_INCLUDES_OPTION) ;
} else {
# warning flags
if $(WARNINGS) != 0 {
flags += $(TARGET_WARNING_C++FLAGS) ;
}
# debug and other flags
flags += $(TARGET_C++FLAGS) $(TARGET_DEBUG_$(DEBUG)_C++FLAGS)
$(SUBDIRC++FLAGS) $(C++FLAGS) ;
C++ on $(header) = $(TARGET_C++) ;
includesSeparator = $(TARGET_INCLUDES_SEPARATOR) ;
localIncludesOption = $(TARGET_LOCAL_INCLUDES_OPTION) ;
systemIncludesOption = $(TARGET_SYSTEM_INCLUDES_OPTION) ;
}
}
# locate object, search for source, and set on target variables
Depends $(header) : $(source) ;
SEARCH on $(source) += $(SEARCH_SOURCE) ;
MakeLocateArch $(header) ;
LocalClean clean : $(header) ;
# set up header scanning for the source file
HDRRULE on $(source) = HdrRule ;
HDRSCAN on $(source) = $(HDRPATTERN) ;
HDRSEARCH on $(source) = $(headers) $(sysHeaders) $(STDHDRS) ;
HDRGRIST on $(source) = $(HDRGRIST) ;
# set the variables consumed by the CreateAsmStructOffsetsHeader1 actions
C++FLAGS on $(header) = $(flags) ;
CCHDRS on $(header) = [ FIncludes $(headers) : $(localIncludesOption) ]
$(includesSeparator)
[ FSysIncludes $(sysHeaders) : $(systemIncludesOption) ] ;
CCDEFS on $(header) = [ FDefines $(defines) ] ;
CreateAsmStructOffsetsHeader1 $(header) : $(source) ;
}
# Actions for CreateAsmStructOffsetsHeader: compile the source to assembly
# only (-S, output to stdout) and keep just the "#define" lines the source
# emits, stripping stray '$' characters (presumably assembler immediate
# syntax -- TODO confirm) before writing the result to the header.
actions CreateAsmStructOffsetsHeader1
{
$(C++) -S "$(2)" $(C++FLAGS) $(CCDEFS) $(CCHDRS) -o - \
| grep "#define" | sed -e 's/\$//' > "$(1)"
}
rule MergeObjectFromObjects
{

@ -46,6 +46,9 @@ void arch_check_syscall_restart(struct thread *t);
void arch_store_fork_frame(struct arch_fork_arg *arg);
void arch_restore_fork_frame(struct arch_fork_arg *arg);
#define arch_syscall_64_bit_return_value()
// overridden by architectures that need special handling
#ifdef __cplusplus
}
#endif

@ -22,6 +22,8 @@ void arch_destroy_team_debug_info(struct arch_team_debug_info *info);
void arch_clear_thread_debug_info(struct arch_thread_debug_info *info);
void arch_destroy_thread_debug_info(struct arch_thread_debug_info *info);
void arch_update_thread_single_step();
void arch_set_debug_cpu_state(const struct debug_cpu_state *cpuState);
void arch_get_debug_cpu_state(struct debug_cpu_state *cpuState);
@ -29,6 +31,7 @@ status_t arch_set_breakpoint(void *address);
status_t arch_clear_breakpoint(void *address);
status_t arch_set_watchpoint(void *address, uint32 type, int32 length);
status_t arch_clear_watchpoint(void *address);
bool arch_has_breakpoints(struct arch_team_debug_info *info);
#if KERNEL_BREAKPOINTS
status_t arch_set_kernel_breakpoint(void *address);

@ -0,0 +1,17 @@
/*
* Copyright 2007, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _KERNEL_ARCH_x86_COMMPAGE_H
#define _KERNEL_ARCH_x86_COMMPAGE_H
#ifndef _KERNEL_COMMPAGE_H
# error Must not be included directly. Include <commpage.h> instead!
#endif
#define COMMPAGE_ENTRY_X86_SYSCALL (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 0)
#define COMMPAGE_ENTRY_X86_MEMCPY (COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC + 1)
#define ARCH_USER_COMMPAGE_ADDR (0xffff0000)
#endif /* _KERNEL_ARCH_x86_COMMPAGE_H */

@ -9,14 +9,21 @@
#define _KERNEL_ARCH_x86_CPU_H
#ifndef _ASSEMBLER
#include <module.h>
#include <arch/x86/descriptors.h>
#endif // !_ASSEMBLER
// MSR registers (possibly Intel specific)
#define IA32_MSR_APIC_BASE 0x1b
#define IA32_MSR_MTRR_CAPABILITIES 0xfe
#define IA32_MSR_SYSENTER_CS 0x174
#define IA32_MSR_SYSENTER_ESP 0x175
#define IA32_MSR_SYSENTER_EIP 0x176
#define IA32_MSR_MTRR_DEFAULT_TYPE 0x2ff
#define IA32_MSR_MTRR_PHYSICAL_BASE_0 0x200
#define IA32_MSR_MTRR_PHYSICAL_MASK_0 0x201
@ -81,6 +88,20 @@
#define IA32_MTR_WRITE_PROTECTED 5
#define IA32_MTR_WRITE_BACK 6
// iframe types
#define IFRAME_TYPE_SYSCALL 0x1
#define IFRAME_TYPE_OTHER 0x2
#define IFRAME_TYPE_MASK 0xf
#ifndef _ASSEMBLER
typedef struct x86_optimized_functions {
void (*memcpy)(void* dest, const void* source, size_t count);
void* memcpy_end;
} x86_optimized_functions;
typedef struct x86_cpu_module_info {
module_info info;
uint32 (*count_mtrrs)(void);
@ -89,6 +110,8 @@ typedef struct x86_cpu_module_info {
void (*set_mtrr)(uint32 index, uint64 base, uint64 length, uint8 type);
status_t (*get_mtrr)(uint32 index, uint64 *_base, uint64 *_length,
uint8 *_type);
void (*get_optimized_functions)(x86_optimized_functions* functions);
} x86_cpu_module_info;
@ -110,6 +133,7 @@ struct tss {
};
struct iframe {
uint32 type; // iframe type
uint32 gs;
uint32 fs;
uint32 es;
@ -277,5 +301,6 @@ extern segment_descriptor *gGDT;
} // extern "C" {
#endif
#endif // !_ASSEMBLER
#endif /* _KERNEL_ARCH_x86_CPU_H */

@ -5,7 +5,9 @@
#ifndef _KERNEL_ARCH_x86_KERNEL_H
#define _KERNEL_ARCH_x86_KERNEL_H
#include <arch/cpu.h>
#ifndef _ASSEMBLER
# include <arch/cpu.h>
#endif
// memory layout
#define KERNEL_BASE 0x80000000

@ -16,14 +16,16 @@
extern "C" {
#endif
void x86_push_iframe(struct iframe_stack *stack, struct iframe *frame);
void x86_pop_iframe(struct iframe_stack *stack);
struct iframe *i386_get_user_iframe(void);
void *x86_next_page_directory(struct thread *from, struct thread *to);
void i386_return_from_signal();
void i386_end_return_from_signal();
// override empty macro
#undef arch_syscall_64_bit_return_value
void arch_syscall_64_bit_return_value();
static
inline struct thread *

@ -19,13 +19,6 @@ struct farcall {
uint32 *ss;
};
#define IFRAME_TRACE_DEPTH 4
struct iframe_stack {
struct iframe *frames[IFRAME_TRACE_DEPTH];
int32 index;
};
// architecture specific thread info
struct arch_thread {
struct farcall current_stack;
@ -33,9 +26,6 @@ struct arch_thread {
// 512 byte floating point save point - this must be 16 byte aligned
uint8 fpu_state[512];
// used to track interrupts on this thread
struct iframe_stack iframes;
} _ALIGNED(16);
struct arch_team {

@ -5,7 +5,7 @@
#ifndef _KERNEL_ARCH_X86_USER_DEBUGGER_H
#define _KERNEL_ARCH_X86_USER_DEBUGGER_H
#define ARCH_INIT_USER_DEBUG i386_init_user_debug
#define ARCH_INIT_USER_DEBUG x86_init_user_debug
// number of breakpoints the CPU supports
// Actually it supports 4, but DR3 is used to hold the struct thread*.
@ -94,11 +94,6 @@ enum {
X86_BREAKPOINT_LENGTH_4 = 0x3,
};
// thread debug flags
enum {
X86_THREAD_DEBUG_DR7_SET = 0x01,
};
struct arch_breakpoint {
void *address; // NULL, if deactivated
uint32 type; // one of the architecture types above
@ -122,14 +117,13 @@ extern "C" {
struct iframe;
struct thread;
extern void i386_init_user_debug_at_kernel_exit(struct iframe *frame);
extern void i386_exit_user_debug_at_kernel_entry();
extern void i386_reinit_user_debug_after_context_switch(struct thread *thread);
extern void x86_init_user_debug_at_kernel_exit(struct iframe *frame);
extern void x86_exit_user_debug_at_kernel_entry();
extern int i386_handle_debug_exception(struct iframe *frame);
extern int i386_handle_breakpoint_exception(struct iframe *frame);
extern void x86_handle_debug_exception(struct iframe *frame);
extern void x86_handle_breakpoint_exception(struct iframe *frame);
extern void i386_init_user_debug();
extern void x86_init_user_debug();
#ifdef __cplusplus
}

@ -1,31 +0,0 @@
/*
* Copyright 2007, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _KERNEL_ARCH_x86_COMMPAGE_H
#define _KERNEL_ARCH_x86_COMMPAGE_H
/*! Some systemwide commpage constants, used in the kernel and libroot */
#ifndef _ASSEMBLER
# include <SupportDefs.h>
#endif
/* be careful what you put here, this file is included from assembly */
#define COMMPAGE_ENTRY_MAGIC 0
#define COMMPAGE_ENTRY_VERSION 1
#define COMMPAGE_ENTRY_SYSCALL 2
#define USER_COMMPAGE_ADDR (0xffff0000)
#define COMMPAGE_SIZE (0x8000)
#define TABLE_ENTRIES 64
#define COMMPAGE_SIGNATURE 'COMM'
#define COMMPAGE_VERSION 1
#ifndef _ASSEMBLER
status_t commpage_init(void);
#endif
#endif /* _KERNEL_ARCH_x86_COMMPAGE_H */

@ -0,0 +1,53 @@
/*
* Copyright 2007, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _KERNEL_COMMPAGE_H
#define _KERNEL_COMMPAGE_H
/*! Some systemwide commpage constants, used in the kernel and libroot */
#ifndef _ASSEMBLER
# include <SupportDefs.h>
#endif
/* be careful what you put here, this file is included from assembly */
#define COMMPAGE_ENTRY_MAGIC 0
#define COMMPAGE_ENTRY_VERSION 1
#define COMMPAGE_ENTRY_REAL_TIME_DATA 2
#define COMMPAGE_ENTRY_FIRST_ARCH_SPECIFIC 3
#define COMMPAGE_SIZE (0x8000)
#define COMMPAGE_TABLE_ENTRIES 64
#define COMMPAGE_SIGNATURE 'COMM'
#define COMMPAGE_VERSION 1
#define USER_COMMPAGE_ADDR ARCH_USER_COMMPAGE_ADDR
// set by the architecture specific implementation
#ifndef _ASSEMBLER
#define USER_COMMPAGE_TABLE ((void**)(USER_COMMPAGE_ADDR))
#ifdef __cplusplus
extern "C" {
#endif
status_t commpage_init(void);
void* allocate_commpage_entry(int entry, size_t size);
void* fill_commpage_entry(int entry, const void* copyFrom, size_t size);
status_t arch_commpage_init(void);
// implemented in the architecture specific part
#ifdef __cplusplus
} // extern "C"
#endif
#endif // ! _ASSEMBLER
#include <arch_commpage.h>
#endif /* _KERNEL_COMMPAGE_H */

@ -9,7 +9,7 @@
#include <SupportDefs.h>
typedef struct {
typedef struct syscall_info {
void *function; // pointer to the syscall function
int parameter_size; // summed up parameter size
} syscall_info;

@ -26,9 +26,10 @@ struct thread *thread_lookat_queue(struct thread_queue *q);
struct thread *thread_dequeue(struct thread_queue *q);
struct thread *thread_dequeue_id(struct thread_queue *q, thread_id id);
void thread_at_kernel_entry(void);
void thread_at_kernel_entry(bigtime_t now);
// called when the thread enters the kernel on behalf of the thread
void thread_at_kernel_exit(void);
void thread_at_kernel_exit_no_signals(void);
void thread_reset_for_exec(void);
status_t thread_init(struct kernel_args *args);
@ -69,6 +70,8 @@ status_t wait_for_thread_etc(thread_id id, uint32 flags, bigtime_t timeout,
status_t select_thread(int32 object, struct select_info *info, bool kernel);
status_t deselect_thread(int32 object, struct select_info *info, bool kernel);
#define syscall_64_bit_return_value() arch_syscall_64_bit_return_value()
// used in syscalls.c
status_t _user_set_thread_priority(thread_id thread, int32 newPriority);
status_t _user_rename_thread(thread_id thread, const char *name);

@ -7,6 +7,7 @@
#ifndef _KERNEL_THREAD_TYPES_H
#define _KERNEL_THREAD_TYPES_H
#ifndef _ASSEMBLER
#include <cbuf.h>
#include <smp.h>
@ -182,6 +183,9 @@ struct team {
typedef int32 (*thread_entry_func)(thread_func, void *);
struct thread {
int32 flags; // summary of events relevant in interrupt
// handlers (signals pending, user debugging
// enabled, etc.)
struct thread *all_next;
struct thread *team_next;
struct thread *queue_next; /* i.e. run queue, release queue, etc. */
@ -246,6 +250,7 @@ struct thread {
// stack
area_id kernel_stack_area;
addr_t kernel_stack_base;
addr_t kernel_stack_top;
area_id user_stack_area;
addr_t user_stack_base;
size_t user_stack_size;
@ -268,4 +273,17 @@ struct thread_queue {
struct thread *tail;
};
#endif // !_ASSEMBLER
// bits for the thread::flags field
#define THREAD_FLAGS_SIGNALS_PENDING 0x01
#define THREAD_FLAGS_DEBUG_THREAD 0x02
#define THREAD_FLAGS_DEBUGGER_INSTALLED 0x04
#define THREAD_FLAGS_BREAKPOINTS_DEFINED 0x08
#define THREAD_FLAGS_BREAKPOINTS_INSTALLED 0x10
#define THREAD_FLAGS_64_BIT_SYSCALL_RETURN 0x20
#endif /* _KERNEL_THREAD_TYPES_H */

@ -144,6 +144,7 @@ void user_debug_finish_after_exec();
void init_user_debug();
// debug event callbacks
void user_debug_pre_syscall(uint32 syscall, void *args);
@ -155,6 +156,7 @@ bool user_debug_handle_signal(int signal, struct sigaction *handler,
void user_debug_stop_thread();
void user_debug_team_created(team_id teamID);
void user_debug_team_deleted(team_id teamID, port_id debuggerPort);
void user_debug_update_new_thread_flags(thread_id threadID);
void user_debug_thread_created(thread_id threadID);
void user_debug_thread_deleted(team_id teamID, thread_id threadID);
void user_debug_image_created(const image_info *imageInfo);

@ -22,14 +22,8 @@ local librootFunctions =
strchr.o
strrchr.o
strtol.o
arch_string.o
;
local platformObjects = ;
if $(TARGET_ARCH) = x86 {
platformObjects += <src!system!kernel!arch!$(TARGET_ARCH)>cpuid.o ;
}
AddResources zbeos : boot_loader.rdef ;
KernelLd boot_loader_$(TARGET_BOOT_PLATFORM) :
@ -50,9 +44,6 @@ KernelLd boot_loader_$(TARGET_BOOT_PLATFORM) :
# kernel)
$(librootFunctions:G=src!system!kernel!lib)
# platform specific objects
$(platformObjects)
: $(HAIKU_TOP)/src/system/ldscripts/$(TARGET_ARCH)/boot_loader_$(TARGET_BOOT_PLATFORM).ld
: -Bstatic
;

@ -1,17 +1,14 @@
SubDir HAIKU_TOP src system boot arch m68k ;
{
local defines =
_BOOT_MODE
;
DEFINES += _BOOT_MODE ;
defines = [ FDefines $(defines) ] ;
SubDirCcFlags $(defines) -Wall -Wno-multichar ;
SubDirC++Flags $(defines) -Wall -Wno-multichar -fno-rtti ;
}
local librootArchObjects =
<src!system!libroot!posix!string!arch!$(TARGET_ARCH)>arch_string.o
;
KernelMergeObject boot_arch_m68k.o :
arch_elf.cpp
$(librootArchObjects)
;
SEARCH on [ FGristFiles arch_elf.cpp ]

@ -1,17 +1,14 @@
SubDir HAIKU_TOP src system boot arch ppc ;
{
local defines =
_BOOT_MODE
;
DEFINES += _BOOT_MODE ;
defines = [ FDefines $(defines) ] ;
SubDirCcFlags $(defines) -Wall -Wno-multichar ;
SubDirC++Flags $(defines) -Wall -Wno-multichar -fno-rtti ;
}
local librootArchObjects =
<src!system!libroot!posix!string!arch!$(TARGET_ARCH)>arch_string.o
;
KernelMergeObject boot_arch_ppc.o :
arch_elf.cpp
$(librootArchObjects)
;
SEARCH on [ FGristFiles arch_elf.cpp ]

@ -1,18 +1,22 @@
SubDir HAIKU_TOP src system boot arch x86 ;
{
local defines =
_BOOT_MODE
;
DEFINES += _BOOT_MODE ;
defines = [ FDefines $(defines) ] ;
SubDirCcFlags $(defines) -Wall -Wno-multichar ;
SubDirC++Flags $(defines) -Wall -Wno-multichar -fno-rtti ;
}
KernelMergeObject boot_arch_x86.o :
local kernelArchSources =
arch_elf.c
arch_string.S
;
SEARCH on [ FGristFiles arch_elf.c ]
local kernelArchObjects =
<src!system!kernel!arch!$(TARGET_ARCH)>cpuid.o
;
KernelMergeObject boot_arch_x86.o :
$(kernelArchSources)
: # additional flags
:
$(kernelArchObjects)
;
SEARCH on [ FGristFiles $(kernelArchSources) ]
= [ FDirName $(HAIKU_TOP) src system kernel arch $(TARGET_ARCH) ] ;

@ -16,6 +16,7 @@ AddResources kernel_$(TARGET_ARCH) : kernel.rdef ;
KernelMergeObject kernel_core.o :
boot_item.cpp
commpage.cpp
condition_variable.cpp
cpu.c
elf.cpp

@ -11,6 +11,7 @@ UsePrivateHeaders kernel [ FDirName kernel arch $(TARGET_ARCH) ]
SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
KernelStaticLibrary libx86 :
arch_commpage.cpp
arch_cpu.c
arch_debug.cpp
arch_debug_console.c
@ -20,6 +21,7 @@ KernelStaticLibrary libx86 :
# arch_selector.c
arch_real_time_clock.c
arch_smp.c
arch_string.S
arch_thread.c
arch_timer.c
arch_vm.cpp
@ -31,7 +33,6 @@ KernelStaticLibrary libx86 :
apm.cpp
bios.cpp
cpuid.S
commpage.c
syscall.S
generic_vm_physical_page_mapper.cpp
@ -39,6 +40,10 @@ KernelStaticLibrary libx86 :
$(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused
;
CreateAsmStructOffsetsHeader asm_offsets.h : asm_offsets.cpp ;
# We need to specify the dependency on the generated syscalls file explicitly.
Includes [ FGristFiles arch_x86.S arch_interrupts.S ]
: <syscalls>syscall_numbers.h ;
Includes [ FGristFiles arch_interrupts.S ]
: <syscalls>syscall_table.h ;

@ -0,0 +1,105 @@
/*
* Copyright 2007, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#include <commpage.h>
#include <string.h>
#include <KernelExport.h>
#include <cpu.h>
#include <smp.h>
// user syscall assembly stubs
extern "C" void _user_syscall_int(void);
extern unsigned int _user_syscall_int_end;
extern "C" void _user_syscall_sysenter(void);
extern unsigned int _user_syscall_sysenter_end;
// sysenter handler
extern "C" void x86_sysenter();
/*!	Returns whether every CPU in the system reports the given \a feature
	bit in the feature word selected by \a type.
*/
static bool
all_cpus_have_feature(enum x86_feature_type type, int feature)
{
	int cpuCount = smp_get_num_cpus();
	int cpu = 0;
	while (cpu < cpuCount) {
		if ((gCPU[cpu].arch.feature[type] & feature) == 0)
			return false;
		cpu++;
	}
	return true;
}
/*!	Per-CPU callback (invoked via call_all_cpus_sync() in
	initialize_commpage_syscall()) setting up the Intel sysenter MSRs:
	the kernel code segment and the address of the x86_sysenter handler.
	SYSENTER_ESP is set to 0 -- presumably the handler establishes its own
	stack; TODO confirm against the assembly handler. \a dummy and
	\a cpuNum are unused.
*/
static void
init_intel_syscall_registers(void* dummy, int cpuNum)
{
x86_write_msr(IA32_MSR_SYSENTER_CS, KERNEL_CODE_SEG);
x86_write_msr(IA32_MSR_SYSENTER_ESP, 0);
x86_write_msr(IA32_MSR_SYSENTER_EIP, (addr_t)x86_sysenter);
}
#if 0
static void
init_amd_syscall_registers(void* dummy, int cpuNum)
{
// TODO: ...
}
#endif
/*!	Selects the fastest syscall mechanism supported by all CPUs and copies
	the matching userland syscall stub into the commpage entry
	COMMPAGE_ENTRY_X86_SYSCALL. Falls back to the generic "int 99" stub
	when no special support is available.
*/
static status_t
initialize_commpage_syscall(void)
{
	// default: the plain interrupt based syscall stub
	void* syscallCode = &_user_syscall_int;
	void* syscallCodeEnd = &_user_syscall_int_end;

	// check syscall
	if (all_cpus_have_feature(FEATURE_COMMON, IA32_FEATURE_SEP)
		&& !(gCPU[0].arch.family == 6 && gCPU[0].arch.model < 3
			&& gCPU[0].arch.stepping < 3)) {
		// Intel sysenter/sysexit
		// (early family 6 CPUs are excluded -- they report SEP although
		// sysenter is not usable, cf. Intel's CPUID documentation)
		dprintf("initialize_commpage_syscall(): sysenter/sysexit supported\n");

		// the code to be used in userland
		syscallCode = &_user_syscall_sysenter;
		syscallCodeEnd = &_user_syscall_sysenter_end;

		// tell all CPUs to init their sysenter/sysexit related registers
		call_all_cpus_sync(&init_intel_syscall_registers, NULL);
	} else if (all_cpus_have_feature(FEATURE_EXT_AMD,
			IA32_FEATURE_AMD_EXT_SYSCALL)) {
		// AMD syscall/sysret
		// (message was missing its terminating newline)
		dprintf("initialize_commpage_syscall(): syscall/sysret supported "
			"-- not yet by Haiku, though\n");
	} else {
		// no special syscall support
		dprintf("initialize_commpage_syscall(): no special syscall support\n");
	}

	// fill in the table entry
	size_t len = (size_t)((addr_t)syscallCodeEnd - (addr_t)syscallCode);
	fill_commpage_entry(COMMPAGE_ENTRY_X86_SYSCALL, syscallCode, len);

	return B_OK;
}
/*!	Architecture specific commpage initialization: installs the best
	available syscall stub into the commpage. Always returns B_OK.
*/
status_t
arch_commpage_init(void)
{
// select the optimum syscall mechanism and patch the commpage
initialize_commpage_syscall();
return B_OK;
}

@ -14,6 +14,7 @@
#include <stdio.h>
#include <boot_device.h>
#include <commpage.h>
#include <smp.h>
#include <tls.h>
#include <vm.h>
@ -21,7 +22,6 @@
#include <arch_system_info.h>
#include <arch/x86/selector.h>
#include <boot/kernel_args.h>
#include <arch/x86/commpage.h>
#include "interrupts.h"
@ -76,6 +76,16 @@ static uint32 sDoubleFaultStack[10240];
static x86_cpu_module_info *sCpuModule;
extern void memcpy_generic(void* dest, const void* source, size_t count);
extern int memcpy_generic_end;
x86_optimized_functions gOptimizedFunctions = {
.memcpy = memcpy_generic,
.memcpy_end = &memcpy_generic_end
};
/** Disable CPU caches, and invalidate them. */
static void
@ -516,9 +526,6 @@ arch_cpu_init_post_vm(kernel_args *args)
// setup SSE2/3 support
init_sse();
// initialize the commpage support
commpage_init();
return B_OK;
}
@ -545,6 +552,24 @@ arch_cpu_init_post_modules(kernel_args *args)
if (x86_count_mtrrs() > 0)
call_all_cpus(&init_mtrrs, NULL);
// get optimized functions from the CPU module
if (sCpuModule != NULL && sCpuModule->get_optimized_functions != NULL) {
x86_optimized_functions functions;
memset(&functions, 0, sizeof(functions));
sCpuModule->get_optimized_functions(&functions);
if (functions.memcpy != NULL) {
gOptimizedFunctions.memcpy = functions.memcpy;
gOptimizedFunctions.memcpy_end = functions.memcpy_end;
}
}
// put the optimized functions into the commpage
fill_commpage_entry(COMMPAGE_ENTRY_X86_MEMCPY, gOptimizedFunctions.memcpy,
(addr_t)gOptimizedFunctions.memcpy_end
- (addr_t)gOptimizedFunctions.memcpy);
return B_OK;
}

@ -145,7 +145,7 @@ print_iframe(struct iframe *frame)
static void
setup_for_thread(char *arg, struct thread **_thread, uint32 *_ebp,
struct iframe_stack **_frameStack, uint32 *_oldPageDirectory)
uint32 *_oldPageDirectory)
{
struct thread *thread = NULL;
@ -180,21 +180,60 @@ setup_for_thread(char *arg, struct thread **_thread, uint32 *_ebp,
thread = thread_get_current_thread();
}
// We don't have a thread pointer early in the boot process
if (thread != NULL)
*_frameStack = &thread->arch_info.iframes;
else
*_frameStack = &gBootFrameStack;
*_thread = thread;
}
static bool
is_kernel_stack_address(struct thread* thread, addr_t address)
{
// We don't have a thread pointer in the early boot process, but then we are
// on the kernel stack for sure.
if (thread == NULL)
return IS_KERNEL_ADDRESS(address);
return address >= thread->kernel_stack_base
&& address < thread->kernel_stack_base + KERNEL_STACK_SIZE;
}
/*!	Returns whether the stack location \a frame is the start of an iframe.
	An iframe is recognized by its first field: where an ordinary stack
	frame stores the previous frame pointer, an iframe stores its type
	(struct iframe begins with "uint32 type"), which only has bits within
	IFRAME_TYPE_MASK set.
*/
static bool
is_iframe(struct thread* thread, addr_t frame)
{
return is_kernel_stack_address(thread, frame)
&& (*(addr_t*)frame & ~IFRAME_TYPE_MASK) == 0;
}
/*!	Walks the chain of stack frames starting at \a frame (each frame's
	first word is the saved previous frame pointer) and returns the first
	iframe encountered, or NULL if the walk leaves the kernel stack
	without hitting one. Replaces the former explicit iframe stack.
*/
static struct iframe *
find_previous_iframe(struct thread *thread, addr_t frame)
{
// iterate backwards through the stack frames, until we hit an iframe
while (is_kernel_stack_address(thread, frame)) {
if (is_iframe(thread, frame))
return (struct iframe*)frame;
frame = *(addr_t*)frame;
}
return NULL;
}
/*!	Returns the iframe preceding \a frame on the stack (searching from the
	frame's saved ebp), or NULL if there is none or \a frame is NULL.
*/
static struct iframe*
get_previous_iframe(struct thread* thread, struct iframe* frame)
{
if (frame == NULL)
return NULL;
return find_previous_iframe(thread, frame->ebp);
}
static int
stack_trace(int argc, char **argv)
{
uint32 previousLocations[NUM_PREVIOUS_LOCATIONS];
struct iframe_stack *frameStack;
struct thread *thread = NULL;
addr_t oldPageDirectory = 0;
uint32 ebp = x86_read_ebp();
@ -205,7 +244,7 @@ stack_trace(int argc, char **argv)
return 0;
}
setup_for_thread(argc == 2 ? argv[1] : NULL, &thread, &ebp, &frameStack,
setup_for_thread(argc == 2 ? argv[1] : NULL, &thread, &ebp,
&oldPageDirectory);
if (thread != NULL) {
@ -224,18 +263,14 @@ stack_trace(int argc, char **argv)
kprintf("frame caller <image>:function + offset\n");
for (;;) {
bool isIFrame = false;
// see if the ebp matches the iframe
for (i = 0; i < frameStack->index; i++) {
if (ebp == ((uint32)frameStack->frames[i] - 8)) {
// it's an iframe
isIFrame = true;
}
}
bool onKernelStack = true;
if (isIFrame) {
struct iframe *frame = (struct iframe *)(ebp + 8);
for (;;) {
onKernelStack = onKernelStack
&& is_kernel_stack_address(thread, ebp);
if (onKernelStack && is_iframe(thread, ebp)) {
struct iframe *frame = (struct iframe *)ebp;
print_iframe(frame);
print_stack_frame(thread, frame->eip, ebp, frame->ebp);
@ -328,7 +363,6 @@ print_call(struct thread *thread, addr_t eip, addr_t ebp, int32 argCount)
static int
show_call(int argc, char **argv)
{
struct iframe_stack *frameStack;
struct thread *thread = NULL;
addr_t oldPageDirectory = 0;
uint32 ebp = x86_read_ebp();
@ -348,7 +382,7 @@ show_call(int argc, char **argv)
return 0;
}
setup_for_thread(argc == 3 ? argv[1] : NULL, &thread, &ebp, &frameStack,
setup_for_thread(argc == 3 ? argv[1] : NULL, &thread, &ebp,
&oldPageDirectory);
int32 callIndex = strtoul(argv[argc == 3 ? 2 : 1], NULL, 0);
@ -356,19 +390,14 @@ show_call(int argc, char **argv)
if (thread != NULL)
kprintf("thread %ld, %s\n", thread->id, thread->name);
int32 index = 1;
for (; index <= callIndex; index++) {
bool isIFrame = false;
// see if the ebp matches the iframe
for (int32 i = 0; i < frameStack->index; i++) {
if (ebp == ((uint32)frameStack->frames[i] - 8)) {
// it's an iframe
isIFrame = true;
}
}
bool onKernelStack = true;
if (isIFrame) {
struct iframe *frame = (struct iframe *)(ebp + 8);
for (int32 index = 1; index <= callIndex; index++) {
onKernelStack = onKernelStack
&& is_kernel_stack_address(thread, ebp);
if (onKernelStack && is_iframe(thread, ebp)) {
struct iframe *frame = (struct iframe *)ebp;
if (index == callIndex)
print_call(thread, frame->eip, ebp, argCount);
@ -406,7 +435,6 @@ show_call(int argc, char **argv)
static int
dump_iframes(int argc, char **argv)
{
struct iframe_stack *frameStack;
struct thread *thread = NULL;
int32 i;
@ -424,17 +452,13 @@ dump_iframes(int argc, char **argv)
return 0;
}
// We don't have a thread pointer early in the boot process
if (thread != NULL)
frameStack = &thread->arch_info.iframes;
else
frameStack = &gBootFrameStack;
if (thread != NULL)
kprintf("iframes for thread 0x%lx \"%s\"\n", thread->id, thread->name);
for (i = 0; i < frameStack->index; i++) {
print_iframe(frameStack->frames[i]);
struct iframe* frame = find_previous_iframe(thread, x86_read_ebp());
while (frame != NULL) {
print_iframe(frame);
frame = get_previous_iframe(thread, frame);
}
return 0;

@ -99,8 +99,6 @@ static const int kInterruptNameCount = 20;
#define MAX_ARGS 16
struct iframe_stack gBootFrameStack;
typedef struct {
uint32 a, b;
} desc_table;
@ -109,6 +107,12 @@ static desc_table *sIDT = NULL;
static uint16 sLevelTriggeredInterrupts;
// binary mask: 1 level, 0 edge
// table with functions handling respective interrupts
typedef void interrupt_handler_function(struct iframe* frame);
#define INTERRUPT_HANDLER_TABLE_SIZE 256
interrupt_handler_function* gInterruptHandlerTable[
INTERRUPT_HANDLER_TABLE_SIZE];
static void
set_gate(desc_table *gate_addr, addr_t addr, int type, int dpl)
@ -319,6 +323,17 @@ exception_name(int number, char *buffer, int32 bufferSize)
}
/*!	Handler for exception vectors without dedicated handling: panics with
	the vector number, its symbolic name, the faulting instruction pointer,
	and the current thread id (-1 early in boot, when no thread exists yet).
*/
static void
invalid_exception(struct iframe* frame)
{
struct thread* thread = thread_get_current_thread();
char name[32];
panic("unhandled trap 0x%lx (%s) at ip 0x%lx, thread 0x%lx!\n",
frame->vector, exception_name(frame->vector, name, sizeof(name)),
frame->eip, thread ? thread->id : -1);
}
static void
fatal_exception(struct iframe *frame)
{
@ -329,9 +344,57 @@ fatal_exception(struct iframe *frame)
static void
unexpected_exception(struct iframe *frame, debug_exception_type type,
int signal)
unexpected_exception(struct iframe* frame)
{
debug_exception_type type;
int signal;
switch (frame->vector) {
case 0: // Divide Error Exception (#DE)
type = B_DIVIDE_ERROR;
signal = SIGFPE;
break;
case 4: // Overflow Exception (#OF)
type = B_OVERFLOW_EXCEPTION;
signal = SIGTRAP;
break;
case 5: // BOUND Range Exceeded Exception (#BR)
type = B_BOUNDS_CHECK_EXCEPTION;
signal = SIGTRAP;
break;
case 6: // Invalid Opcode Exception (#UD)
type = B_INVALID_OPCODE_EXCEPTION;
signal = SIGILL;
break;
case 13: // General Protection Exception (#GP)
type = B_GENERAL_PROTECTION_FAULT;
signal = SIGKILL;
break;
case 16: // x87 FPU Floating-Point Error (#MF)
type = B_FLOATING_POINT_EXCEPTION;
signal = SIGFPE;
break;
case 17: // Alignment Check Exception (#AC)
type = B_ALIGNMENT_EXCEPTION;
signal = SIGTRAP;
break;
case 19: // SIMD Floating-Point Exception (#XF)
type = B_FLOATING_POINT_EXCEPTION;
signal = SIGFPE;
break;
default:
invalid_exception(frame);
return;
}
if (frame->cs == USER_CODE_SEG) {
enable_interrupts();
@ -347,232 +410,106 @@ unexpected_exception(struct iframe *frame, debug_exception_type type,
}
/* keep the compiler happy, this function must be called only from assembly */
void i386_handle_trap(struct iframe frame);
static void
double_fault_exception(struct iframe* frame)
{
// The double fault iframe contains no useful information (as
// per Intel's architecture spec). Thus we simply save the
// information from the (unhandable) exception which caused the
// double in our iframe. This will result even in useful stack
// traces. Only problem is that we trust that at least the
// TSS is still accessible.
struct tss *tss = &gCPU[smp_get_current_cpu()].arch.tss;
void
i386_handle_trap(struct iframe frame)
frame->cs = tss->cs;
frame->es = tss->es;
frame->ds = tss->ds;
frame->fs = tss->fs;
frame->gs = tss->gs;
frame->eip = tss->eip;
frame->ebp = tss->ebp;
frame->esp = tss->esp;
frame->eax = tss->eax;
frame->ebx = tss->ebx;
frame->ecx = tss->ecx;
frame->edx = tss->edx;
frame->esi = tss->esi;
frame->edi = tss->edi;
frame->flags = tss->eflags;
panic("double fault!\n");
}
static void
page_fault_exception(struct iframe* frame)
{
struct thread *thread = thread_get_current_thread();
int ret = B_HANDLED_INTERRUPT;
cpu_status state;
bool kernelDebugger = debug_debugger_running();
unsigned int cr2;
addr_t newip;
// all exceptions besides 3 (breakpoint), and 99 (syscall) enter this
// function with interrupts disabled
asm("movl %%cr2, %0" : "=r" (cr2));
state = disable_interrupts();
if (kernelDebugger) {
// if this thread has a fault handler, we're allowed to be here
if (thread && thread->fault_handler != NULL) {
frame->eip = thread->fault_handler;
return;
}
if (thread)
x86_push_iframe(&thread->arch_info.iframes, &frame);
else
x86_push_iframe(&gBootFrameStack, &frame);
if (frame.cs == USER_CODE_SEG) {
i386_exit_user_debug_at_kernel_entry();
thread_at_kernel_entry();
// otherwise, not really
panic("page fault in debugger without fault handler! Touching "
"address %p from eip %p\n", (void *)cr2, (void *)frame->eip);
return;
} else if ((frame->flags & 0x200) == 0) {
// if the interrupts were disabled, and we are not running the kernel startup
// the page fault was not allowed to happen and we must panic
panic("page fault, but interrupts were disabled. Touching address "
"%p from eip %p\n", (void *)cr2, (void *)frame->eip);
return;
} else if (thread != NULL && thread->page_faults_allowed < 1) {
panic("page fault not allowed at this place. Touching address "
"%p from eip %p\n", (void *)cr2, (void *)frame->eip);
return;
}
restore_interrupts(state);
enable_interrupts();
// if(frame.vector != 0x20)
// dprintf("i386_handle_trap: vector 0x%x, ip 0x%x, cpu %d\n", frame.vector, frame.eip, smp_get_current_cpu());
switch (frame.vector) {
// fatal exceptions
case 2: // NMI Interrupt
case 9: // Coprocessor Segment Overrun
case 7: // Device Not Available Exception (#NM)
case 10: // Invalid TSS Exception (#TS)
case 11: // Segment Not Present (#NP)
case 12: // Stack Fault Exception (#SS)
case 18: // Machine-Check Exception (#MC)
fatal_exception(&frame);
break;
case 8: // Double Fault Exception (#DF)
{
// The double fault iframe contains no useful information (as
// per Intel's architecture spec). Thus we simply save the
// information from the (unhandable) exception which caused the
// double in our iframe. This will result even in useful stack
// traces. Only problem is that we trust that at least the
// TSS is still accessible.
struct tss *tss = &gCPU[smp_get_current_cpu()].arch.tss;
frame.cs = tss->cs;
frame.es = tss->es;
frame.ds = tss->ds;
frame.fs = tss->fs;
frame.gs = tss->gs;
frame.eip = tss->eip;
frame.ebp = tss->ebp;
frame.esp = tss->esp;
frame.eax = tss->eax;
frame.ebx = tss->ebx;
frame.ecx = tss->ecx;
frame.edx = tss->edx;
frame.esi = tss->esi;
frame.edi = tss->edi;
frame.flags = tss->eflags;
panic("double fault!\n");
break;
}
// exceptions we can handle
// (most of them only when occurring in userland)
case 0: // Divide Error Exception (#DE)
unexpected_exception(&frame, B_DIVIDE_ERROR, SIGFPE);
break;
case 1: // Debug Exception (#DB)
ret = i386_handle_debug_exception(&frame);
break;
case 3: // Breakpoint Exception (#BP)
ret = i386_handle_breakpoint_exception(&frame);
break;
case 4: // Overflow Exception (#OF)
unexpected_exception(&frame, B_OVERFLOW_EXCEPTION, SIGTRAP);
break;
case 5: // BOUND Range Exceeded Exception (#BR)
unexpected_exception(&frame, B_BOUNDS_CHECK_EXCEPTION, SIGTRAP);
break;
case 6: // Invalid Opcode Exception (#UD)
unexpected_exception(&frame, B_INVALID_OPCODE_EXCEPTION, SIGILL);
break;
case 13: // General Protection Exception (#GP)
unexpected_exception(&frame, B_GENERAL_PROTECTION_FAULT, SIGKILL);
break;
case 14: // Page-Fault Exception (#PF)
{
bool kernelDebugger = debug_debugger_running();
unsigned int cr2;
addr_t newip;
asm("movl %%cr2, %0" : "=r" (cr2));
if (kernelDebugger) {
// if this thread has a fault handler, we're allowed to be here
if (thread && thread->fault_handler != NULL) {
frame.eip = thread->fault_handler;
break;
}
// otherwise, not really
panic("page fault in debugger without fault handler! Touching "
"address %p from eip %p\n", (void *)cr2, (void *)frame.eip);
break;
} else if ((frame.flags & 0x200) == 0) {
// if the interrupts were disabled, and we are not running the kernel startup
// the page fault was not allowed to happen and we must panic
panic("page fault, but interrupts were disabled. Touching address "
"%p from eip %p\n", (void *)cr2, (void *)frame.eip);
break;
} else if (thread != NULL && thread->page_faults_allowed < 1) {
panic("page fault not allowed at this place. Touching address "
"%p from eip %p\n", (void *)cr2, (void *)frame.eip);
}
enable_interrupts();
ret = vm_page_fault(cr2, frame.eip,
(frame.error_code & 0x2) != 0, // write access
(frame.error_code & 0x4) != 0, // userland
&newip);
if (newip != 0) {
// the page fault handler wants us to modify the iframe to set the
// IP the cpu will return to to be this ip
frame.eip = newip;
}
break;
}
case 16: // x87 FPU Floating-Point Error (#MF)
unexpected_exception(&frame, B_FLOATING_POINT_EXCEPTION, SIGFPE);
break;
case 17: // Alignment Check Exception (#AC)
unexpected_exception(&frame, B_ALIGNMENT_EXCEPTION, SIGTRAP);
break;
case 19: // SIMD Floating-Point Exception (#XF)
unexpected_exception(&frame, B_FLOATING_POINT_EXCEPTION, SIGFPE);
break;
case 99: // syscall
{
uint64 retcode;
unsigned int args[MAX_ARGS];
#if 0
{
int i;
dprintf("i386_handle_trap: syscall %d, count %d, ptr 0x%x\n", frame.eax, frame.ecx, frame.edx);
dprintf(" call stack:\n");
for(i=0; i<frame.ecx; i++)
dprintf("\t0x%x\n", ((unsigned int *)frame.edx)[i]);
vm_page_fault(cr2, frame->eip,
(frame->error_code & 0x2) != 0, // write access
(frame->error_code & 0x4) != 0, // userland
&newip);
if (newip != 0) {
// the page fault handler wants us to modify the iframe to set the
// IP the cpu will return to to be this ip
frame->eip = newip;
}
}
#endif
/* syscall interface works as such:
* %eax has syscall #
* %esp + 4 points to the syscall parameters
*/
if (frame.eax >= 0 && frame.eax < kSyscallCount) {
void *params = (void*)(frame.user_esp + 4);
int paramSize = kSyscallInfos[frame.eax].parameter_size;
if (IS_KERNEL_ADDRESS((addr_t)params)
|| user_memcpy(args, params, paramSize) < B_OK) {
retcode = B_BAD_ADDRESS;
} else
ret = syscall_dispatcher(frame.eax, (void *)args, &retcode);
} else {
// invalid syscall number
retcode = EINVAL;
}
frame.eax = retcode & 0xffffffff;
frame.edx = retcode >> 32;
break;
}
default:
if (frame.vector >= ARCH_INTERRUPT_BASE) {
bool levelTriggered = pic_is_level_triggered(frame.vector);
// This is a workaround for spurious assertions of interrupts 7/15
// which seems to be an often seen problem on the PC platform
if (pic_is_spurious_interrupt(frame.vector - ARCH_INTERRUPT_BASE)) {
TRACE(("got spurious interrupt at vector %ld\n", frame.vector));
break;
}
static void
hardware_interrupt(struct iframe* frame)
{
bool levelTriggered = pic_is_level_triggered(frame->vector);
int ret;
if (!levelTriggered)
pic_end_of_interrupt(frame.vector);
ret = int_io_interrupt_handler(frame.vector - ARCH_INTERRUPT_BASE,
levelTriggered);
if (levelTriggered)
pic_end_of_interrupt(frame.vector);
} else {
char name[32];
panic("i386_handle_trap: unhandled trap 0x%lx (%s) at ip 0x%lx, "
"thread 0x%lx!\n", frame.vector,
exception_name(frame.vector, name, sizeof(name)), frame.eip,
thread ? thread->id : -1);
ret = B_HANDLED_INTERRUPT;
}
break;
// This is a workaround for spurious assertions of interrupts 7/15
// which seems to be an often seen problem on the PC platform
if (pic_is_spurious_interrupt(frame->vector - ARCH_INTERRUPT_BASE)) {
TRACE(("got spurious interrupt at vector %ld\n", frame->vector));
return;
}
if (!levelTriggered)
pic_end_of_interrupt(frame->vector);
ret = int_io_interrupt_handler(frame->vector - ARCH_INTERRUPT_BASE,
levelTriggered);
if (levelTriggered)
pic_end_of_interrupt(frame->vector);
if (ret == B_INVOKE_SCHEDULER) {
cpu_status state = disable_interrupts();
GRAB_THREAD_LOCK();
@ -582,29 +519,15 @@ i386_handle_trap(struct iframe frame)
RELEASE_THREAD_LOCK();
restore_interrupts(state);
}
if (frame.cs == USER_CODE_SEG) {
enable_interrupts();
// interrupts are not enabled at this point if we came from
// a hardware interrupt
thread_at_kernel_exit();
i386_init_user_debug_at_kernel_exit(&frame);
}
// dprintf("0x%x cpu %d!\n", thread_get_current_thread_id(), smp_get_current_cpu());
disable_interrupts();
if (thread)
x86_pop_iframe(&thread->arch_info.iframes);
else
x86_pop_iframe(&gBootFrameStack);
}
status_t
arch_int_init(kernel_args *args)
{
int i;
interrupt_handler_function** table;
// set the global sIDT variable
sIDT = (desc_table *)args->arch_args.vir_idt;
@ -649,6 +572,7 @@ arch_int_init(kernel_args *args)
set_intr_gate(46, &trap46);
set_intr_gate(47, &trap47);
set_system_gate(98, &trap98); // for performance testing only
set_system_gate(99, &trap99);
set_intr_gate(251, &trap251);
@ -657,6 +581,35 @@ arch_int_init(kernel_args *args)
set_intr_gate(254, &trap254);
set_intr_gate(255, &trap255);
// init interrupt handler table
table = gInterruptHandlerTable;
// defaults
for (i = 0; i < ARCH_INTERRUPT_BASE; i++)
table[i] = invalid_exception;
for (i = ARCH_INTERRUPT_BASE; i < INTERRUPT_HANDLER_TABLE_SIZE; i++)
table[i] = hardware_interrupt;
table[0] = unexpected_exception; // Divide Error Exception (#DE)
table[1] = x86_handle_debug_exception; // Debug Exception (#DB)
table[2] = fatal_exception; // NMI Interrupt
table[3] = x86_handle_breakpoint_exception; // Breakpoint Exception (#BP)
table[4] = unexpected_exception; // Overflow Exception (#OF)
table[5] = unexpected_exception; // BOUND Range Exceeded Exception (#BR)
table[6] = unexpected_exception; // Invalid Opcode Exception (#UD)
table[7] = fatal_exception; // Device Not Available Exception (#NM)
table[8] = double_fault_exception; // Double Fault Exception (#DF)
table[9] = fatal_exception; // Coprocessor Segment Overrun
table[10] = fatal_exception; // Invalid TSS Exception (#TS)
table[11] = fatal_exception; // Segment Not Present (#NP)
table[12] = fatal_exception; // Stack Fault Exception (#SS)
table[13] = unexpected_exception; // General Protection Exception (#GP)
table[14] = page_fault_exception; // Page-Fault Exception (#PF)
table[16] = unexpected_exception; // x87 FPU Floating-Point Error (#MF)
table[17] = unexpected_exception; // Alignment Check Exception (#AC)
table[18] = fatal_exception; // Machine-Check Exception (#MC)
table[19] = unexpected_exception; // SIMD Floating-Point Exception (#XF)
return B_OK;
}

@ -1,18 +1,137 @@
/*
** Copyright 2002-2004, The Haiku Team. All rights reserved.
** Distributed under the terms of the Haiku License.
**
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Copyright 2002, Michael Noisternig. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
* Copyright 2002-2007, The Haiku Team. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
* Copyright 2002, Michael Noisternig. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include <arch/x86/arch_cpu.h>
#include <arch/x86/arch_kernel.h>
#include <arch/x86/descriptors.h>
#include <commpage.h>
#include <thread_types.h>
#include "asm_offsets.h"
#include "syscall_numbers.h"
#include "syscall_table.h"
#define FUNCTION(x) .global x; .type x,@function; x
#define UPDATE_THREAD_USER_TIME_COMMON() \
movl %eax, %ebx; /* save for later */ \
movl %edx, %ecx; \
\
/* thread->user_time += now - thread->last_time; */ \
sub THREAD_last_time(%edi), %eax; \
sbb (THREAD_last_time + 4)(%edi), %edx; \
add %eax, THREAD_user_time(%edi); \
adc %edx, (THREAD_user_time + 4)(%edi); \
\
/* thread->last_time = now; */ \
movl %ebx, THREAD_last_time(%edi); \
movl %ecx, (THREAD_last_time + 4)(%edi); \
\
/* thread->in_kernel = true; */ \
movb $1, THREAD_in_kernel(%edi)
#define UPDATE_THREAD_USER_TIME() \
call system_time; \
UPDATE_THREAD_USER_TIME_COMMON()
#define UPDATE_THREAD_USER_TIME_PUSH_TIME() \
call system_time; \
push %edx; \
push %eax; \
UPDATE_THREAD_USER_TIME_COMMON()
#define UPDATE_THREAD_KERNEL_TIME() \
call system_time; \
\
movl %eax, %ebx; /* save for later */ \
movl %edx, %ecx;
\
/* thread->kernel_time += now - thread->last_time; */ \
sub THREAD_last_time(%edi), %eax; \
sbb (THREAD_last_time + 4)(%edi), %edx; \
add %eax, THREAD_kernel_time(%edi); \
adc %edx, (THREAD_kernel_time + 4)(%edi); \
\
/* thread->last_time = now; */ \
movl %ebx, THREAD_last_time(%edi); \
movl %ecx, (THREAD_last_time + 4)(%edi); \
\
/* thread->in_kernel = false; */ \
movb $0, THREAD_in_kernel(%edi)
#define PUSH_IFRAME_BOTTOM(iframeType) \
pusha; \
push %ds; \
push %es; \
push %fs; \
push %gs; \
pushl $iframeType
#define PUSH_IFRAME_BOTTOM_SYSCALL() \
pushl $0; \
pushl $99; \
pushl %edx; \
pushl %eax; \
PUSH_IFRAME_BOTTOM(IFRAME_TYPE_SYSCALL)
#define POP_IFRAME_AND_RETURN() \
/* skip iframe type */ \
lea 4(%ebp), %esp; \
\
pop %gs; \
addl $4, %esp; /* we skip %fs, as this contains the CPU \
dependent TLS segment */ \
pop %es; \
pop %ds; \
\
popa; \
addl $16,%esp; /* ignore the vector, error code, and \
original eax/edx values */ \
iret
#define DISABLE_BREAKPOINTS() \
testl $THREAD_FLAGS_BREAKPOINTS_INSTALLED, THREAD_flags(%edi); \
jz 1f; \
call x86_exit_user_debug_at_kernel_entry; \
1:
#define COPY_SYSCALL_PARAMETERS() \
/* make room for the syscall params */ \
subl $80, %esp; \
\
/* get the address of the syscall parameters */ \
movl IFRAME_user_esp(%ebp), %esi; \
addl $4, %esi; \
cmp $KERNEL_BASE, %esi; /* must not be a kernel address */ \
jae bad_syscall_params; \
\
/* set the fault handler */ \
movl $bad_syscall_params, THREAD_fault_handler(%edi); \
\
/* target address is our stack */ \
movl %esp, %edi; \
\
/* number of syscall parameter words */ \
movl SYSCALL_INFO_parameter_size(%edx), %ecx; \
shrl $2, %ecx; \
\
/* copy */ \
cld; \
rep movsl; \
\
/* restore pointers and clear fault handler */ \
movl %edx, %esi; /* syscall info pointer */ \
movl %dr3, %edi; /* thread pointer */ \
movl $0, THREAD_fault_handler(%edi)
.text
#define TRAP_ERRC(name, vector) \
@ -86,106 +205,306 @@ TRAP(trap45, 45)
TRAP(trap46, 46)
TRAP(trap47, 47)
TRAP(trap99, 99)
TRAP(trap251, 251)
TRAP(trap252, 252)
TRAP(trap253, 253)
TRAP(trap254, 254)
TRAP(trap255, 255)
.align 16
.globl int_bottom
int_bottom:
pusha
push %ds
push %es
push %fs
push %gs
PUSH_IFRAME_BOTTOM(IFRAME_TYPE_OTHER)
movl %esp, %ebp // frame pointer is the iframe
cmp $USER_CODE_SEG, IFRAME_cs(%ebp)
je int_bottom_user
// disable interrupts -- the handler will enable them, if necessary
cli
pushl %ebp
movl IFRAME_vector(%ebp), %eax
call *gInterruptHandlerTable(, %eax, 4)
POP_IFRAME_AND_RETURN()
int_bottom_user:
movl $KERNEL_DATA_SEG,%eax
cld
movl %eax,%ds
movl %eax,%es
movl %ss,%ebx
movl %esp,%esi // save the old stack pointer
cmpl %eax,%ebx // check if we changed the stack
jne custom_stack
kernel_stack:
call i386_handle_trap
pop %gs
addl $4, %esp // we skip %fs, as this contains the CPU dependent TLS segment
pop %es
pop %ds
popa
addl $16,%esp // ignore the vector, error code, and original eax/edx values
// disable breakpoints, if installed
movl %dr3, %edi // thread pointer
cli // disable interrupts
DISABLE_BREAKPOINTS()
// update the thread's user time
UPDATE_THREAD_USER_TIME()
// leave interrupts disabled -- the handler will enable them, if
// necessary
pushl %ebp
movl IFRAME_vector(%ebp), %eax
call *gInterruptHandlerTable(, %eax, 4)
testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED) \
, THREAD_flags(%edi)
jnz kernel_exit_work
cli // disable interrupts
// update the thread's kernel time and return
UPDATE_THREAD_KERNEL_TIME()
POP_IFRAME_AND_RETURN()
// test interrupt handler for performance measurements
.align 16
.globl trap98
trap98:
iret
// custom stack -> copy registers to kernel stack and switch there
custom_stack:
movl %dr3,%edx // get current struct thread
movl %eax,%es // the iframe is on the wrong stack
addl _interrupt_stack_offset,%edx
lss (%edx),%esp
movl %ebx,%ds // point %ds to the user stack segment
subl $92,%esp
movl %esp,%edi // copy to the current stack
movl $21,%ecx // copy sizeof(iframe)
rep movsl // %esi still points to the old custom stack address
.align 16
.globl trap99
trap99:
// push error, vector, orig_edx, orig_eax, and other registers
PUSH_IFRAME_BOTTOM_SYSCALL()
// save %eax, the number of the syscall
movl %eax, %esi
movl $KERNEL_DATA_SEG,%eax
cld
movl %eax,%ds
subl $84,%esi
movl %esi,(%edi) // save custom stack address and segment *after* the
movl %ebx,4(%edi) // iframe structure on the stack
call i386_handle_trap
lss 84(%esp),%esp // reload custom stack address
movl %eax,%es
movl %esp, %ebp // frame pointer is the iframe
movl %dr3, %edi // thread pointer
// disable breakpoints, if installed
cli // disable interrupts
DISABLE_BREAKPOINTS()
// update the thread's user time
UPDATE_THREAD_USER_TIME_PUSH_TIME()
// leave the time on the stack (needed for post syscall debugging)
sti // enable interrupts
cmp $SYSCALL_COUNT, %esi // check syscall number
jae bad_syscall_number
movl $kSyscallInfos, %eax // get syscall info
lea (%eax, %esi, SYSCALL_INFO_sizeof), %edx
// copy parameters onto this stack
COPY_SYSCALL_PARAMETERS()
// pre syscall debugging
testl $THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%edi)
jnz do_pre_syscall_debug
pre_syscall_debug_done:
// call the syscall function
call *SYSCALL_INFO_function(%esi)
// overwrite the values of %eax and %edx on the stack (the syscall return
// value)
movl %edx, IFRAME_edx(%ebp)
movl %eax, IFRAME_eax(%ebp)
testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED) \
, THREAD_flags(%edi)
jnz post_syscall_work
cli // disable interrupts
// update the thread's kernel time and return
UPDATE_THREAD_KERNEL_TIME()
POP_IFRAME_AND_RETURN()
do_pre_syscall_debug:
movl %esp, %eax // syscall parameters
push %eax
movl IFRAME_orig_eax(%ebp), %eax // syscall number
push %eax
call user_debug_pre_syscall
addl $8, %esp
jmp pre_syscall_debug_done
post_syscall_work_sysenter:
// if the 64 bit return value bit is set, we have to clear it
testl $THREAD_FLAGS_64_BIT_SYSCALL_RETURN, THREAD_flags(%edi)
jz post_syscall_work
1:
movl THREAD_flags(%edi), %eax
movl %eax, %edx
orl $THREAD_FLAGS_64_BIT_SYSCALL_RETURN, %edx
lock
cmpxchgl %edx, THREAD_flags(%edi)
jnz 1b
post_syscall_work:
// post syscall debugging
testl $THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%edi)
jz 1f
pushl -4(%ebp) // syscall start time
pushl -8(%ebp)
push %edx // syscall return value
push %eax
lea 16(%esp), %eax // syscall parameters
push %eax
movl IFRAME_orig_eax(%ebp), %eax // syscall number
push %eax
call user_debug_post_syscall
addl $8, %esp
1:
bad_syscall_number:
kernel_exit_work:
// if no signals are pending and the thread shall not be debugged, we can
// use the quick kernel exit function
testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD) \
, THREAD_flags(%edi)
jnz kernel_exit_handle_signals
cli // disable interrupts
call thread_at_kernel_exit_no_signals
kernel_exit_work_done:
// install breakpoints, if defined
testl $THREAD_FLAGS_BREAKPOINTS_DEFINED, THREAD_flags(%edi)
jz 1f
push %ebp
call x86_init_user_debug_at_kernel_exit
1:
POP_IFRAME_AND_RETURN()
kernel_exit_handle_signals:
// make sure interrupts are enabled (they are, when coming from a syscall
// but otherwise they might be disabled)
sti
call thread_at_kernel_exit
cli // disable interrupts
jmp kernel_exit_work_done
bad_syscall_params:
// clear the fault handler and exit normally
movl %dr3, %edi
movl $0, THREAD_fault_handler(%edi)
jmp kernel_exit_work
/*! Handler called by the sysenter instruction
ecx - user esp
*/
FUNCTION(x86_sysenter):
// switch the stack
movl %dr3, %edx
movl THREAD_kernel_stack_top(%edx), %esp
// push the iframe
pushl $USER_DATA_SEG // user_ss
pushl %ecx // user_esp
pushfl // eflags
orl $(1 << 9), (%esp) // set the IF (interrupts) bit
pushl $USER_CODE_SEG // user cs
// user_eip
movl USER_COMMPAGE_ADDR + 4 * COMMPAGE_ENTRY_X86_SYSCALL, %edx
addl $4, %edx // sysenter is at offset 2, 2 bytes long
pushl %edx
PUSH_IFRAME_BOTTOM_SYSCALL()
// save %eax, the number of the syscall
movl %eax, %esi
movl $KERNEL_DATA_SEG,%eax
cld
movl %eax,%ds
movl %eax,%es
movl %esp, %ebp // frame pointer is the iframe
movl %dr3, %edi // thread pointer
// disable breakpoints, if installed
cli // disable interrupts
DISABLE_BREAKPOINTS()
// update the thread's user time
UPDATE_THREAD_USER_TIME_PUSH_TIME()
// leave the time on the stack (needed for post syscall debugging)
sti // enable interrupts
cmp $SYSCALL_COUNT, %esi // check syscall number
jae bad_syscall_number
movl $kSyscallInfos, %eax // get syscall info
lea (%eax, %esi, SYSCALL_INFO_sizeof), %edx
// copy parameters onto this stack
COPY_SYSCALL_PARAMETERS()
// pre syscall debugging
testl $THREAD_FLAGS_DEBUGGER_INSTALLED, THREAD_flags(%edi)
jnz do_pre_syscall_debug
// if debugging is enabled, we take the slow syscall exit
// call the syscall function
call *SYSCALL_INFO_function(%esi)
// overwrite the values of %eax and %edx on the stack (the syscall return
// value)
movl %edx, IFRAME_edx(%ebp)
movl %eax, IFRAME_eax(%ebp)
testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \
| THREAD_FLAGS_64_BIT_SYSCALL_RETURN) \
, THREAD_flags(%edi)
jnz post_syscall_work_sysenter
// if any special work has to be done, we take the slow syscall exit
cli // disable interrupts
// update the thread's kernel time
UPDATE_THREAD_KERNEL_TIME()
// pop the bottom of the iframe
lea 4(%ebp), %esp // skip iframe type
pop %gs
pop %fs // doesn't come from userland, doesn't need TLS, may have changed %fs
addl $4, %esp /* we skip %fs, as this contains the CPU
dependent TLS segment */
pop %es
pop %ds
popa
addl $16,%esp // ignore the vector, error code, and original eax/edx values
iret
_interrupt_stack_offset:
.long 0
// this value will be maintained by the function below
// ecx already contains the user esp -- load edx with the return address
movl 16(%esp), %edx
// void i386_stack_init(struct farcall *interrupt_stack_offset)
/* setup in arch_thread.c: arch_thread_init_thread_struct() */
FUNCTION(i386_stack_init):
movl 4(%esp),%eax
movl %eax,_interrupt_stack_offset
ret
// pop eflags, which also reenables interrupts
addl $24, %esp // skip, orig_eax/edx, vector, error_code, eip, cs
popfl
// void i386_stack_switch(struct farcall new_stack)
FUNCTION(i386_stack_switch):
movl %dr3,%eax // get_current_thread
movl (%esp),%edx
pushf
popl %ecx
addl _interrupt_stack_offset,%eax
cli
pushl %ss
cmpl $KERNEL_DATA_SEG,(%esp)
je kernel_stack2
popl %eax
jmp switch
kernel_stack2:
popl 4(%eax)
movl %esp,(%eax)
switch:
lss 4(%esp),%esp
pushl %ecx
popf
jmp *%edx
sysexit
/** Is copied to the signal stack call to restore the original frame when
* the signal handler exits.
* The copying code (in arch_thread.c::arch_setup_signal_frame()) copies
* everything between the i386_return_from_signal and i386_end_return_from_signal
* symbols.
*/
/*! Is copied to the signal stack call to restore the original frame when
the signal handler exits.
The copying code (in arch_thread.c::arch_setup_signal_frame()) copies
everything between the i386_return_from_signal and i386_end_return_from_signal
symbols.
*/
FUNCTION(i386_return_from_signal):
addl $12, %esp // Flushes the 3 arguments to sa_handler
movl $SYSCALL_RESTORE_SIGNAL_FRAME, %eax
@ -198,19 +517,21 @@ FUNCTION(i386_return_from_signal):
FUNCTION(i386_end_return_from_signal):
/** void i386_restore_frame_from_syscall(struct iframe iframe);
* Pops the regs of the iframe from the stack to make it current and then
* return to userland.
*/
/*! void i386_restore_frame_from_syscall(struct iframe iframe);
Pops the regs of the iframe from the stack to make it current and then
return to userland.
Interrupts are disabled.
*/
FUNCTION(i386_restore_frame_from_syscall):
addl $4, %esp // make the iframe our current stack position (we don't need the
// return address anymore, as we will use the one of the frame)
pop %gs // recreate the frame environment
addl $4, %esp // we skip %fs, as this contains the CPU dependent TLS segment
pop %es
pop %ds
popa
addl $16,%esp // ignore the vector, error code, and original eax/edx values
// (which contain the syscall number and argument pointer)
iret
lea 4(%esp), %ebp // iframe to %ebp
// check, if any kernel exit work has to be done
movl %dr3, %edi
testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \
| THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED) \
, THREAD_flags(%edi)
jnz kernel_exit_work
// update the thread's kernel time and return
UPDATE_THREAD_KERNEL_TIME()
POP_IFRAME_AND_RETURN()

@ -0,0 +1,54 @@
/*
** Copyright 2001, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
#if !_BOOT_MODE
# include "asm_offsets.h"
#endif
#define FUNCTION(x) .global x; .type x,@function; x
#define SYM(x) .global x; x
// We don't need the indirection in the boot loader.
#if _BOOT_MODE
# define memcpy_generic memcpy
#endif
.align 4
FUNCTION(memcpy_generic):
pushl %esi
pushl %edi
movl 12(%esp),%edi /* dest */
movl %edi,%eax /* save dest ptr as return address */
movl 16(%esp),%esi /* source */
movl 20(%esp),%ecx /* count */
/* move by words */
cld
shrl $2,%ecx
rep
movsl
/* move any remaining data by bytes */
movl 20(%esp),%ecx
andl $3,%ecx
rep
movsb
popl %edi
popl %esi
ret
SYM(memcpy_generic_end):
#if !_BOOT_MODE
.align 4
FUNCTION(memcpy):
jmp *(gOptimizedFunctions + X86_OPTIMIZED_FUNCTIONS_memcpy)
#endif // !_BOOT_MODE

@ -54,27 +54,36 @@ arch_thread_init(struct kernel_args *args)
else
i386_fnsave(sInitialState.fpu_state);
// let the asm function know the offset to the interrupt stack within struct thread
// I know no better ( = static) way to tell the asm function the offset
i386_stack_init(&((struct thread *)0)->arch_info.interrupt_stack);
return B_OK;
}
void
x86_push_iframe(struct iframe_stack *stack, struct iframe *frame)
static struct iframe *
find_previous_iframe(addr_t frame)
{
ASSERT(stack->index < IFRAME_TRACE_DEPTH);
stack->frames[stack->index++] = frame;
struct thread *thread = thread_get_current_thread();
// iterate backwards through the stack frames, until we hit an iframe
while (frame >= thread->kernel_stack_base
&& frame < thread->kernel_stack_base + KERNEL_STACK_SIZE) {
addr_t previousFrame = *(addr_t*)frame;
if ((previousFrame & ~IFRAME_TYPE_MASK) == 0)
return (struct iframe*)frame;
frame = previousFrame;
}
return NULL;
}
void
x86_pop_iframe(struct iframe_stack *stack)
static struct iframe*
get_previous_iframe(struct iframe* frame)
{
ASSERT(stack->index > 0);
stack->index--;
if (frame == NULL)
return NULL;
return find_previous_iframe(frame->ebp);
}
@ -84,14 +93,10 @@ x86_pop_iframe(struct iframe_stack *stack)
sure that such iframe exists; ie. from syscalls, but usually not
from standard kernel threads.
*/
static struct iframe *
static struct iframe*
get_current_iframe(void)
{
struct thread *thread = thread_get_current_thread();
ASSERT(thread->arch_info.iframes.index >= 0);
return thread->arch_info.iframes.frames[
thread->arch_info.iframes.index - 1];
return find_previous_iframe(x86_read_ebp());
}
@ -105,13 +110,12 @@ get_current_iframe(void)
struct iframe *
i386_get_user_iframe(void)
{
struct thread *thread = thread_get_current_thread();
int i;
struct iframe* frame = get_current_iframe();
for (i = thread->arch_info.iframes.index - 1; i >= 0; i--) {
struct iframe *frame = thread->arch_info.iframes.frames[i];
while (frame != NULL) {
if (frame->cs == USER_CODE_SEG)
return frame;
frame = get_previous_iframe(frame);
}
return NULL;
@ -320,11 +324,6 @@ arch_thread_context_switch(struct thread *from, struct thread *to)
if ((newPageDirectory % B_PAGE_SIZE) != 0)
panic("arch_thread_context_switch: bad pgdir 0x%lx\n", newPageDirectory);
// reinit debugging; necessary, if the thread was preempted after
// initializing debugging before returning to userland
if (to->team->address_space != NULL)
i386_reinit_user_debug_after_context_switch(to);
gX86SwapFPUFunc(from->arch_info.fpu_state, to->arch_info.fpu_state);
i386_context_switch(&from->arch_info, &to->arch_info, newPageDirectory);
}
@ -373,10 +372,6 @@ arch_thread_enter_userspace(struct thread *t, addr_t entry, void *args1,
disable_interrupts();
// When entering the userspace, the iframe stack needs to be empty. After
// an exec() it'll still contain the iframe from the syscall, though.
t->arch_info.iframes.index = 0;
i386_set_tss_and_kstack(t->kernel_stack_base + KERNEL_STACK_SIZE);
// set the CPU dependent GDT entry for TLS
@ -570,3 +565,11 @@ arch_restore_fork_frame(struct arch_fork_arg *arg)
i386_restore_frame_from_syscall(arg->iframe);
}
void
arch_syscall_64_bit_return_value()
{
struct thread* thread = thread_get_current_thread();
atomic_or(&thread->flags, THREAD_FLAGS_64_BIT_SYSCALL_RETURN);
}

@ -511,6 +511,21 @@ arch_destroy_thread_debug_info(struct arch_thread_debug_info *info)
}
void
arch_update_thread_single_step()
{
if (struct iframe* frame = i386_get_user_iframe()) {
struct thread* thread = thread_get_current_thread();
// set/clear TF in EFLAGS depending on if single stepping is desired
if (thread->debug_info.flags & B_THREAD_DEBUG_SINGLE_STEP)
frame->flags |= (1 << X86_EFLAGS_TF);
else
frame->flags &= ~(1 << X86_EFLAGS_TF);
}
}
void
arch_set_debug_cpu_state(const struct debug_cpu_state *cpuState)
{
@ -613,6 +628,15 @@ arch_clear_watchpoint(void *address)
}
bool
arch_has_breakpoints(struct arch_team_debug_info *info)
{
// Reading info->dr7 is atomically, so we don't need to lock. The caller
// has to ensure, that the info doesn't go away.
return (info->dr7 != X86_BREAKPOINTS_DISABLED_DR7);
}
#if KERNEL_BREAKPOINTS
status_t
@ -683,14 +707,18 @@ arch_clear_kernel_watchpoint(void *address)
/**
* Interrupts are enabled.
* Interrupts are disabled.
*/
void
i386_init_user_debug_at_kernel_exit(struct iframe *frame)
x86_init_user_debug_at_kernel_exit(struct iframe *frame)
{
struct thread *thread = thread_get_current_thread();
cpu_status state = disable_interrupts();
#if !KERNEL_BREAKPOINTS
if (!(thread->flags & THREAD_FLAGS_BREAKPOINTS_DEFINED))
return;
#endif
GRAB_THREAD_LOCK();
GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
@ -698,37 +726,31 @@ i386_init_user_debug_at_kernel_exit(struct iframe *frame)
// install the breakpoints
install_breakpoints(teamInfo);
thread->debug_info.arch_info.flags |= X86_THREAD_DEBUG_DR7_SET;
// set/clear TF in EFLAGS depending on if single stepping is desired
if (thread->debug_info.flags & B_THREAD_DEBUG_SINGLE_STEP)
frame->flags |= (1 << X86_EFLAGS_TF);
else
frame->flags &= ~(1 << X86_EFLAGS_TF);
// ToDo: Move into a function called from thread_hit_debug_event().
// No need to have that here in the code executed for ever kernel->user
// mode switch.
atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_INSTALLED);
RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
RELEASE_THREAD_LOCK();
restore_interrupts(state);
}
/**
* Interrupts may be enabled.
* Interrupts are disabled.
*/
void
i386_exit_user_debug_at_kernel_entry()
x86_exit_user_debug_at_kernel_entry()
{
struct thread *thread = thread_get_current_thread();
cpu_status state = disable_interrupts();
#if !KERNEL_BREAKPOINTS
if (!(thread->flags & THREAD_FLAGS_BREAKPOINTS_INSTALLED))
return;
#endif
GRAB_THREAD_LOCK();
// disable breakpoints
disable_breakpoints();
thread->debug_info.arch_info.flags &= ~X86_THREAD_DEBUG_DR7_SET;
#if KERNEL_BREAKPOINTS
struct team* kernelTeam = team_get_kernel_team();
@ -737,49 +759,17 @@ i386_exit_user_debug_at_kernel_entry()
RELEASE_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
#endif
atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_INSTALLED);
RELEASE_THREAD_LOCK();
restore_interrupts(state);
}
/**
 * Re-installs hardware breakpoints after a context switch to \a thread.
 * Interrupts are disabled and the thread lock is being held.
 */
void
i386_reinit_user_debug_after_context_switch(struct thread *thread)
{
	// This function deals with a race condition: We set up the debugging
	// registers in i386_init_user_debug_at_kernel_exit() when a userland
	// thread is going to leave the kernel. Afterwards the thread might be
	// preempted, though, since interrupts are enabled.
	// X86_THREAD_DEBUG_DR7_SET indicates, when this happens.
	// TODO: We should fix this by disabling interrupts before
	// i386_init_user_debug_at_kernel_exit() is called and keep them disabled
	// until returning from the interrupt.
	if (thread->debug_info.arch_info.flags & X86_THREAD_DEBUG_DR7_SET) {
		// the thread was about to return to userland; re-install its team's
		// user break/watchpoints
		GRAB_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
		install_breakpoints(thread->team->debug_info.arch_info);
		RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
#if KERNEL_BREAKPOINTS
	} else {
		// we're still in the kernel
		struct team* kernelTeam = team_get_kernel_team();
		GRAB_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
		install_breakpoints(kernelTeam->debug_info.arch_info);
		RELEASE_TEAM_DEBUG_INFO_LOCK(kernelTeam->debug_info);
#endif
	}
}
/**
* Interrupts are disabled and will be enabled by the function.
*/
int
i386_handle_debug_exception(struct iframe *frame)
void
x86_handle_debug_exception(struct iframe *frame)
{
// get debug status and control registers
uint32 dr6, dr7;
@ -791,7 +781,7 @@ i386_handle_debug_exception(struct iframe *frame)
if (frame->cs != USER_CODE_SEG) {
panic("debug exception in kernel mode: dr6: 0x%lx, dr7: 0x%lx", dr6,
dr7);
return B_HANDLED_INTERRUPT;
return;
}
// check, which exception condition applies
@ -849,34 +839,30 @@ i386_handle_debug_exception(struct iframe *frame)
enable_interrupts();
}
return B_HANDLED_INTERRUPT;
}
/**
* Interrupts are disabled and will be enabled by the function.
*/
int
i386_handle_breakpoint_exception(struct iframe *frame)
void
x86_handle_breakpoint_exception(struct iframe *frame)
{
TRACE(("i386_handle_breakpoint_exception()\n"));
if (frame->cs != USER_CODE_SEG) {
panic("breakpoint exception in kernel mode");
return B_HANDLED_INTERRUPT;
return;
}
enable_interrupts();
user_debug_breakpoint_hit(true);
return B_HANDLED_INTERRUPT;
}
void
i386_init_user_debug()
x86_init_user_debug()
{
// get debug settings
if (void *handle = load_driver_settings("kernel")) {

@ -0,0 +1,56 @@
/*
* Copyright 2007, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
// This file is used to get C structure offsets into assembler code.
// The build system assembles the file and processes the output to create
// a header file with macro definitions, that can be included from assembler
// code.
#include <arch_cpu.h>
#include <ksyscalls.h>
#include <thread_types.h>
// DEFINE_MACRO emits a "#define <macro> <value>" line into the compiler's
// assembly output; the "i" constraint forces the constant to be printed as an
// immediate. The build system extracts these lines into a header.
#define DEFINE_MACRO(macro, value) \
	asm volatile("#define " #macro " %0" : : "i" (value))

// Emits a macro for the byte offset of \a member within \a structure.
#define DEFINE_OFFSET_MACRO(prefix, structure, member) \
	DEFINE_MACRO(prefix##_##member, offsetof(struct structure, member));

// Emits a macro for the total size of \a structure.
#define DEFINE_SIZEOF_MACRO(prefix, structure) \
	DEFINE_MACRO(prefix##_sizeof, sizeof(struct structure));

// Never called at runtime; it exists only so the asm statements above are
// compiled and their output can be post-processed into the offsets header.
void
dummy()
{
	// struct thread
	DEFINE_OFFSET_MACRO(THREAD, thread, kernel_time);
	DEFINE_OFFSET_MACRO(THREAD, thread, user_time);
	DEFINE_OFFSET_MACRO(THREAD, thread, last_time);
	DEFINE_OFFSET_MACRO(THREAD, thread, in_kernel);
	DEFINE_OFFSET_MACRO(THREAD, thread, flags);
	DEFINE_OFFSET_MACRO(THREAD, thread, kernel_stack_top);
	DEFINE_OFFSET_MACRO(THREAD, thread, fault_handler);

	// struct iframe
	DEFINE_OFFSET_MACRO(IFRAME, iframe, cs);
	DEFINE_OFFSET_MACRO(IFRAME, iframe, eax);
	DEFINE_OFFSET_MACRO(IFRAME, iframe, edx);
	DEFINE_OFFSET_MACRO(IFRAME, iframe, orig_eax);
	DEFINE_OFFSET_MACRO(IFRAME, iframe, vector);
	DEFINE_OFFSET_MACRO(IFRAME, iframe, eip);
	DEFINE_OFFSET_MACRO(IFRAME, iframe, flags);
	DEFINE_OFFSET_MACRO(IFRAME, iframe, user_esp);

	// struct syscall_info
	DEFINE_SIZEOF_MACRO(SYSCALL_INFO, syscall_info);
	DEFINE_OFFSET_MACRO(SYSCALL_INFO, syscall_info, function);
	DEFINE_OFFSET_MACRO(SYSCALL_INFO, syscall_info, parameter_size);

	// struct x86_optimized_functions
	DEFINE_OFFSET_MACRO(X86_OPTIMIZED_FUNCTIONS, x86_optimized_functions,
		memcpy);
}

@ -1,81 +0,0 @@
/*
* Copyright 2007, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#include <arch/x86/commpage.h>
#include <string.h>
#include <KernelExport.h>
#include <vm.h>
#include <vm_types.h>
static area_id comm_area;
static area_id user_comm_area;
static unsigned long *comm_ptr;
static unsigned long *user_comm_ptr;
static void *next_comm_addr;
// user syscall assembly stub
extern void _user_syscall_int(void);
extern unsigned int _user_syscall_int_end;
static inline addr_t
commpage_ptr_to_user_ptr(const void *ptr)
{
return ((addr_t)ptr) + ((addr_t)user_comm_ptr - (addr_t)comm_ptr);
}
// Copies the userland syscall stub into the commpage and publishes its
// user-visible address in the commpage table.
static status_t
initialize_commpage_syscall(void)
{
	size_t len;

	// for now, we're hard coded to use the legacy method (int 99)
	// (arithmetic on void* is a GCC extension; this computes the stub's size
	// from the start and end symbols of the assembly routine)
	len = (size_t)((void *)&_user_syscall_int_end - (void *)&_user_syscall_int);
	memcpy(next_comm_addr, &_user_syscall_int, len);

	// fill in the table entry
	comm_ptr[COMMPAGE_ENTRY_SYSCALL] = commpage_ptr_to_user_ptr(next_comm_addr);

	// advance the allocation pointer, keeping 4 byte alignment
	next_comm_addr = (void *)((addr_t)next_comm_addr + ROUNDUP(len, 4));

	return B_OK;
}
/**
 * Sets up the commpage: creates a read/write kernel area, clones it read-only
 * (and executable) into userland at USER_COMMPAGE_ADDR, initializes the
 * signature/version header, and installs the syscall stub.
 * Fixes: the unused local was removed, the untyped cast for user_comm_ptr was
 * made explicit, and the results of create_area()/clone_area() are now
 * checked instead of being silently ignored.
 */
status_t
commpage_init(void)
{
	// create a read/write kernel area
	comm_area = create_area("commpage", (void **)&comm_ptr, B_ANY_ADDRESS,
		COMMPAGE_SIZE, B_FULL_LOCK, B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA);
	if (comm_area < B_OK)
		return comm_area;

	// clone it at a fixed address with user read/only permissions
	user_comm_ptr = (unsigned long *)USER_COMMPAGE_ADDR;
	user_comm_area = clone_area("user_commpage", (void **)&user_comm_ptr,
		B_EXACT_ADDRESS, B_READ_AREA | B_EXECUTE_AREA, comm_area);
	if (user_comm_area < B_OK)
		return user_comm_area;

	// zero it out
	memset(comm_ptr, 0, COMMPAGE_SIZE);

	// fill in some of the table
	comm_ptr[0] = COMMPAGE_SIGNATURE;
	comm_ptr[1] = COMMPAGE_VERSION;

	// the next slot to allocate space is after the table
	next_comm_addr = (void *)&comm_ptr[TABLE_ENTRIES];

	// select the optimum syscall mechanism and patch the commpage
	return initialize_commpage_syscall();
}

@ -23,6 +23,7 @@ void trap44();void trap45();void trap46();void trap47();
void double_fault(); // int 8
void trap98();
void trap99();
void trap251();void trap252();void trap253();void trap254();void trap255();

@ -9,11 +9,21 @@
.text
/* user space half of the syscall mechanism, to be copied into the commpage */

// int 99 fallback; the *_end symbols let the kernel compute each stub's size
// when copying it into the commpage
FUNCTION(_user_syscall_int):
	int $99
	ret
SYM(_user_syscall_int_end):


// Intel sysenter/sysexit
FUNCTION(_user_syscall_sysenter):
	// sysexit forces us to trash edx (-> eip) and ecx (-> esp), but they are
	// scratch registers anyway. We use ecx right away to store esp.
	movl %esp, %ecx
	sysenter
	ret
SYM(_user_syscall_sysenter_end):

@ -0,0 +1,75 @@
/*
* Copyright 2007, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#include <commpage.h>
#include <string.h>
#include <KernelExport.h>
#include <vm.h>
#include <vm_types.h>
static area_id sCommPageArea;
static area_id sUserCommPageArea;
static addr_t* sCommPageAddress;
static addr_t* sUserCommPageAddress;
static void* sFreeCommPageSpace;
#define ALIGN_ENTRY(pointer) (void*)ROUNDUP((addr_t)(pointer), 8)
void*
allocate_commpage_entry(int entry, size_t size)
{
void* space = sFreeCommPageSpace;
sFreeCommPageSpace = ALIGN_ENTRY((addr_t)sFreeCommPageSpace + size);
sCommPageAddress[entry] = (addr_t)sUserCommPageAddress
+ ((addr_t)space - (addr_t)sCommPageAddress);
dprintf("allocate_commpage_entry(%d, %lu) -> %p\n", entry, size, (void*)sCommPageAddress[entry]);
return space;
}
// Allocates a commpage entry and copies \a size bytes from \a copyFrom into
// it. Returns the kernel-side pointer to the entry.
void*
fill_commpage_entry(int entry, const void* copyFrom, size_t size)
{
	void* destination = allocate_commpage_entry(entry, size);
	memcpy(destination, copyFrom, size);
	return destination;
}
/**
 * Sets up the architecture independent commpage: creates a read/write kernel
 * area, clones it read-only (and executable) into userland at
 * USER_COMMPAGE_ADDR, writes the signature/version header, and lets the
 * architecture specific code fill in its entries.
 * Fix: the results of create_area()/clone_area() are now checked instead of
 * being silently ignored, so a failed area creation no longer leads to a
 * NULL/garbage pointer dereference in memset() below.
 */
status_t
commpage_init(void)
{
	// create a read/write kernel area
	sCommPageArea = create_area("commpage", (void **)&sCommPageAddress,
		B_ANY_ADDRESS, COMMPAGE_SIZE, B_FULL_LOCK,
		B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA);
	if (sCommPageArea < B_OK)
		return sCommPageArea;

	// clone it at a fixed address with user read/only permissions
	sUserCommPageAddress = (addr_t*)USER_COMMPAGE_ADDR;
	sUserCommPageArea = clone_area("user_commpage",
		(void **)&sUserCommPageAddress, B_EXACT_ADDRESS,
		B_READ_AREA | B_EXECUTE_AREA, sCommPageArea);
	if (sUserCommPageArea < B_OK)
		return sUserCommPageArea;

	// zero it out
	memset(sCommPageAddress, 0, COMMPAGE_SIZE);

	// fill in some of the table
	sCommPageAddress[0] = COMMPAGE_SIGNATURE;
	sCommPageAddress[1] = COMMPAGE_VERSION;

	// the next slot to allocate space is after the table
	sFreeCommPageSpace = ALIGN_ENTRY(&sCommPageAddress[COMMPAGE_TABLE_ENTRIES]);

	// let the architecture specific code initialize its commpage entries
	arch_commpage_init();

	return B_OK;
}

@ -23,6 +23,8 @@
#include <vm_types.h>
#include <arch/user_debugger.h>
#include <util/AutoLock.h>
//#define TRACE_USER_DEBUGGER
#ifdef TRACE_USER_DEBUGGER
# define TRACE(x) dprintf x
@ -109,6 +111,96 @@ debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
}
/*! Updates the thread::flags field according to what user debugger flags are
set for the thread.
Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_user_debug_flag(struct thread* thread)
{
if (atomic_get(&thread->debug_info.flags)
& (B_THREAD_DEBUG_STOP | B_THREAD_DEBUG_SINGLE_STEP)) {
atomic_or(&thread->flags, THREAD_FLAGS_DEBUG_THREAD);
} else
atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUG_THREAD);
}
/*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of the
current thread.
Interrupts must be disabled and the team lock must be held.
*/
// Sets or clears THREAD_FLAGS_BREAKPOINTS_DEFINED in the current thread's
// flags, depending on whether its team has any break/watchpoints defined.
// Interrupts must be disabled and the team lock must be held.
static void
update_thread_breakpoints_flag()
{
	struct thread* currentThread = thread_get_current_thread();
	struct team* team = currentThread->team;
	bool haveBreakpoints = arch_has_breakpoints(&team->debug_info.arch_info);

	if (haveBreakpoints)
		atomic_or(&currentThread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
	else
		atomic_and(&currentThread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
}
/*! Updates the thread::flags THREAD_FLAGS_BREAKPOINTS_DEFINED bit of all
threads of the current team.
Interrupts must be disabled and the team lock must be held.
*/
// Updates the THREAD_FLAGS_BREAKPOINTS_DEFINED bit of every thread of the
// current team, according to whether the team has break/watchpoints defined.
static void
update_threads_breakpoints_flag()
{
	InterruptsSpinLocker locker(team_spinlock);

	struct team* team = thread_get_current_thread()->team;
	bool haveBreakpoints = arch_has_breakpoints(&team->debug_info.arch_info);

	for (struct thread* thread = team->thread_list; thread != NULL;
			thread = thread->team_next) {
		if (haveBreakpoints)
			atomic_or(&thread->flags, THREAD_FLAGS_BREAKPOINTS_DEFINED);
		else
			atomic_and(&thread->flags, ~THREAD_FLAGS_BREAKPOINTS_DEFINED);
	}
}
/*! Updates the thread::flags B_TEAM_DEBUG_DEBUGGER_INSTALLED bit of the
current thread.
Interrupts must be disabled and the team lock must be held.
*/
static void
update_thread_debugger_installed_flag()
{
struct thread* thread = thread_get_current_thread();
struct team* team = thread->team;
if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED)
atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
else
atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}
/*! Updates the thread::flags THREAD_FLAGS_DEBUGGER_INSTALLED bit of all
threads of the given team.
Interrupts must be disabled and the team lock must be held.
*/
static void
update_threads_debugger_installed_flag(struct team* team)
{
struct thread* thread = team->thread_list;
if (atomic_get(&team->debug_info.flags) & B_TEAM_DEBUG_DEBUGGER_INSTALLED) {
for (; thread != NULL; thread = thread->team_next)
atomic_or(&thread->flags, THREAD_FLAGS_DEBUGGER_INSTALLED);
} else {
for (; thread != NULL; thread = thread->team_next)
atomic_and(&thread->flags, ~THREAD_FLAGS_DEBUGGER_INSTALLED);
}
}
/**
* For the first initialization the function must be called with \a initLock
* set to \c true. If it would be possible that another thread accesses the
@ -379,6 +471,8 @@ thread_hit_debug_event_internal(debug_debugger_message event,
threadFlags |= B_THREAD_DEBUG_STOPPED;
atomic_set(&thread->debug_info.flags, threadFlags);
update_thread_user_debug_flag(thread);
RELEASE_TEAM_DEBUG_INFO_LOCK(thread->team->debug_info);
RELEASE_THREAD_LOCK();
restore_interrupts(state);
@ -519,6 +613,8 @@ thread_hit_debug_event_internal(debug_debugger_message event,
// unset the "stopped" state
atomic_and(&thread->debug_info.flags, ~B_THREAD_DEBUG_STOPPED);
update_thread_user_debug_flag(thread);
} else {
// the debugger is gone: cleanup our info completely
threadDebugInfo = thread->debug_info;
@ -529,6 +625,9 @@ thread_hit_debug_event_internal(debug_debugger_message event,
RELEASE_THREAD_LOCK();
restore_interrupts(state);
// enable/disable single stepping
arch_update_thread_single_step();
if (destroyThreadInfo)
destroy_thread_debug_info(&threadDebugInfo);
@ -744,6 +843,29 @@ user_debug_team_deleted(team_id teamID, port_id debuggerPort)
}
// Initializes the debug-related thread::flags bits of the freshly created
// thread with the given ID. Acquires the necessary spinlocks itself (thread
// lock first, then team lock), with interrupts disabled for the duration.
void
user_debug_update_new_thread_flags(thread_id threadID)
{
	// Update thread::flags of the thread.

	InterruptsLocker interruptsLocker;

	SpinLocker threadLocker(thread_spinlock);

	struct thread *thread = thread_get_thread_struct_locked(threadID);
	if (!thread)
		return;

	update_thread_user_debug_flag(thread);

	// the breakpoints/debugger-installed flags require the team lock instead
	// of the thread lock
	threadLocker.Unlock();

	SpinLocker teamLocker(team_spinlock);
	// NOTE(review): the two helpers below operate on the *current* thread,
	// not on the thread identified by threadID -- confirm this is intended
	// (i.e. that this function is always called from a thread of the same
	// team as the new thread).
	update_thread_breakpoints_flag();
	update_thread_debugger_installed_flag();
}
void
user_debug_thread_created(thread_id threadID)
{
@ -976,6 +1098,9 @@ nub_thread_cleanup(struct thread *nubThread)
destroyDebugInfo = true;
}
// update the thread::flags fields
update_threads_debugger_installed_flag(nubThread->team);
RELEASE_TEAM_DEBUG_INFO_LOCK(nubThread->team->debug_info);
RELEASE_TEAM_LOCK();
restore_interrupts(state);
@ -1476,6 +1601,9 @@ debug_nub_thread(void *)
if (result == B_OK)
result = arch_set_breakpoint(address);
if (result == B_OK)
update_threads_breakpoints_flag();
// prepare the reply
reply.set_breakpoint.error = result;
replySize = sizeof(reply.set_breakpoint);
@ -1501,6 +1629,9 @@ debug_nub_thread(void *)
if (result == B_OK)
result = arch_clear_breakpoint(address);
if (result == B_OK)
update_threads_breakpoints_flag();
break;
}
@ -1527,6 +1658,9 @@ debug_nub_thread(void *)
if (result == B_OK)
result = arch_set_watchpoint(address, type, length);
if (result == B_OK)
update_threads_breakpoints_flag();
// prepare the reply
reply.set_watchpoint.error = result;
replySize = sizeof(reply.set_watchpoint);
@ -1552,6 +1686,9 @@ debug_nub_thread(void *)
if (result == B_OK)
result = arch_clear_watchpoint(address);
if (result == B_OK)
update_threads_breakpoints_flag();
break;
}
@ -1790,7 +1927,7 @@ debug_nub_thread(void *)
Interrupts must be disabled and the team debug info lock of the team to be
debugged must be held. The function will release the lock, but leave
interrupts disabled.
interrupts disabled. The team lock must be held, too.
The function also clears the arch specific team and thread debug infos
(including among other things formerly set break/watchpoints).
@ -1833,6 +1970,9 @@ install_team_debugger_init_debug_infos(struct team *team, team_id debuggerTeam,
}
RELEASE_THREAD_LOCK();
// update the thread::flags fields
update_threads_debugger_installed_flag(team);
}
@ -2218,6 +2358,8 @@ _user_debug_thread(thread_id threadID)
// set the flag that tells the thread to stop as soon as possible
atomic_or(&thread->debug_info.flags, B_THREAD_DEBUG_STOP);
update_thread_user_debug_flag(thread);
switch (thread->state) {
case B_THREAD_SUSPENDED:
// thread suspended: wake it up
@ -2272,10 +2414,16 @@ _user_set_debugger_breakpoint(void *address, uint32 type, int32 length,
// that we install a break/watchpoint the debugger doesn't know about.
// set the break/watchpoint
status_t result;
if (watchpoint)
return arch_set_watchpoint(address, type, length);
result = arch_set_watchpoint(address, type, length);
else
return arch_set_breakpoint(address);
result = arch_set_breakpoint(address);
if (result == B_OK)
update_threads_breakpoints_flag();
return result;
}
@ -2297,8 +2445,14 @@ _user_clear_debugger_breakpoint(void *address, bool watchpoint)
// that we clear a break/watchpoint the debugger has just installed.
// clear the break/watchpoint
status_t result;
if (watchpoint)
return arch_clear_watchpoint(address);
result = arch_clear_watchpoint(address);
else
return arch_clear_breakpoint(address);
result = arch_clear_breakpoint(address);
if (result == B_OK)
update_threads_breakpoints_flag();
return result;
}

@ -845,6 +845,8 @@ err1:
off_t
_user_seek(int fd, off_t pos, int seekType)
{
syscall_64_bit_return_value();
struct file_descriptor *descriptor;
descriptor = get_fd(get_current_io_context(false), fd);

@ -4186,7 +4186,7 @@ static status_t
file_select(struct file_descriptor *descriptor, uint8 event,
struct selectsync *sync)
{
FUNCTION(("file_select(%p, %u, %lu, %p)\n", descriptor, event, ref, sync));
FUNCTION(("file_select(%p, %u, %p)\n", descriptor, event, sync));
struct vnode *vnode = descriptor->u.vnode;

@ -113,6 +113,8 @@ KernelMergeObject kernel_posix.o :
: $(TARGET_KERNEL_PIC_CCFLAGS)
;
# TODO: Move the following arch specific part into arch/$(TARGET_ARCH) subdirs!
SEARCH_SOURCE += [ FDirName $(librootSources) os arch $(TARGET_ARCH) ] ;
KernelMergeObject kernel_os_arch_$(TARGET_ARCH).o :
@ -132,7 +134,7 @@ KernelMergeObject kernel_posix_arch_$(TARGET_ARCH).o :
siglongjmp.S
sigsetjmp.S
kernel_setjmp_save_sigs.c
arch_string.S
arch_string.S # TODO: Not needed for X86!
: $(TARGET_KERNEL_PIC_CCFLAGS)
;

@ -14,6 +14,7 @@
#include <arch/platform.h>
#include <boot_item.h>
#include <cbuf.h>
#include <commpage.h>
#include <condition_variable.h>
#include <cpu.h>
#include <debug.h>
@ -120,6 +121,7 @@ _start(kernel_args *bootKernelArgs, int currentCPU)
debug_init_post_vm(&sKernelArgs);
int_init_post_vm(&sKernelArgs);
cpu_init_post_vm(&sKernelArgs);
commpage_init();
TRACE("init system info\n");
system_info_init(&sKernelArgs);

@ -9,9 +9,11 @@
#include <KernelExport.h>
#include <arch/real_time_clock.h>
#include <commpage.h>
#include <real_time_clock.h>
#include <real_time_data.h>
#include <syscalls.h>
#include <thread.h>
#include <stdlib.h>
@ -90,27 +92,8 @@ rtc_debug(int argc, char **argv)
status_t
rtc_init(kernel_args *args)
{
void *clonedRealTimeData;
area_id area = create_area("real time data", (void **)&sRealTimeData,
B_ANY_KERNEL_ADDRESS, PAGE_ALIGN(sizeof(struct real_time_data)),
B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK) {
panic("rtc_init: error creating real time data area\n");
return area;
}
// On some systems like x86, a page cannot be read-only in userland and
// writable in the kernel. Therefore, we clone the real time data area
// here for user access; it doesn't hurt on other platforms, too.
// The area is used to share time critical information, such as the system
// time conversion factor which can change at any time.
if (clone_area("real time data userland", &clonedRealTimeData,
B_ANY_KERNEL_ADDRESS, B_READ_AREA, area) < B_OK) {
dprintf("rtc_init: error creating real time data userland area\n");
// we don't panic because it's not kernel critical
}
sRealTimeData = (struct real_time_data*)allocate_commpage_entry(
COMMPAGE_ENTRY_REAL_TIME_DATA, sizeof(struct real_time_data));
arch_rtc_init(args, sRealTimeData);
rtc_hw_to_system();
@ -242,6 +225,8 @@ _kern_get_timezone(time_t *_timezoneOffset, bool *_daylightSavingTime)
bigtime_t
_user_system_time(void)
{
syscall_64_bit_return_value();
return system_time();
}

@ -54,6 +54,28 @@ static status_t deliver_signal(struct thread *thread, uint signal,
uint32 flags);
/*! Updates the thread::flags field according to what signals are pending.
Interrupts must be disabled and the thread lock must be held.
*/
static void
update_thread_signals_flag(struct thread* thread)
{
if (atomic_get(&thread->sig_pending) & ~atomic_get(&thread->sig_block_mask))
atomic_or(&thread->flags, THREAD_FLAGS_SIGNALS_PENDING);
else
atomic_and(&thread->flags, ~THREAD_FLAGS_SIGNALS_PENDING);
}
// Convenience wrapper: updates the signals-pending flag of the current
// thread, acquiring the thread lock (with interrupts disabled) itself.
static void
update_current_thread_signals_flag()
{
	InterruptsSpinLocker threadLocker(thread_spinlock);

	struct thread* currentThread = thread_get_current_thread();
	update_thread_signals_flag(currentThread);
}
static bool
notify_debugger(struct thread *thread, int signal, struct sigaction *handler,
bool deadly)
@ -246,6 +268,8 @@ handle_signals(struct thread *thread)
(handler->sa_mask | SIGNAL_TO_MASK(signal)) & BLOCKABLE_SIGNALS);
}
update_current_thread_signals_flag();
return reschedule;
}
@ -253,6 +277,8 @@ handle_signals(struct thread *thread)
if (restart)
arch_check_syscall_restart(thread);
update_current_thread_signals_flag();
return reschedule;
}
@ -358,6 +384,9 @@ deliver_signal(struct thread *thread, uint signal, uint32 flags)
}
break;
}
update_thread_signals_flag(thread);
return B_OK;
}
@ -482,6 +511,8 @@ sigprocmask(int how, const sigset_t *set, sigset_t *oldSet)
default:
return B_BAD_VALUE;
}
update_current_thread_signals_flag();
}
if (oldSet != NULL)
@ -631,6 +662,8 @@ sigsuspend(const sigset_t *mask)
state = disable_interrupts();
GRAB_THREAD_LOCK();
update_thread_signals_flag(thread);
scheduler_reschedule();
RELEASE_THREAD_LOCK();
@ -643,6 +676,8 @@ sigsuspend(const sigset_t *mask)
// restore the original block mask
atomic_set(&thread->sig_block_mask, oldMask);
update_current_thread_signals_flag();
// we're not supposed to actually succeed
// ToDo: could this get us into trouble with SA_RESTART handlers?
return B_INTERRUPTED;
@ -668,6 +703,8 @@ sigpending(sigset_t *set)
bigtime_t
_user_set_alarm(bigtime_t time, uint32 mode)
{
syscall_64_bit_return_value();
return set_alarm(time, mode);
}

@ -158,6 +158,8 @@ _user_is_computer_on(void)
static inline int64
_user_restore_signal_frame()
{
syscall_64_bit_return_value();
return arch_restore_signal_frame();
}

@ -220,6 +220,7 @@ create_thread_struct(struct thread *inthread, const char *name,
else
strcpy(thread->name, "unnamed thread");
thread->flags = 0;
thread->id = threadID >= 0 ? threadID : allocate_thread_id();
thread->team = NULL;
thread->cpu = cpu;
@ -408,6 +409,8 @@ create_thread(const char *name, team_id teamID, thread_entry_func entry,
return status;
}
thread->kernel_stack_top = thread->kernel_stack_base + KERNEL_STACK_SIZE;
state = disable_interrupts();
GRAB_THREAD_LOCK();
@ -501,6 +504,8 @@ create_thread(const char *name, team_id teamID, thread_entry_func entry,
kill_thread(thread->id);
}
user_debug_update_new_thread_flags(thread->id);
// copy the user entry over to the args field in the thread struct
// the function this will call will immediately switch the thread into
// user space.
@ -1058,6 +1063,7 @@ _dump_thread_info(struct thread *thread)
strerror(thread->kernel_errno));
kprintf("kernel_time: %Ld\n", thread->kernel_time);
kprintf("user_time: %Ld\n", thread->user_time);
kprintf("flags: 0x%lx\n", thread->flags);
kprintf("architecture dependant section:\n");
arch_thread_dump_info(&thread->arch_info);
}
@ -1458,26 +1464,20 @@ thread_get_thread_struct_locked(thread_id id)
Called in the interrupt handler code when a thread enters
the kernel for any reason.
Only tracks time for now.
Interrupts are disabled.
*/
void
thread_at_kernel_entry(void)
thread_at_kernel_entry(bigtime_t now)
{
struct thread *thread = thread_get_current_thread();
cpu_status state;
bigtime_t now;
TRACE(("thread_atkernel_entry: entry thread %ld\n", thread->id));
state = disable_interrupts();
TRACE(("thread_at_kernel_entry: entry thread %ld\n", thread->id));
// track user time
now = system_time();
thread->user_time += now - thread->last_time;
thread->last_time = now;
thread->in_kernel = true;
restore_interrupts(state);
}
@ -1492,7 +1492,7 @@ thread_at_kernel_exit(void)
cpu_status state;
bigtime_t now;
TRACE(("thread_atkernel_exit: exit thread %ld\n", thread->id));
TRACE(("thread_at_kernel_exit: exit thread %ld\n", thread->id));
if (handle_signals(thread)) {
state = disable_interrupts();
@ -1516,6 +1516,26 @@ thread_at_kernel_exit(void)
}
/*! The quick version of thread_kernel_exit(), in case no signals are pending
	and no debugging shall be done.
	Interrupts are disabled in this case.
*/
void
thread_at_kernel_exit_no_signals(void)
{
	struct thread *thread = thread_get_current_thread();

	TRACE(("thread_at_kernel_exit_no_signals: exit thread %ld\n", thread->id));

	// charge the elapsed time to the kernel and leave kernel mode
	bigtime_t now = system_time();
	thread->in_kernel = false;
	thread->kernel_time += now - thread->last_time;
	thread->last_time = now;
}
void
thread_reset_for_exec(void)
{

@ -14,7 +14,7 @@
* branch to the syscall vector in the commpage
*/
#include <arch/x86/commpage.h>
#include <commpage.h>
#define _SYSCALL(name, n) \
.globl name; \
@ -22,7 +22,7 @@
.align 8; \
name: \
movl $n,%eax; \
jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_SYSCALL * 4)
jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_SYSCALL * 4)
#define SYSCALL0(name, n) _SYSCALL(name, n)
#define SYSCALL1(name, n) _SYSCALL(name, n)

@ -4,6 +4,7 @@
*/
#include <commpage.h>
#include <libroot_private.h>
#include <real_time_data.h>
#include <syscalls.h>
@ -16,26 +17,16 @@
#include <syslog.h>
static struct real_time_data sRealTimeDefaults;
static struct real_time_data *sRealTimeData;
void
__init_time(void)
{
bool setDefaults = false;
area_id dataArea;
area_info info;
sRealTimeData = (struct real_time_data*)
USER_COMMPAGE_TABLE[COMMPAGE_ENTRY_REAL_TIME_DATA];
dataArea = find_area("real time data userland");
if (dataArea < 0 || get_area_info(dataArea, &info) < B_OK) {
syslog(LOG_ERR, "error finding real time data area: %s\n", strerror(dataArea));
sRealTimeData = &sRealTimeDefaults;
setDefaults = true;
} else
sRealTimeData = (struct real_time_data *)info.address;
__arch_init_time(sRealTimeData, setDefaults);
__arch_init_time(sRealTimeData, false);
}

@ -39,6 +39,8 @@ MergeObject posix_string.o :
SubDir HAIKU_TOP src system libroot posix string arch $(TARGET_ARCH) ;
UsePrivateKernelHeaders ;
MergeObject posix_string_arch_$(TARGET_ARCH).o :
arch_string.S
;

@ -3,29 +3,16 @@
** Distributed under the terms of the NewOS License.
*/
#if !_KERNEL_MODE
// TODO: This should not even be compiled for the kernel. Fix the TODO in
// src/system/kernel/lib/Jamfile!
#include <commpage.h>
#define FUNCTION(x) .global x; .type x,@function; x
.align 4
FUNCTION(memcpy):
pushl %esi
pushl %edi
movl 12(%esp),%edi /* dest */
movl %edi,%eax /* save dest ptr as return address */
movl 16(%esp),%esi /* source */
movl 20(%esp),%ecx /* count */
/* move by words */
cld
shrl $2,%ecx
rep
movsl
jmp *(USER_COMMPAGE_ADDR + COMMPAGE_ENTRY_X86_MEMCPY * 4)
/* move any remaining data by bytes */
movl 20(%esp),%ecx
andl $3,%ecx
rep
movsb
popl %edi
popl %esi
ret
#endif // !_KERNEL_MODE

@ -371,8 +371,16 @@ public:
if (!file.is_open())
throw IOException(string("Failed to open `") + filename + "'.");
// output syscall count macro
file << "#define SYSCALL_COUNT " << fSyscallCount << endl;
file << endl;
// assembler guard
file << "#ifndef _ASSEMBLER" << endl;
file << endl;
// output syscall count
file << "const int kSyscallCount = " << fSyscallCount << ";" << endl;
file << "const int kSyscallCount = SYSCALL_COUNT;" << endl;
file << endl;
// syscall infos array preamble
@ -394,6 +402,9 @@ public:
// syscall infos array end
file << "};" << endl;
// assembler guard end
file << "#endif // _ASSEMBLER" << endl;
}
void _WriteSTraceFile(const char *filename)