2005-02-10 05:47:46 +03:00
|
|
|
/*
|
2011-01-11 00:54:38 +03:00
|
|
|
* Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
|
2005-02-10 05:47:46 +03:00
|
|
|
* Distributed under the terms of the MIT License.
|
|
|
|
*
|
|
|
|
* Userland debugger support.
|
|
|
|
*/
|
|
|
|
#ifndef _KERNEL_USER_DEBUGGER_H
|
|
|
|
#define _KERNEL_USER_DEBUGGER_H
|
|
|
|
|
2009-09-28 06:54:38 +04:00
|
|
|
|
2005-02-24 19:07:19 +03:00
|
|
|
#include <debugger.h>
|
2005-02-10 05:47:46 +03:00
|
|
|
|
2005-02-28 03:34:06 +03:00
|
|
|
#include <arch/user_debugger.h>
|
|
|
|
|
2008-09-15 17:36:31 +04:00
|
|
|
#include <timer.h>
|
|
|
|
|
|
|
|
|
|
|
|
// limits
#define B_DEBUG_MIN_PROFILE_INTERVAL			10	/* in us */
	// smallest sampling interval accepted for thread profiling
#define B_DEBUG_STACK_TRACE_DEPTH				128
	// maximum number of return addresses recorded per stack trace
#define B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD	70	/* in % */
	// fill level (percent) at which the profiling sample buffer is
	// flushed, if possible
|
2008-09-15 17:36:31 +04:00
|
|
|
|
|
|
|
|
2009-06-24 01:03:57 +04:00
|
|
|
// forward declarations -- the full definitions live elsewhere in the kernel
struct BreakpointManager;
struct ConditionVariable;
struct function_profile_info;

namespace BKernel {
	struct Thread;
}

using BKernel::Thread;
	// make the kernel Thread type available unqualified below
|
|
|
|
|
2008-09-15 17:36:31 +04:00
|
|
|
|
2005-02-28 03:34:06 +03:00
|
|
|
// Team related debugging data.
|
|
|
|
//
|
|
|
|
// Locking policy:
|
|
|
|
// 1) When accessing the structure it must be made sure, that the structure,
|
2011-06-12 04:00:23 +04:00
|
|
|
// (i.e. the struct Team it lives in) isn't deleted. Thus one either needs to
|
|
|
|
// get a team reference, lock the team, or one accesses the structure from a
|
|
|
|
// thread of that team.
|
2009-09-28 06:54:38 +04:00
|
|
|
// 2) Access to the `flags' field is atomic. Reading via atomic_get()
|
2005-02-28 03:34:06 +03:00
|
|
|
// requires no further locks (in addition to 1) that is). Writing requires
|
2011-06-12 04:00:23 +04:00
|
|
|
// `lock' to be held and must be done atomically, too
|
2005-02-28 03:34:06 +03:00
|
|
|
// (atomic_{set,and,or}()). Reading with `lock' being held doesn't need to
|
|
|
|
// be done atomically.
|
2011-06-12 04:00:23 +04:00
|
|
|
// 3) Access to all other fields (read or write) requires `lock' to be held.
|
|
|
|
// 4) Locking order is scheduler lock -> Team -> Thread -> team_debug_info::lock
|
|
|
|
// -> thread_debug_info::lock.
|
2005-02-28 03:34:06 +03:00
|
|
|
//
|
2005-02-10 05:47:46 +03:00
|
|
|
struct team_debug_info {
	spinlock lock;
		// Guards the remaining fields. Should always be the innermost lock
		// to be acquired/released, save for thread_debug_info::lock.

	int32 flags;
		// Set atomically. So reading atomically is OK, even when the lock is
		// not held (at least if it is certain, that the team struct won't go).

	team_id debugger_team;
		// team of the installed debugger
	port_id debugger_port;
		// port the debugger listens on for debug events
	thread_id nub_thread;
		// the team's debug nub thread
	port_id nub_port;
		// the port the nub thread is waiting on for commands from the debugger

	sem_id debugger_write_lock;
		// synchronizes writes to the debugger port with the setting (but not
		// clearing) of the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag

	thread_id causing_thread;
		// thread that caused the debugger to be attached; -1 for manual
		// debugger attachment (or no debugger installed)

	int32 image_event;
		// counter incremented whenever an image is created/deleted

	struct ConditionVariable* debugger_changed_condition;
		// Set to a condition variable when going to change the debugger. Anyone
		// who wants to change the debugger as well, needs to wait until the
		// condition variable is unset again (waiting for the condition and
		// rechecking again). The field and the condition variable is protected
		// by 'lock'. After setting a condition variable the team is
		// guaranteed not to be deleted (until it is unset); it might be removed
		// from the team hash table, though.

	struct BreakpointManager* breakpoint_manager;
		// manages hard- and software breakpoints

	struct arch_team_debug_info arch_info;
		// architecture-specific part (see <arch/user_debugger.h>)
};
|
|
|
|
|
2011-06-12 04:00:23 +04:00
|
|
|
// Thread related debugging data.
|
|
|
|
//
|
|
|
|
// Locking policy:
|
|
|
|
// 1) When accessing the structure it must be made sure, that the structure,
|
|
|
|
// (i.e. the struct Thread it lives in) isn't deleted. Thus one either needs
|
|
|
|
// to get a thread reference, lock the thread, or one accesses the structure
|
|
|
|
// of the current thread.
|
|
|
|
// 2) Access to the `flags' field is atomic. Reading via atomic_get()
|
|
|
|
// requires no further locks (in addition to 1) that is). Writing requires
|
|
|
|
// `lock' to be held and must be done atomically, too
|
|
|
|
// (atomic_{set,and,or}()). Reading with `lock' being held doesn't need to
|
|
|
|
// be done atomically.
|
|
|
|
// 3) Access to all other fields (read or write) requires `lock' to be held.
|
|
|
|
// 4) Locking order is scheduler lock -> Team -> Thread -> team_debug_info::lock
|
|
|
|
// -> thread_debug_info::lock.
|
|
|
|
//
|
2005-02-10 05:47:46 +03:00
|
|
|
struct thread_debug_info {
	spinlock lock;
		// Guards the remaining fields. Should always be the innermost lock
		// to be acquired/released.

	int32 flags;
		// Set atomically. So reading atomically is OK, even when the lock is
		// not held (at least if it is certain, that the thread struct won't
		// go).

	port_id debug_port;
		// the port the thread is waiting on for commands from the nub thread

	sigset_t ignore_signals;
		// the signals the debugger is not interested in
	sigset_t ignore_signals_once;
		// the signals the debugger wishes not to be notified of, when they
		// occur the next time

	// profiling related part; if samples != NULL, the thread is profiled
	struct {
		bigtime_t		interval;
			// sampling interval
		area_id			sample_area;
			// cloned sample buffer area
		addr_t*			samples;
			// sample buffer
		int32			max_samples;
			// maximum number of samples the buffer can hold
		int32			flush_threshold;
			// number of samples at which the buffer is flushed (if possible)
		int32			sample_count;
			// number of samples the buffer currently holds
		int32			stack_depth;
			// number of return addresses to record per timer interval
		int32			dropped_ticks;
			// number of ticks that had to be dropped when the sample buffer
			// was full and couldn't be flushed
		int32			image_event;
			// number of the image event when the first sample was written into
			// the buffer
		int32			last_image_event;
			// number of the image event when the last sample was written into
			// the buffer
		bool			variable_stack_depth;
			// record a variable number of samples per hit
		bool			buffer_full;
			// indicates that the sample buffer is full
		union {
			bigtime_t	interval_left;
				// when unscheduled: the time left of the current sampling
				// interval
			bigtime_t	timer_end;
				// when running: the absolute time the timer is supposed to go
				// off
		};
		timer*			installed_timer;
			// when running and being profiled: the CPU's profiling timer
	} profile;

	struct arch_thread_debug_info arch_info;
		// architecture-specific part (see <arch/user_debugger.h>)
};
|
|
|
|
|
2005-02-28 03:34:06 +03:00
|
|
|
// Convenience wrappers for acquiring/releasing team_debug_info::lock.
// `info' is the structure itself, not a pointer to it.
#define GRAB_TEAM_DEBUG_INFO_LOCK(info)		acquire_spinlock(&(info).lock)
#define RELEASE_TEAM_DEBUG_INFO_LOCK(info)	release_spinlock(&(info).lock)
|
|
|
|
|
2005-02-10 05:47:46 +03:00
|
|
|
// team debugging flags (user-specifiable flags are in <debugger.h>)
enum {
	B_TEAM_DEBUG_DEBUGGER_INSTALLED		= 0x0001,
	B_TEAM_DEBUG_DEBUGGER_HANDOVER		= 0x0002,	// marked for hand-over
	B_TEAM_DEBUG_DEBUGGER_HANDING_OVER	= 0x0004,	// handing over
	B_TEAM_DEBUG_DEBUGGER_DISABLED		= 0x0008,

	B_TEAM_DEBUG_KERNEL_FLAG_MASK		= 0xffff,
		// bits reserved for the kernel-internal flags above

	B_TEAM_DEBUG_DEFAULT_FLAGS			= 0,
	B_TEAM_DEBUG_INHERITED_FLAGS		= B_TEAM_DEBUG_DEBUGGER_DISABLED
		// flags that carry over to a new team (per the name; confirm at the
		// team-creation call site)
};
|
|
|
|
|
|
|
|
// thread debugging flags (user-specifiable flags are in <debugger.h>)
enum {
	B_THREAD_DEBUG_INITIALIZED			= 0x0001,
	B_THREAD_DEBUG_DYING				= 0x0002,
	B_THREAD_DEBUG_STOP					= 0x0004,
	B_THREAD_DEBUG_STOPPED				= 0x0008,
	B_THREAD_DEBUG_SINGLE_STEP			= 0x0010,
	B_THREAD_DEBUG_NOTIFY_SINGLE_STEP	= 0x0020,

	B_THREAD_DEBUG_NUB_THREAD			= 0x0040,	// marks the nub thread

	B_THREAD_DEBUG_KERNEL_FLAG_MASK		= 0xffff,
		// bits reserved for the kernel-internal flags above

	B_THREAD_DEBUG_DEFAULT_FLAGS		= 0,
};
|
|
|
|
|
|
|
|
// messages sent from the debug nub thread to a debugged thread
typedef enum {
	B_DEBUGGED_THREAD_MESSAGE_CONTINUE	= 0,
		// resume the thread (payload: debugged_thread_continue)
	B_DEBUGGED_THREAD_SET_CPU_STATE,
		// set the thread's CPU state (payload: debugged_thread_set_cpu_state)
	B_DEBUGGED_THREAD_GET_CPU_STATE,
		// request the thread's CPU state
		// (payload: debugged_thread_get_cpu_state)
	B_DEBUGGED_THREAD_DEBUGGER_CHANGED,
		// the team's debugger has changed
} debugged_thread_message;
|
|
|
|
|
2005-02-24 19:07:19 +03:00
|
|
|
// payload of B_DEBUGGED_THREAD_MESSAGE_CONTINUE
typedef struct {
	uint32	handle_event;
		// how to handle the event the thread stopped for
		// (presumably a B_THREAD_DEBUG_* handling constant from
		// <debugger.h> -- confirm at the sending site)
	bool	single_step;
		// whether to single-step the thread when continuing
} debugged_thread_continue;
|
|
|
|
|
2005-03-10 01:05:43 +03:00
|
|
|
// payload of B_DEBUGGED_THREAD_GET_CPU_STATE
typedef struct {
	port_id	reply_port;
		// port the requested CPU state shall be sent to
} debugged_thread_get_cpu_state;
|
2005-03-10 01:05:43 +03:00
|
|
|
|
2005-02-28 03:34:06 +03:00
|
|
|
// payload of B_DEBUGGED_THREAD_SET_CPU_STATE
typedef struct {
	debug_cpu_state	cpu_state;
		// the new CPU state to install for the thread
} debugged_thread_set_cpu_state;
|
2005-02-24 19:07:19 +03:00
|
|
|
|
|
|
|
// union of all possible payloads of a debugged_thread_message; which member
// is valid is determined by the accompanying message code
typedef union {
	debugged_thread_continue		continue_thread;
		// B_DEBUGGED_THREAD_MESSAGE_CONTINUE
	debugged_thread_set_cpu_state	set_cpu_state;
		// B_DEBUGGED_THREAD_SET_CPU_STATE
	debugged_thread_get_cpu_state	get_cpu_state;
		// B_DEBUGGED_THREAD_GET_CPU_STATE
} debugged_thread_message_data;
|
2005-02-10 05:47:46 +03:00
|
|
|
|
|
|
|
|
2005-03-11 01:04:27 +03:00
|
|
|
// internal messages sent to the nub thread (negative codes, so they cannot
// collide with the user protocol codes defined in <debugger.h>)
typedef enum {
	B_DEBUG_MESSAGE_HANDED_OVER		= -1,
		// the team has been handed over to a new debugger
} debug_nub_kernel_message;
|
|
|
|
|
|
|
|
|
2005-02-10 05:47:46 +03:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// service calls

// Resets a team's debug info. `initLock' tells whether the spinlock shall
// be (re-)initialized as well.
void clear_team_debug_info(struct team_debug_info *info, bool initLock);

// thread debug info life cycle: init on thread creation, clear on reuse,
// destroy on thread deletion (`dying' indicates the thread is exiting)
void init_thread_debug_info(struct thread_debug_info *info);
void clear_thread_debug_info(struct thread_debug_info *info, bool dying);
void destroy_thread_debug_info(struct thread_debug_info *info);

// exec() transition hooks -- called before/after an exec*() replaces the
// team's image
void user_debug_prepare_for_exec();
void user_debug_finish_after_exec();

// one-time initialization of the userland debugging subsystem
void init_user_debug();


// debug event callbacks
// Invoked by the kernel when the corresponding event occurs in a debugged
// thread/team; they forward the event to the installed debugger.

void user_debug_pre_syscall(uint32 syscall, void *args);
void user_debug_post_syscall(uint32 syscall, void *args, uint64 returnValue,
	bigtime_t startTime);
// Return value: presumably whether the event was consumed/shall alter the
// default handling -- confirm at the call sites.
bool user_debug_exception_occurred(debug_exception_type exception, int signal);
bool user_debug_handle_signal(int signal, struct sigaction *handler,
	bool deadly);
void user_debug_stop_thread();
void user_debug_team_created(team_id teamID);
void user_debug_team_deleted(team_id teamID, port_id debuggerPort);
void user_debug_team_exec();
void user_debug_update_new_thread_flags(Thread* thread);
void user_debug_thread_created(thread_id threadID);
void user_debug_thread_deleted(team_id teamID, thread_id threadID);
void user_debug_thread_exiting(Thread* thread);
void user_debug_image_created(const image_info *imageInfo);
void user_debug_image_deleted(const image_info *imageInfo);
void user_debug_breakpoint_hit(bool software);
void user_debug_watchpoint_hit();
void user_debug_single_stepped();

// scheduler hooks (e.g. for profiling timer bookkeeping)
void user_debug_thread_unscheduled(Thread* thread);
void user_debug_thread_scheduled(Thread* thread);


// syscalls
// Kernel-side implementations of the userland debugger syscalls.

void _user_debugger(const char *message);
int _user_disable_debugger(int state);

status_t _user_install_default_debugger(port_id debuggerPort);
port_id _user_install_team_debugger(team_id team, port_id debuggerPort);
status_t _user_remove_team_debugger(team_id team);
status_t _user_debug_thread(thread_id thread);
void _user_wait_for_debugger(void);

status_t _user_set_debugger_breakpoint(void *address, uint32 type,
	int32 length, bool watchpoint);
status_t _user_clear_debugger_breakpoint(void *address, bool watchpoint);

ssize_t _user_get_stack_trace(size_t addressCount, addr_t* returnAddresses);
|
2005-02-10 05:47:46 +03:00
|
|
|
|
|
|
|
#ifdef __cplusplus
|
|
|
|
} // extern "C"
|
|
|
|
#endif
|
|
|
|
|
|
|
|
|
|
|
|
#endif // _KERNEL_USER_DEBUGGER_H
|