* Extended the debugger API with sampling-based profiling support. This is
  still very much work in progress.
* Introduced init_thread_debug_info(), which is used instead of
  clear_thread_debug_info() when a thread is created; the latter requires the
  structure to have been initialized before.
* user_debug_thread_deleted() is now invoked in thread_exit() already, not in
  the undertaker.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@27531 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 243300b289, commit cbcebd3330
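For context, here is a rough debugger-side sketch of how the two new nub
messages could be used. It is not part of this change: the helper names, the
fixed 1000 us interval, and the assumption that the nub port is the one
returned when the debugger was installed via install_team_debugger() are
illustrative only, and error handling is largely omitted.

/*
 * Hypothetical debugger-side sketch (not part of this commit). Assumes the
 * debugger already installed itself and knows the team's nub port.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <debugger.h>
#include <OS.h>

static status_t
start_profiling(port_id nubPort, thread_id thread,
    const debug_profile_function* functions, int32 functionCount)
{
    // the request ends in a variable-length function array (functions[1])
    size_t size = sizeof(debug_nub_start_profiler)
        + (functionCount - 1) * sizeof(debug_profile_function);
    debug_nub_start_profiler* message
        = (debug_nub_start_profiler*)malloc(size);
    if (message == NULL)
        return B_NO_MEMORY;

    port_id replyPort = create_port(1, "profiler reply");
    message->reply_port = replyPort;
    message->thread = thread;
    message->interval = 1000;
        // in us; the kernel clamps this to B_DEBUG_MIN_PROFILE_INTERVAL
    message->function_count = functionCount;
    memcpy(message->functions, functions,
        functionCount * sizeof(debug_profile_function));

    write_port(nubPort, B_DEBUG_START_PROFILER, message, size);
    free(message);

    debug_nub_start_profiler_reply reply;
    int32 code;
    read_port(replyPort, &code, &reply, sizeof(reply));
    delete_port(replyPort);
    return reply.error;
}

static void
stop_profiling(port_id nubPort, thread_id thread, int32 functionCount)
{
    debug_nub_stop_profiler message;
    port_id replyPort = create_port(1, "profiler reply");
    message.reply_port = replyPort;
    message.thread = thread;
    write_port(nubPort, B_DEBUG_STOP_PROFILER, &message, sizeof(message));

    // the reply is a dynamically sized debug_profiler_stopped
    size_t resultSize = sizeof(debug_profiler_stopped)
        + 8 * (functionCount - 1);
    debug_profiler_stopped* result
        = (debug_profiler_stopped*)malloc(resultSize);
    int32 code;
    read_port(replyPort, &code, result, resultSize);

    // function_ticks[i] refers to the i-th function of the start request;
    // the kernel keeps the original index across its sort by address
    for (int32 i = 0; i < result->function_count; i++)
        printf("function %ld: %lld ticks\n", i, result->function_ticks[i]);
    printf("total: %lld, missed: %lld\n", result->total_ticks,
        result->missed_ticks);

    free(result);
    delete_port(replyPort);
}

The buffer sizes in the sketch mirror what the kernel uses below: the stop
reply is sized as sizeof(debug_profiler_stopped) + 8 * (function_count - 1),
and the requested interval is clamped to B_DEBUG_MIN_PROFILE_INTERVAL.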
@@ -146,6 +146,9 @@ typedef enum {
                                        // handed over to another debugger;
                                        // the new debugger can just invoke
                                        // install_team_debugger()

    B_DEBUG_START_PROFILER,             // start/stop sampling
    B_DEBUG_STOP_PROFILER               //
} debug_nub_message;

// messages sent to the debugger
@@ -168,6 +171,9 @@ typedef enum {
    B_DEBUGGER_MESSAGE_IMAGE_CREATED,       // an image has been created
    B_DEBUGGER_MESSAGE_IMAGE_DELETED,       // an image has been deleted

    B_DEBUGGER_MESSAGE_PROFILER_STOPPED,    // a profiled thread is going to
                                            // exit

    B_DEBUGGER_MESSAGE_HANDED_OVER,         // the debugged team has been
                                            // handed over to another debugger
} debug_debugger_message;
@@ -346,6 +352,35 @@ typedef struct {

// no parameters, no reply

// B_DEBUG_START_PROFILER

struct debug_profile_function {
    addr_t  base;   // function base address
    size_t  size;   // function size
};

typedef struct {
    port_id     reply_port;         // port to send the reply to
    thread_id   thread;             // thread to profile
    bigtime_t   interval;           // sample interval
    int32       function_count;     // number of functions we count hits for
    struct debug_profile_function functions[1];
                                    // functions that shall be tracked
} debug_nub_start_profiler;

typedef struct {
    status_t    error;
} debug_nub_start_profiler_reply;

// B_DEBUG_STOP_PROFILER

typedef struct {
    port_id     reply_port;         // port to send the reply to
    thread_id   thread;             // thread to profile
} debug_nub_stop_profiler;

// reply is debug_profiler_stopped

// union of all messages structures sent to the debug nub thread
typedef union {
    debug_nub_read_memory           read_memory;
@@ -363,6 +398,8 @@ typedef union {
    debug_nub_get_signal_masks      get_signal_masks;
    debug_nub_set_signal_handler    set_signal_handler;
    debug_nub_get_signal_handler    get_signal_handler;
    debug_nub_start_profiler        start_profiler;
    debug_nub_stop_profiler         stop_profiler;
} debug_nub_message_data;


@@ -495,6 +532,18 @@ typedef struct {
    image_info      info;           // info for the image
} debug_image_deleted;

// B_DEBUGGER_MESSAGE_PROFILER_STOPPED

typedef struct {
    debug_origin    origin;
    int32           function_count;
    bigtime_t       interval;           // actual sample interval (might
                                        // differ from the requested one)
    int64           total_ticks;        // total number of sample ticks
    int64           missed_ticks;       // ticks that didn't hit a function
    int64           function_ticks[1];  // number of hits for each function
} debug_profiler_stopped;

// B_DEBUGGER_MESSAGE_HANDED_OVER

typedef struct {
@@ -521,6 +570,7 @@ typedef union {
    debug_thread_deleted        thread_deleted;
    debug_image_created         image_created;
    debug_image_deleted         image_deleted;
    debug_profiler_stopped      profiler_stopped;   // dynamic size!
    debug_handed_over           handed_over;

    debug_origin                origin; // for convenience (no real message)

@@ -11,6 +11,17 @@

#include <arch/user_debugger.h>

#include <timer.h>


// limits
#define B_DEBUG_MAX_PROFILE_FUNCTIONS   100000
#define B_DEBUG_MIN_PROFILE_INTERVAL    1000    /* in us */
#define B_DEBUG_STACK_TRACE_DEPTH       5


struct function_profile_info;

// Team related debugging data.
//
// Locking policy:
@@ -61,6 +72,28 @@ struct thread_debug_info {
        // the signals the debugger wishes not to be notified of, when they
        // occur the next time

    struct {
        bigtime_t   interval;
            // sampling interval
        union {
            bigtime_t   interval_left;
                // when unscheduled: the time left of the current sampling
                // interval
            bigtime_t   timer_end;
                // when running: the absolute time the timer is supposed to go
                // off
        };
        int32       function_count;
            // number of tracked functions
        struct function_profile_info* functions;
            // array of tracked functions
        debug_profiler_stopped* result;
            // the result message to be sent to the debugger when profiling end;
            // contains the current hit counts for all functions
        timer*      installed_timer;
            // when running and being profiled: the CPU's profiling timer
    } profile;

    struct arch_thread_debug_info   arch_info;
};

@@ -135,8 +168,8 @@ extern "C" {
void clear_team_debug_info(struct team_debug_info *info, bool initLock);
void destroy_team_debug_info(struct team_debug_info *info);

void clear_thread_debug_info(struct thread_debug_info *info,
    bool dying);
void init_thread_debug_info(struct thread_debug_info *info);
void clear_thread_debug_info(struct thread_debug_info *info, bool dying);
void destroy_thread_debug_info(struct thread_debug_info *info);

void user_debug_prepare_for_exec();
@@ -159,12 +192,16 @@ void user_debug_team_deleted(team_id teamID, port_id debuggerPort);
void user_debug_update_new_thread_flags(thread_id threadID);
void user_debug_thread_created(thread_id threadID);
void user_debug_thread_deleted(team_id teamID, thread_id threadID);
void user_debug_thread_exiting(struct thread* thread);
void user_debug_image_created(const image_info *imageInfo);
void user_debug_image_deleted(const image_info *imageInfo);
void user_debug_breakpoint_hit(bool software);
void user_debug_watchpoint_hit();
void user_debug_single_stepped();

void user_debug_thread_unscheduled(struct thread* thread);
void user_debug_thread_scheduled(struct thread* thread);


// syscalls

@@ -3,6 +3,7 @@ SubDir HAIKU_TOP src system kernel debug ;
UsePrivateHeaders [ FDirName kernel debug ] syslog_daemon ;
UsePrivateHeaders [ FDirName graphics common ] ;
UsePrivateHeaders [ FDirName graphics vesa ] ;
UsePrivateHeaders shared ;

KernelMergeObject kernel_debug.o :
    blue_screen.cpp

@@ -1,5 +1,5 @@
/*
 * Copyright 2005-2006, Ingo Weinhold, bonefish@users.sf.net.
 * Copyright 2005-2008, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

@@ -8,12 +8,18 @@
#include <stdio.h>
#include <string.h>

#include <algorithm>

#include <arch/debug.h>
#include <arch/user_debugger.h>
#include <cpu.h>
#include <debugger.h>
#include <kernel.h>
#include <KernelExport.h>
#include <kscheduler.h>
#include <ksignal.h>
#include <ksyscalls.h>
#include <port.h>
#include <sem.h>
#include <team.h>
#include <thread.h>
@@ -21,8 +27,8 @@
#include <user_debugger.h>
#include <vm.h>
#include <vm_types.h>
#include <arch/user_debugger.h>

#include <AutoDeleter.h>
#include <util/AutoLock.h>

//#define TRACE_USER_DEBUGGER
@@ -33,21 +39,31 @@
#endif


struct function_profile_info : debug_profile_function {
    int32   index;
};


static port_id sDefaultDebuggerPort = -1;
    // accessed atomically

static timer sProfilingTimers[B_MAX_CPU_COUNT];
    // a profiling timer for each CPU -- used when a profiled thread is running
    // on that CPU


static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed(team_id teamID, port_id *port = NULL);
static void get_team_debug_info(team_debug_info &teamDebugInfo);


static ssize_t
kill_interruptable_read_port(port_id port, int32 *code, void *buffer,
    size_t bufferSize)
{
    return read_port_etc(port, code, buffer, bufferSize,
        B_KILL_CAN_INTERRUPT, 0);
}
struct ProfileFunctionComparator {
    inline bool operator()(const function_profile_info& a,
        const function_profile_info& b) const
    {
        return a.base < b.base;
    }
};


static status_t
@@ -271,16 +287,42 @@ destroy_team_debug_info(struct team_debug_info *info)
}


void
init_thread_debug_info(struct thread_debug_info *info)
{
    if (info) {
        arch_clear_thread_debug_info(&info->arch_info);
        info->flags = B_THREAD_DEBUG_DEFAULT_FLAGS;
        info->debug_port = -1;
        info->ignore_signals = 0;
        info->ignore_signals_once = 0;
        info->profile.functions = NULL;
        info->profile.result = NULL;
        info->profile.installed_timer = NULL;
    }
}


/*! Invoked with thread lock being held.
 */
void
clear_thread_debug_info(struct thread_debug_info *info, bool dying)
{
    if (info) {
        // cancel profiling timer
        if (info->profile.installed_timer != NULL) {
            cancel_timer(info->profile.installed_timer);
            info->profile.installed_timer = NULL;
        }

        arch_clear_thread_debug_info(&info->arch_info);
        atomic_set(&info->flags,
            B_THREAD_DEBUG_DEFAULT_FLAGS | (dying ? B_THREAD_DEBUG_DYING : 0));
        info->debug_port = -1;
        info->ignore_signals = 0;
        info->ignore_signals_once = 0;
        info->profile.functions = NULL;
        info->profile.result = NULL;
    }
}

@@ -289,6 +331,9 @@ void
destroy_thread_debug_info(struct thread_debug_info *info)
{
    if (info) {
        free(info->profile.functions);
        free(info->profile.result);

        arch_destroy_thread_debug_info(&info->arch_info);

        if (info->debug_port >= 0) {
@@ -510,8 +555,10 @@ thread_hit_debug_event_internal(debug_debugger_message event,
        // read a command from the debug port
        int32 command;
        debugged_thread_message_data commandMessage;
        ssize_t commandMessageSize = kill_interruptable_read_port(port,
            &command, &commandMessage, sizeof(commandMessage));
        ssize_t commandMessageSize = read_port_etc(port, &command,
            &commandMessage, sizeof(commandMessage), B_KILL_CAN_INTERRUPT,
            0);

        if (commandMessageSize < 0) {
            error = commandMessageSize;
            TRACE(("thread_hit_debug_event(): thread: %ld, failed "
@@ -928,6 +975,58 @@ user_debug_thread_deleted(team_id teamID, thread_id threadID)
}


void
user_debug_thread_exiting(struct thread* thread)
{
    InterruptsLocker interruptsLocker;
    SpinLocker teamLocker(gTeamSpinlock);

    struct team* team = thread->team;

    GRAB_TEAM_DEBUG_INFO_LOCK(team->debug_info);

    int32 teamDebugFlags = atomic_get(&team->debug_info.flags);
    port_id debuggerPort = team->debug_info.debugger_port;

    RELEASE_TEAM_DEBUG_INFO_LOCK(team->debug_info);

    teamLocker.Unlock();

    // check, if a debugger is installed
    if ((teamDebugFlags & B_TEAM_DEBUG_DEBUGGER_INSTALLED) == 0
        || debuggerPort < 0) {
        return;
    }

    // detach the profile info and mark the thread dying
    SpinLocker threadLocker(gThreadSpinlock);

    thread_debug_info& threadDebugInfo = thread->debug_info;
    if (threadDebugInfo.profile.functions == NULL)
        return;

    int32 functionCount = threadDebugInfo.profile.function_count;
    function_profile_info* profileFunctions = threadDebugInfo.profile.functions;
    debug_profiler_stopped* profileResult = threadDebugInfo.profile.result;
    threadDebugInfo.profile.functions = NULL;
    threadDebugInfo.profile.result = NULL;

    atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);

    threadLocker.Unlock();
    interruptsLocker.Unlock();

    // notify the debugger
    size_t messageSize = sizeof(debug_profiler_stopped)
        + 8 * (functionCount - 1);
    debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_STOPPED,
        profileResult, messageSize, false);

    free(profileFunctions);
    free(profileResult);
}


void
user_debug_image_created(const image_info *imageInfo)
{
@@ -1035,6 +1134,105 @@ user_debug_single_stepped()
}


static void
schedule_profiling_timer(struct thread* thread, bigtime_t interval)
{
    struct timer* timer = &sProfilingTimers[thread->cpu->cpu_num];
    thread->debug_info.profile.installed_timer = timer;
    thread->debug_info.profile.timer_end = system_time() + interval;
    add_timer(timer, &profiling_event, interval,
        B_ONE_SHOT_RELATIVE_TIMER | B_TIMER_ACQUIRE_THREAD_LOCK);
}


static function_profile_info*
find_profiled_function(const thread_debug_info& debugInfo, addr_t address)
{
    // binary search the function
    function_profile_info* functions = debugInfo.profile.functions;
    int32 lower = 0;
    int32 upper = debugInfo.profile.function_count;

    while (lower < upper) {
        int32 mid = (lower + upper) / 2;
        if (address >= functions[mid].base + functions[mid].size)
            lower = mid + 1;
        else
            upper = mid;
    }

    if (lower == debugInfo.profile.function_count)
        return NULL;

    function_profile_info* function = &functions[lower];
    if (address >= function->base && address < function->base + function->size)
        return function;
    return NULL;
}


static int32
profiling_event(timer* /*unused*/)
{
    struct thread* thread = thread_get_current_thread();
    thread_debug_info& debugInfo = thread->debug_info;

    if (debugInfo.profile.functions != NULL) {
        // Find the hit function and increment the tick counter. We
        addr_t returnAddresses[B_DEBUG_STACK_TRACE_DEPTH];
        int32 count = arch_debug_get_stack_trace(returnAddresses,
            B_DEBUG_STACK_TRACE_DEPTH, 1, 0, false);

        function_profile_info* function = NULL;
        for (int32 i = 0; i < count; i++) {
            function = find_profiled_function(debugInfo, returnAddresses[i]);
            if (function != NULL)
                break;
        }

        if (function != NULL)
            debugInfo.profile.result->function_ticks[function->index]++;
        else
            debugInfo.profile.result->missed_ticks++;
        debugInfo.profile.result->total_ticks++;

        // reschedule timer
        schedule_profiling_timer(thread, debugInfo.profile.interval);
    } else
        debugInfo.profile.installed_timer = NULL;

    return B_HANDLED_INTERRUPT;
}


void
user_debug_thread_unscheduled(struct thread* thread)
{
    // if running, cancel the profiling timer
    struct timer* timer = thread->debug_info.profile.installed_timer;
    if (timer != NULL) {
        // track remaining time
        bigtime_t left = thread->debug_info.profile.timer_end - system_time();
        thread->debug_info.profile.interval_left = max_c(left, 0);
        thread->debug_info.profile.installed_timer = NULL;

        // cancel timer
        cancel_timer(timer);
    }
}


void
user_debug_thread_scheduled(struct thread* thread)
{
    if (thread->debug_info.profile.functions != NULL) {
        // install profiling timer
        schedule_profiling_timer(thread,
            thread->debug_info.profile.interval_left);
    }
}


/** \brief Called by the debug nub thread of a team to broadcast a message
 *         that are initialized for debugging (and thus have a debug port).
 */
@@ -1335,11 +1533,11 @@ debug_nub_thread(void *)
    while (true) {
        int32 command;
        debug_nub_message_data message;
        ssize_t messageSize = kill_interruptable_read_port(port, &command,
            &message, sizeof(message));
        ssize_t messageSize = read_port_etc(port, &command, &message,
            sizeof(message), B_PEEK_PORT_MESSAGE | B_KILL_CAN_INTERRUPT, 0);

        if (messageSize < 0) {
            // The port is not longer valid or we were interrupted by a kill
            // The port is no longer valid or we were interrupted by a kill
            // signal: If we are still listed in the team's debug info as nub
            // thread, we need to update that.
            nub_thread_cleanup(nubThread);
@@ -1359,9 +1557,13 @@ debug_nub_thread(void *)
            debug_nub_set_watchpoint_reply      set_watchpoint;
            debug_nub_get_signal_masks_reply    get_signal_masks;
            debug_nub_get_signal_handler_reply  get_signal_handler;
            debug_nub_start_profiler_reply      start_profiler;
            debug_profiler_stopped              stop_profiler;
        } reply;
        void* replyToSend = &reply;
        int32 replySize = 0;
        port_id replyPort = -1;
        bool removeCommandMessage = true;

        // process the command
        switch (command) {
@@ -1901,12 +2103,209 @@ debug_nub_thread(void *)

                break;
            }

            case B_DEBUG_START_PROFILER:
            {
                // get the parameters
                thread_id threadID = message.start_profiler.thread;
                replyPort = message.start_profiler.reply_port;
                int32 functionCount = message.start_profiler.function_count;
                status_t result = B_OK;

                TRACE(("nub thread %ld: B_DEBUG_START_PROFILER: "
                    "thread: %ld, %ld functions\n", nubThread->id, threadID,
                    functionCount));

                if (functionCount < 1
                    || functionCount > B_DEBUG_MAX_PROFILE_FUNCTIONS) {
                    result = B_BAD_VALUE;
                }

                // allocate memory for the complete message
                debug_nub_start_profiler* profileMessage = NULL;
                size_t size = 0;
                if (result == B_OK) {
                    size = (addr_t)&message.start_profiler.functions[
                            functionCount]
                        - (addr_t)&message.start_profiler;
                    profileMessage = (debug_nub_start_profiler*)malloc(size);
                    if (profileMessage == NULL)
                        result = B_NO_MEMORY;
                }
                MemoryDeleter profileMessageDeleter(profileMessage);

                // read the complete message from the port
                if (result == B_OK) {
                    int32 dummy;
                    ssize_t bytesRead = read_port_etc(port, &dummy,
                        profileMessage, size, B_RELATIVE_TIMEOUT, 0);
                    if (bytesRead < 0) {
                        result = bytesRead;
                    } else {
                        removeCommandMessage = false;

                        if ((size_t)bytesRead != size)
                            result = B_BAD_VALUE;
                    }
                }

                // allocate memory for the function infos
                function_profile_info* profileFunctions = NULL;
                if (result == B_OK) {
                    profileFunctions = (function_profile_info*)malloc(
                        sizeof(function_profile_info) * functionCount);
                    if (profileFunctions == NULL)
                        result = B_NO_MEMORY;
                }
                MemoryDeleter profileFunctionsDeleter(profileFunctions);

                // allocate memory for the reply
                debug_profiler_stopped* profileResult = NULL;
                size_t profileResultSize = 0;
                if (result == B_OK) {
                    profileResultSize = sizeof(debug_profiler_stopped)
                        + 8 * (functionCount - 1);
                    profileResult
                        = (debug_profiler_stopped*)malloc(profileResultSize);
                    if (profileResult == NULL)
                        result = B_NO_MEMORY;
                }
                MemoryDeleter profileResultDeleter(profileResult);

                // transfer the function array from the message
                if (result == B_OK) {
                    for (int32 i = 0; i < functionCount; i++) {
                        profileFunctions[i].base
                            = profileMessage->functions[i].base;
                        profileFunctions[i].size
                            = profileMessage->functions[i].size;
                        profileFunctions[i].index = i;
                    }
                }

                // sort the functions and prepare the reply
                if (result == B_OK) {
                    std::sort(profileFunctions,
                        profileFunctions + functionCount,
                        ProfileFunctionComparator());

                    memset(profileResult, 0, profileResultSize);
                    profileResult->origin.thread = threadID;
                    profileResult->origin.team = nubThread->team->id;
                    profileResult->origin.nub_port = -1;
                    profileResult->interval = max_c(profileMessage->interval,
                        B_DEBUG_MIN_PROFILE_INTERVAL);
                    profileResult->function_count = functionCount;
                }

                // get the thread and set the profile info
                cpu_status state = disable_interrupts();
                GRAB_THREAD_LOCK();

                struct thread *thread
                    = thread_get_thread_struct_locked(threadID);
                if (thread && thread->team == nubThread->team) {
                    thread_debug_info &threadDebugInfo = thread->debug_info;
                    if (threadDebugInfo.profile.functions == NULL) {
                        threadDebugInfo.profile.interval
                            = profileResult->interval;
                        threadDebugInfo.profile.interval_left
                            = threadDebugInfo.profile.interval;
                        threadDebugInfo.profile.function_count = functionCount;
                        threadDebugInfo.profile.functions = profileFunctions;
                        threadDebugInfo.profile.result = profileResult;
                        threadDebugInfo.profile.installed_timer = NULL;
                    } else
                        result = B_BAD_VALUE;
                } else
                    result = B_BAD_THREAD_ID;

                RELEASE_THREAD_LOCK();
                restore_interrupts(state);

                // if all went well, keep the allocated structures
                if (result == B_OK) {
                    profileFunctionsDeleter.Detach();
                    profileResultDeleter.Detach();
                }

                // send a reply to the debugger
                reply.start_profiler.error = result;
                sendReply = true;
                replySize = sizeof(reply.start_profiler);

                break;
            }

            case B_DEBUG_STOP_PROFILER:
            {
                // get the parameters
                thread_id threadID = message.stop_profiler.thread;
                replyPort = message.stop_profiler.reply_port;
                status_t result = B_OK;

                TRACE(("nub thread %ld: B_DEBUG_STOP_PROFILER: "
                    "thread: %ld\n", nubThread->id, threadID));

                function_profile_info* profileFunctions = NULL;
                debug_profiler_stopped* profileResult = NULL;
                int32 functionCount = 0;

                // get the thread and detach the profile info
                cpu_status state = disable_interrupts();
                GRAB_THREAD_LOCK();

                struct thread *thread
                    = thread_get_thread_struct_locked(threadID);
                if (thread && thread->team == nubThread->team) {
                    thread_debug_info &threadDebugInfo = thread->debug_info;
                    if (threadDebugInfo.profile.functions != NULL) {
                        functionCount = threadDebugInfo.profile.function_count;
                        profileFunctions = threadDebugInfo.profile.functions;
                        profileResult = threadDebugInfo.profile.result;
                        threadDebugInfo.profile.functions = NULL;
                        threadDebugInfo.profile.result = NULL;
                    } else
                        result = B_BAD_VALUE;
                } else
                    result = B_BAD_THREAD_ID;

                RELEASE_THREAD_LOCK();
                restore_interrupts(state);

                // prepare the reply
                if (result == B_OK) {
                    replyToSend = profileResult;
                    replySize = sizeof(debug_profiler_stopped)
                        + 8 * (functionCount - 1);
                } else {
                    reply.stop_profiler.origin.thread = result;
                    reply.stop_profiler.total_ticks = 0;
                    reply.stop_profiler.missed_ticks = 0;
                    replySize = sizeof(reply.stop_profiler);
                }
                sendReply = true;

                free(profileFunctions);
                    // profileResult is the reply to be sent and will be deleted
                    // after sending.
            }
        }

        // We only peeked the command message -- unless the command handler did
        // that already, we need to remove the message from the port.
        if (removeCommandMessage) {
            int32 dummy;
            read_port_etc(port, &dummy, NULL, 0, B_RELATIVE_TIMEOUT, 0);
        }

        // send the reply, if necessary
        if (sendReply) {
            status_t error = kill_interruptable_write_port(replyPort, command,
                &reply, replySize);
                replyToSend, replySize);

            if (replyToSend != &reply)
                free(replyToSend);

            if (error != B_OK) {
                // The debugger port is either not longer existing or we got

@@ -26,6 +26,7 @@
#include <thread.h>
#include <timer.h>
#include <tracing.h>
#include <user_debugger.h>
#include <util/AutoLock.h>
#include <util/khash.h>

@@ -539,11 +540,20 @@ scheduler_remove_from_run_queue(struct thread *thread)
static void
context_switch(struct thread *fromThread, struct thread *toThread)
{
    if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
        user_debug_thread_unscheduled(fromThread);

    toThread->cpu = fromThread->cpu;
    fromThread->cpu = NULL;

    arch_thread_set_current_thread(toThread);
    arch_thread_context_switch(fromThread, toThread);

    // Looks weird, but is correct. fromThread had been unscheduled earlier,
    // but is back now. The notification for a thread scheduled the first time
    // happens in thread.cpp:thread_kthread_entry().
    if ((fromThread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
        user_debug_thread_scheduled(fromThread);
}

@@ -301,6 +301,11 @@ thread_kthread_entry(void)
{
    struct thread *thread = thread_get_current_thread();

    // The thread is new and has been scheduled the first time. Notify the user
    // debugger code.
    if ((thread->flags & THREAD_FLAGS_DEBUGGER_INSTALLED) != 0)
        user_debug_thread_scheduled(thread);

    // simulates the thread spinlock release that would occur if the thread had been
    // rescheded from. The resched didn't happen because the thread is new.
    RELEASE_THREAD_LOCK();
@@ -386,7 +391,7 @@ create_thread(thread_creation_attributes& attributes, bool kernel)
    thread->next_state = B_THREAD_SUSPENDED;

    // init debug structure
    clear_thread_debug_info(&thread->debug_info, false);
    init_thread_debug_info(&thread->debug_info);

    snprintf(stack_name, B_OS_NAME_LENGTH, "%s_%ld_kstack", attributes.name,
        thread->id);
@@ -578,12 +583,6 @@ undertaker(void* /*args*/)
        if (entry.deathSem >= 0)
            release_sem_etc(entry.deathSem, 1, B_DO_NOT_RESCHEDULE);

        // notify the debugger
        if (entry.teamID >= 0
            && entry.teamID != team_get_kernel_team_id()) {
            user_debug_thread_deleted(entry.teamID, thread->id);
        }

        // free the thread structure
        thread_enqueue(thread, &dead_q);
            // TODO: Use the slab allocator!
@@ -1353,6 +1352,8 @@ thread_exit(void)
    struct death_entry* threadDeathEntry = NULL;

    if (team != team_get_kernel_team()) {
        user_debug_thread_exiting(thread);

        if (team->main_thread == thread) {
            // this was the main thread in this team, so we will delete that as well
            deleteTeam = true;
@@ -1531,6 +1532,10 @@ thread_exit(void)
        delete_sem(cachedExitSem);
    }

    // notify the debugger
    if (teamID != team_get_kernel_team_id())
        user_debug_thread_deleted(teamID, thread->id);

    // enqueue in the undertaker list and reschedule for the last time
    UndertakerEntry undertakerEntry(thread, teamID, cachedDeathSem);