* Introduced a per-team counter that is incremented whenever an image
  is created or deleted (or exec*() has been invoked). The counter is
  sent with several debugger messages.
* Track the image event counter in the profiling buffer: when samples
  are added and the current team counter differs from the recorded one,
  we flush the buffer first (sending an update message to the
  debugger), so that the debugger has a chance to match the addresses
  to the correct images (see the first sketch after this list).
* Disable profiling for a thread while it runs in the debugger support
  code (see the second sketch after this list). This fixes potential
  deadlocks that could occur when a profiling timer event requiring a
  buffer flush fired while the thread was sending something to the
  debugger or waiting for a command. As it turns out, this is not
  sufficient either: we should never try to flush the buffer when the
  timer event occurred in the kernel, because the thread might hold a
  lock that the debugger thread could try to acquire. Will implement a
  more general solution later.
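
The first two points combine into a simple scheme: a per-team atomic
counter numbers all image events, each sample buffer remembers the
counter value that was current when its first sample was written, and a
mismatch (or a nearly full buffer) forces a flush before the next
sample is stored. The following minimal user-space sketch illustrates
the idea; std::atomic and the names BumpImageEvent, SampleBuffer, and
MustFlush are illustrative stand-ins for the kernel's vint32 counter in
team_debug_info and the profile part of thread_debug_info, not the
actual kernel API.

#include <atomic>
#include <cstdint>

static std::atomic<int32_t> sTeamImageEvent(0);
	// stands in for team_debug_info::image_event

// Called whenever an image is created/deleted or exec*() is invoked.
// Returns the new event number, mirroring the commit's
// atomic_add(&team->debug_info.image_event, 1) + 1.
int32_t
BumpImageEvent()
{
	return sTeamImageEvent.fetch_add(1) + 1;
}

struct SampleBuffer {
	int32_t maxSamples;		// buffer capacity in samples
	int32_t sampleCount;	// samples currently in the buffer
	int32_t stackDepth;		// return addresses recorded per tick
	int32_t imageEvent;		// event number when the first sample was written
};

// True if the buffer has to be flushed to the debugger before the next
// sample can be stored: either an image event happened after the first
// sample was taken, or there is no room left for a full stack trace.
bool
MustFlush(const SampleBuffer& buffer)
{
	if (buffer.sampleCount == 0)
		return false;
	return buffer.imageEvent < sTeamImageEvent.load()
		|| buffer.maxSamples - buffer.sampleCount < buffer.stackDepth;
}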
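
The third point relies on a nesting counter instead of a flag, so that
nested debugger-support paths can disable and re-enable profiling
independently: only the outermost disable cancels the profiling timer
(saving the unexpired part of the interval), and only the outermost
enable re-arms it. A simplified sketch of that pairing follows;
ProfileInfo and the timer bookkeeping are hypothetical stand-ins for
thread_debug_info.profile and schedule_profiling_timer()/cancel_timer().

#include <algorithm>
#include <cstdint>

struct ProfileInfo {
	int32_t disabled;		// > 0: profiling temporarily off
	bool timerInstalled;	// a profiling timer is currently scheduled
	int64_t timerEnd;		// absolute end time of the running interval
	int64_t intervalLeft;	// unexpired part of a cancelled interval
};

void
DisableProfiling(ProfileInfo& profile, int64_t now)
{
	if (profile.disabled++ > 0)
		return;		// an outer caller already disabled profiling
	if (profile.timerInstalled) {
		// remember the unexpired part of the interval, then cancel the
		// timer (cancel_timer() in the kernel)
		profile.intervalLeft = std::max<int64_t>(profile.timerEnd - now, 0);
		profile.timerInstalled = false;
	}
}

void
EnableProfiling(ProfileInfo& profile)
{
	if (--profile.disabled > 0)
		return;		// still disabled by an outer caller
	// re-arm the timer with the remaining interval
	// (schedule_profiling_timer() in the kernel)
	profile.timerInstalled = true;
}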


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@27656 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold, 2008-09-20 20:37:10 +00:00
commit eba9a4c3ee (parent 18fe1bf6fb)
3 changed files with 170 additions and 44 deletions

File 1 of 3

@@ -371,9 +371,7 @@ typedef struct {
typedef struct {
status_t error;
int32 profile_event; // number of the last event influencing
// profiling (e.g. image
// created/deleted)
int32 image_event; // number of the last image event
bigtime_t interval; // actual sample interval (might
// differ from the requested one)
} debug_nub_start_profiler_reply;
@@ -515,6 +513,7 @@ typedef struct {
typedef struct {
debug_origin origin;
int32 image_event; // number of the image event
} debug_team_exec;
// B_DEBUGGER_MESSAGE_THREAD_CREATED
@@ -535,6 +534,7 @@ typedef struct {
typedef struct {
debug_origin origin;
image_info info; // info for the image
int32 image_event; // number of the image event
} debug_image_created;
// B_DEBUGGER_MESSAGE_IMAGE_DELETED
@@ -542,17 +542,17 @@ typedef struct {
typedef struct {
debug_origin origin;
image_info info; // info for the image
int32 image_event; // number of the image event
} debug_image_deleted;
// B_DEBUGGER_MESSAGE_PROFILER_UPDATE
typedef struct {
debug_origin origin;
int32 profile_event; // number of the last event
// influencing profiling (e.g.
// image created/deleted); all
// samples were recorded after this
// event and before the next one
int32 image_event; // number of the last image event;
// all samples were recorded after
// this event and before the next
// one
int32 stack_depth; // number of return addresses per
// tick
int32 sample_count; // number of samples in the buffer

File 2 of 3

@@ -54,6 +54,8 @@ struct team_debug_info {
sem_id debugger_write_lock;
// synchronizes writes to the debugger port with the setting (but not
// clearing) of the B_TEAM_DEBUG_DEBUGGER_HANDOVER flag
vint32 image_event;
// counter incremented whenever an image is created/deleted
struct arch_team_debug_info arch_info;
};
@@ -86,6 +88,11 @@ struct thread_debug_info {
// number of samples the buffer currently holds
int32 stack_depth;
// number of return addresses to record per timer interval
int32 image_event;
// number of the image event when the first sample was written into
// the buffer
int32 disabled;
// if > 0, profiling is temporarily disabled for the thread
bool buffer_full;
// indicates that the sample buffer is full
union {

File 3 of 3

@@ -47,11 +47,58 @@ static timer sProfilingTimers[B_MAX_CPU_COUNT];
// on that CPU
static void schedule_profiling_timer(struct thread* thread,
bigtime_t interval);
static int32 profiling_event(timer* unused);
static status_t ensure_debugger_installed(team_id teamID, port_id *port = NULL);
static void get_team_debug_info(team_debug_info &teamDebugInfo);
static void
enable_profiling()
{
struct thread* thread = thread_get_current_thread();
InterruptsSpinLocker _(gThreadSpinlock);
if (--thread->debug_info.profile.disabled > 0)
return;
if (thread->debug_info.profile.samples != NULL
&& !thread->debug_info.profile.buffer_full) {
// install profiling timer
schedule_profiling_timer(thread,
thread->debug_info.profile.interval_left);
}
}
static void
disable_profiling()
{
struct thread* thread = thread_get_current_thread();
InterruptsSpinLocker _(gThreadSpinlock);
if (thread->debug_info.profile.disabled++ > 0) {
ASSERT(thread->debug_info.profile.installed_timer == NULL);
return;
}
// if running, cancel the profiling timer
struct timer* timer = thread->debug_info.profile.installed_timer;
if (timer != NULL) {
// track remaining time
bigtime_t left = thread->debug_info.profile.timer_end - system_time();
thread->debug_info.profile.interval_left = max_c(left, 0);
thread->debug_info.profile.installed_timer = NULL;
// cancel timer
cancel_timer(timer);
}
}
static status_t
kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
size_t bufferSize)
@@ -72,6 +119,8 @@ debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
status_t error = B_OK;
disable_profiling();
// get the team debug info
team_debug_info teamDebugInfo;
get_team_debug_info(teamDebugInfo);
@@ -83,6 +132,7 @@ debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
if (error != B_OK) {
TRACE(("debugger_write() done1: %lx\n", error));
enable_profiling();
return error;
}
@@ -109,6 +159,7 @@ debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
TRACE(("debugger_write() done: %lx\n", error));
enable_profiling();
return error;
}
@@ -219,6 +270,7 @@ clear_team_debug_info(struct team_debug_info *info, bool initLock)
info->nub_thread = -1;
info->nub_port = -1;
info->debugger_write_lock = -1;
info->image_event = 0;
if (initLock)
B_INITIALIZE_SPINLOCK(&info->lock);
@@ -269,6 +321,7 @@ destroy_team_debug_info(struct team_debug_info *info)
atomic_set(&info->flags, 0);
info->debugger_team = -1;
info->debugger_port = -1;
info->image_event = -1;
}
}
@@ -284,6 +337,7 @@ init_thread_debug_info(struct thread_debug_info *info)
info->ignore_signals_once = 0;
info->profile.sample_area = -1;
info->profile.samples = NULL;
info->profile.disabled = 0;
info->profile.buffer_full = false;
info->profile.installed_timer = NULL;
}
@@ -310,6 +364,7 @@ clear_thread_debug_info(struct thread_debug_info *info, bool dying)
info->ignore_signals_once = 0;
info->profile.sample_area = -1;
info->profile.samples = NULL;
info->profile.disabled = 0;
info->profile.buffer_full = false;
}
}
@@ -680,6 +735,8 @@ static status_t
thread_hit_debug_event(debug_debugger_message event, const void *message,
int32 size, bool requireDebugger)
{
disable_profiling();
status_t result;
bool restart;
do {
@@ -688,6 +745,8 @@ thread_hit_debug_event(debug_debugger_message event, const void *message,
requireDebugger, restart);
} while (result >= 0 && restart);
enable_profiling();
return result;
}
@@ -897,7 +956,9 @@ user_debug_team_exec()
}
// prepare the message
debug_team_created message;
debug_team_exec message;
message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
+ 1;
thread_hit_debug_event(B_DEBUGGER_MESSAGE_TEAM_EXEC, &message,
sizeof(message), true);
@@ -1022,8 +1083,10 @@ user_debug_thread_exiting(struct thread* thread)
area_id sampleArea = threadDebugInfo.profile.sample_area;
int32 sampleCount = threadDebugInfo.profile.sample_count;
int32 stackDepth = threadDebugInfo.profile.stack_depth;
int32 imageEvent = threadDebugInfo.profile.image_event;
threadDebugInfo.profile.sample_area = -1;
threadDebugInfo.profile.samples = NULL;
threadDebugInfo.profile.disabled = 0;
threadDebugInfo.profile.buffer_full = false;
atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);
@@ -1038,6 +1101,7 @@ user_debug_thread_exiting(struct thread* thread)
message.origin.nub_port = -1; // asynchronous message
message.sample_count = sampleCount;
message.stack_depth = stackDepth;
message.image_event = imageEvent;
message.stopped = true;
debugger_write(debuggerPort, B_DEBUGGER_MESSAGE_PROFILER_UPDATE,
&message, sizeof(message), false);
@@ -1066,6 +1130,8 @@ user_debug_image_created(const image_info *imageInfo)
// prepare the message
debug_image_created message;
memcpy(&message.info, imageInfo, sizeof(image_info));
message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
+ 1;
thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_CREATED, &message,
sizeof(message), true);
@@ -1086,6 +1152,8 @@ user_debug_image_deleted(const image_info *imageInfo)
// prepare the message
debug_image_deleted message;
memcpy(&message.info, imageInfo, sizeof(image_info));
message.image_event = atomic_add(&thread->team->debug_info.image_event, 1)
+ 1;
thread_hit_debug_event(B_DEBUGGER_MESSAGE_IMAGE_DELETED, &message,
sizeof(message), true);
@@ -1170,6 +1238,49 @@ schedule_profiling_timer(struct thread* thread, bigtime_t interval)
}
static bool
profiling_do_sample(bool& flushBuffer)
{
struct thread* thread = thread_get_current_thread();
thread_debug_info& debugInfo = thread->debug_info;
if (debugInfo.profile.samples == NULL)
return false;
// Check whether the buffer is full or an image event occurred since the
// last sample was taken.
int32 sampleCount = debugInfo.profile.sample_count;
int32 stackDepth = debugInfo.profile.stack_depth;
int32 imageEvent = thread->team->debug_info.image_event;
if (debugInfo.profile.sample_count > 0) {
if (debugInfo.profile.image_event < imageEvent
|| debugInfo.profile.max_samples - sampleCount < stackDepth) {
flushBuffer = true;
return true;
}
} else {
// first sample -- set the image event
debugInfo.profile.image_event = imageEvent;
}
// get the samples
addr_t* returnAddresses = debugInfo.profile.samples
+ debugInfo.profile.sample_count;
if (stackDepth > 1) {
int32 count = arch_debug_get_stack_trace(returnAddresses, stackDepth, 1,
0, false);
for (int32 i = count; i < stackDepth; i++)
returnAddresses[i] = 0;
} else
*returnAddresses = (addr_t)arch_debug_get_interrupt_pc();
debugInfo.profile.sample_count += stackDepth;
return true;
}
static void
profiling_buffer_full(void*)
{
@@ -1181,29 +1292,32 @@ profiling_buffer_full(void*)
if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
int32 sampleCount = debugInfo.profile.sample_count;
int32 stackDepth = debugInfo.profile.stack_depth;
if (debugInfo.profile.max_samples - sampleCount < stackDepth) {
// The sample buffer is indeed full; notify the debugger.
debugInfo.profile.sample_count = 0;
int32 imageEvent = debugInfo.profile.image_event;
RELEASE_THREAD_LOCK();
enable_interrupts();
// notify the debugger
debugInfo.profile.sample_count = 0;
// prepare the message
debug_profiler_update message;
message.sample_count = sampleCount;
message.stack_depth = stackDepth;
message.stopped = false;
RELEASE_THREAD_LOCK();
enable_interrupts();
thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
sizeof(message), false);
// prepare the message
debug_profiler_update message;
message.sample_count = sampleCount;
message.stack_depth = stackDepth;
message.image_event = imageEvent;
message.stopped = false;
disable_interrupts();
GRAB_THREAD_LOCK();
}
thread_hit_debug_event(B_DEBUGGER_MESSAGE_PROFILER_UPDATE, &message,
sizeof(message), false);
if (debugInfo.profile.samples != NULL) {
schedule_profiling_timer(thread, debugInfo.profile.interval);
disable_interrupts();
GRAB_THREAD_LOCK();
// do the sampling and reschedule timer, if still profiling this thread
bool flushBuffer = false;
if (profiling_do_sample(flushBuffer)) {
debugInfo.profile.buffer_full = false;
schedule_profiling_timer(thread, debugInfo.profile.interval);
}
}
@@ -1211,28 +1325,21 @@ profiling_buffer_full(void*)
}
/*! The thread spinlock is being held.
*/
static int32
profiling_event(timer* /*unused*/)
{
struct thread* thread = thread_get_current_thread();
thread_debug_info& debugInfo = thread->debug_info;
if (debugInfo.profile.samples != NULL) {
int32 stackDepth = debugInfo.profile.stack_depth;
addr_t* returnAddresses = debugInfo.profile.samples
+ debugInfo.profile.sample_count;
int32 count = arch_debug_get_stack_trace(returnAddresses, stackDepth, 1,
0, false);
for (int32 i = count; i < stackDepth; i++)
returnAddresses[i] = 0;
debugInfo.profile.sample_count += stackDepth;
int32 sampleCount = debugInfo.profile.sample_count;
if (debugInfo.profile.max_samples - sampleCount < stackDepth) {
// The sample buffer is full; we'll have to notify the debugger.
// We can't do that right here. Instead we set a post interrupt
// callback doing that for us, and don't reschedule the timer yet.
bool flushBuffer = false;
if (profiling_do_sample(flushBuffer)) {
if (flushBuffer) {
// The sample buffer needs to be flushed; we'll have to notify the
// debugger. We can't do that right here. Instead we set a post
// interrupt callback doing that for us, and don't reschedule the
// timer yet.
thread->post_interrupt_callback = profiling_buffer_full;
debugInfo.profile.installed_timer = NULL;
debugInfo.profile.buffer_full = true;
@@ -1248,6 +1355,11 @@ profiling_event(timer* /*unused*/)
void
user_debug_thread_unscheduled(struct thread* thread)
{
if (thread->debug_info.profile.disabled > 0) {
ASSERT(thread->debug_info.profile.installed_timer == NULL);
return;
}
// if running, cancel the profiling timer
struct timer* timer = thread->debug_info.profile.installed_timer;
if (timer != NULL) {
@@ -1265,6 +1377,9 @@ user_debug_thread_unscheduled(struct thread* thread)
void
user_debug_thread_scheduled(struct thread* thread)
{
if (thread->debug_info.profile.disabled > 0)
return;
if (thread->debug_info.profile.samples != NULL
&& !thread->debug_info.profile.buffer_full) {
// install profiling timer
@@ -2190,6 +2305,7 @@ debug_nub_thread(void *)
}
// get the thread and set the profile info
int32 imageEvent = nubThread->team->debug_info.image_event;
if (result == B_OK) {
cpu_status state = disable_interrupts();
GRAB_THREAD_LOCK();
@@ -2210,6 +2326,7 @@ debug_nub_thread(void *)
threadDebugInfo.profile.buffer_full = false;
threadDebugInfo.profile.interval_left = interval;
threadDebugInfo.profile.installed_timer = NULL;
threadDebugInfo.profile.image_event = imageEvent;
} else
result = B_BAD_VALUE;
} else
@@ -2228,9 +2345,9 @@ debug_nub_thread(void *)
}
// send a reply to the debugger
// TODO: profile_event
reply.start_profiler.error = result;
reply.start_profiler.interval = interval;
reply.start_profiler.image_event = imageEvent;
sendReply = true;
replySize = sizeof(reply.start_profiler);
@@ -2251,6 +2368,7 @@ debug_nub_thread(void *)
addr_t* samples = NULL;
int32 sampleCount = 0;
int32 stackDepth = 0;
int32 imageEvent = 0;
// get the thread and detach the profile info
cpu_status state = disable_interrupts();
@@ -2265,6 +2383,7 @@ debug_nub_thread(void *)
samples = threadDebugInfo.profile.samples;
sampleCount = threadDebugInfo.profile.sample_count;
stackDepth = threadDebugInfo.profile.stack_depth;
imageEvent = threadDebugInfo.profile.image_event;
threadDebugInfo.profile.sample_area = -1;
threadDebugInfo.profile.samples = NULL;
threadDebugInfo.profile.buffer_full = false;
@@ -2279,7 +2398,7 @@ debug_nub_thread(void *)
// prepare the reply
if (result == B_OK) {
reply.profiler_update.origin.thread = threadID;
// TODO: profile_event
reply.profiler_update.image_event = imageEvent;
reply.profiler_update.stack_depth = stackDepth;
reply.profiler_update.sample_count = sampleCount;
reply.profiler_update.stopped = true;