Solution for the potential deadlock when the profiling buffer needs to be flushed during a timer event that interrupted a kernel function: We now flush the buffer as soon as it is 70% full, *if* we didn't interrupt a kernel function. When the buffer runs full and we still haven't hit a user function, we drop the tick. The number of dropped ticks is recorded and sent to the debugger with the next update message. Reverted the previous partial solution (the temporary disabling of profiling while in debugger support code).

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@27658 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 1ecc0a33db
commit ecfad924e0
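In outline, the per-tick policy this commit adds to profiling_do_sample() can be sketched as follows. This is a minimal standalone illustration, not the kernel code itself: ProfileBuffer, TickAction, and decide_tick are made-up names, the interruptedKernel flag stands in for the IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc()) check used in the actual change, and the image-event condition from the real code is omitted.

// Sketch of the flush/drop decision added by this commit (illustrative
// names; the real logic lives in profiling_do_sample()).
#include <SupportDefs.h>

struct ProfileBuffer {
	int32	max_samples;		// capacity of the sample buffer
	int32	flush_threshold;	// max_samples * 70 / 100
	int32	sample_count;		// samples currently in the buffer
	int32	dropped_ticks;		// ticks dropped while the buffer was full
};

enum TickAction { kStoreSamples, kStoreAndFlush, kDropTick };

static TickAction
decide_tick(ProfileBuffer& buffer, int32 stackDepth, bool interruptedKernel)
{
	if (buffer.flush_threshold - buffer.sample_count < stackDepth) {
		// Past the 70% threshold: flush now, unless we interrupted a
		// kernel function, in which case flushing could deadlock.
		if (!interruptedKernel)
			return kStoreAndFlush;

		// Can't flush; keep appending until the buffer is really full,
		// then drop the tick and account for it.
		if (buffer.max_samples - buffer.sample_count < stackDepth) {
			buffer.dropped_ticks++;
			return kDropTick;
		}
	}
	return kStoreSamples;
}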
@@ -556,6 +556,9 @@ typedef struct {
 	int32		stack_depth;	// number of return addresses per
 								// tick
 	int32		sample_count;	// number of samples in the buffer
+	int32		dropped_ticks;	// number of ticks that had been
+								// dropped, since the buffer was
+								// full
 	bool		stopped;		// if true, the thread is no longer
 								// being profiled
 } debug_profiler_update;
@@ -15,9 +15,10 @@
 
 
 // limits
 #define B_DEBUG_MAX_PROFILE_FUNCTIONS			100000
 #define B_DEBUG_MIN_PROFILE_INTERVAL			100		/* in us */
 #define B_DEBUG_STACK_TRACE_DEPTH				5
+#define B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD	70		/* in % */
 
 
 struct function_profile_info;
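To put a number on the new constant: the percentage is converted into a sample count when profiling is installed (see the debug_nub_thread hunk below). A quick worked example with hypothetical sizes, assuming a 64 KiB sample area and 4-byte addresses:

// Hypothetical sizes, for illustration only.
int32 maxSamples = 65536 / sizeof(addr_t);
	// = 16384 samples with a 4-byte addr_t
int32 flushThreshold
	= maxSamples * B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD / 100;
	// = 16384 * 70 / 100 = 11468 samples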
@@ -84,15 +85,18 @@ struct thread_debug_info {
 		// sample buffer
 	int32		max_samples;
 		// maximum number of samples the buffer can hold
+	int32		flush_threshold;
+		// number of samples when the buffer is flushed (if possible)
 	int32		sample_count;
 		// number of samples the buffer currently holds
 	int32		stack_depth;
 		// number of return addresses to record per timer interval
+	int32		dropped_ticks;
+		// number of ticks that had to be dropped when the sample buffer was
+		// full and couldn't be flushed
 	int32		image_event;
 		// number of the image event when the first sample was written into
 		// the buffer
-	int32		disabled;
-		// if > 0, profiling is temporarily disabled for the thread
 	bool		buffer_full;
 		// indicates that the sample buffer is full
 	union {
@@ -54,51 +54,6 @@ static status_t ensure_debugger_installed(team_id teamID, port_id *port = NULL);
 static void get_team_debug_info(team_debug_info &teamDebugInfo);
 
 
-static void
-enable_profiling()
-{
-	struct thread* thread = thread_get_current_thread();
-
-	InterruptsSpinLocker _(gThreadSpinlock);
-
-	if (--thread->debug_info.profile.disabled > 0)
-		return;
-
-	if (thread->debug_info.profile.samples != NULL
-		&& !thread->debug_info.profile.buffer_full) {
-		// install profiling timer
-		schedule_profiling_timer(thread,
-			thread->debug_info.profile.interval_left);
-	}
-}
-
-
-static void
-disable_profiling()
-{
-	struct thread* thread = thread_get_current_thread();
-
-	InterruptsSpinLocker _(gThreadSpinlock);
-
-	if (thread->debug_info.profile.disabled++ > 0) {
-		ASSERT(thread->debug_info.profile.installed_timer == NULL);
-		return;
-	}
-
-	// if running, cancel the profiling timer
-	struct timer* timer = thread->debug_info.profile.installed_timer;
-	if (timer != NULL) {
-		// track remaining time
-		bigtime_t left = thread->debug_info.profile.timer_end - system_time();
-		thread->debug_info.profile.interval_left = max_c(left, 0);
-		thread->debug_info.profile.installed_timer = NULL;
-
-		// cancel timer
-		cancel_timer(timer);
-	}
-}
-
-
 static status_t
 kill_interruptable_write_port(port_id port, int32 code, const void *buffer,
 	size_t bufferSize)
@@ -119,8 +74,6 @@ debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
 
 	status_t error = B_OK;
 
-	disable_profiling();
-
 	// get the team debug info
 	team_debug_info teamDebugInfo;
 	get_team_debug_info(teamDebugInfo);
@@ -132,7 +85,6 @@ debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
 		dontWait ? (uint32)B_RELATIVE_TIMEOUT : (uint32)B_KILL_CAN_INTERRUPT, 0);
 	if (error != B_OK) {
 		TRACE(("debugger_write() done1: %lx\n", error));
-		enable_profiling();
 		return error;
 	}
 
@@ -159,7 +111,6 @@ debugger_write(port_id port, int32 code, const void *buffer, size_t bufferSize,
 
 	TRACE(("debugger_write() done: %lx\n", error));
 
-	enable_profiling();
 	return error;
 }
 
@@ -337,7 +288,6 @@ init_thread_debug_info(struct thread_debug_info *info)
 		info->ignore_signals_once = 0;
 		info->profile.sample_area = -1;
 		info->profile.samples = NULL;
-		info->profile.disabled = 0;
 		info->profile.buffer_full = false;
 		info->profile.installed_timer = NULL;
 	}
@@ -364,7 +314,6 @@ clear_thread_debug_info(struct thread_debug_info *info, bool dying)
 		info->ignore_signals_once = 0;
 		info->profile.sample_area = -1;
 		info->profile.samples = NULL;
-		info->profile.disabled = 0;
 		info->profile.buffer_full = false;
 	}
 }
@@ -735,8 +684,6 @@ static status_t
 thread_hit_debug_event(debug_debugger_message event, const void *message,
 	int32 size, bool requireDebugger)
 {
-	disable_profiling();
-
 	status_t result;
 	bool restart;
 	do {
@@ -745,8 +692,6 @@ thread_hit_debug_event(debug_debugger_message event, const void *message,
 			requireDebugger, restart);
 	} while (result >= 0 && restart);
 
-	enable_profiling();
-
 	return result;
 }
 
@@ -1082,11 +1027,11 @@ user_debug_thread_exiting(struct thread* thread)
 
 		area_id sampleArea = threadDebugInfo.profile.sample_area;
 		int32 sampleCount = threadDebugInfo.profile.sample_count;
+		int32 droppedTicks = threadDebugInfo.profile.dropped_ticks;
 		int32 stackDepth = threadDebugInfo.profile.stack_depth;
 		int32 imageEvent = threadDebugInfo.profile.image_event;
 		threadDebugInfo.profile.sample_area = -1;
 		threadDebugInfo.profile.samples = NULL;
-		threadDebugInfo.profile.disabled = 0;
 		threadDebugInfo.profile.buffer_full = false;
 
 		atomic_or(&threadDebugInfo.flags, B_THREAD_DEBUG_DYING);
@@ -1100,6 +1045,7 @@ user_debug_thread_exiting(struct thread* thread)
 		message.origin.team = thread->team->id;
 		message.origin.nub_port = -1;	// asynchronous message
 		message.sample_count = sampleCount;
+		message.dropped_ticks = droppedTicks;
 		message.stack_depth = stackDepth;
 		message.image_event = imageEvent;
 		message.stopped = true;
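With dropped_ticks in the update message, a debugger can estimate how complete the profile it received actually is. A hypothetical consumer is sketched below; handle_profiler_update and the tick arithmetic are illustrative, only the debug_profiler_update fields come from this commit, and the per-tick sample count assumes the "stack_depth return addresses per tick" layout the struct's comments describe.

// Hypothetical debugger-side accounting: compare ticks whose samples
// arrived against ticks the kernel dropped while the buffer was full.
static void
handle_profiler_update(const debug_profiler_update& update,
	int64& receivedTicks, int64& droppedTicks)
{
	if (update.stack_depth > 0)
		receivedTicks += update.sample_count / update.stack_depth;
	droppedTicks += update.dropped_ticks;
	// coverage = receivedTicks / (receivedTicks + droppedTicks)
}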
@@ -1254,9 +1200,19 @@ profiling_do_sample(bool& flushBuffer)
 	int32 imageEvent = thread->team->debug_info.image_event;
 	if (debugInfo.profile.sample_count > 0) {
 		if (debugInfo.profile.image_event < imageEvent
-			|| debugInfo.profile.max_samples - sampleCount < stackDepth) {
-			flushBuffer = true;
-			return true;
+			|| debugInfo.profile.flush_threshold - sampleCount < stackDepth) {
+			if (!IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc())) {
+				flushBuffer = true;
+				return true;
+			}
+
+			// We can't flush the buffer now, since we interrupted a kernel
+			// function. If the buffer is not full yet, we add the samples,
+			// otherwise we have to drop them.
+			if (debugInfo.profile.max_samples - sampleCount < stackDepth) {
+				debugInfo.profile.dropped_ticks++;
+				return true;
+			}
 		}
 	} else {
 		// first sample -- set the image event
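The guard above is the heart of the deadlock fix: flushing means notifying the debugger, which may block or take locks that the interrupted kernel function could already hold, so the flush is only initiated when the timer interrupted user code. Reduced to its essence (using the same kernel primitives as the hunk; the helper name is made up):

// May we notify the debugger from this timer interrupt? Only if the
// interrupted program counter lies in userland.
static inline bool
may_flush_now()
{
	return !IS_KERNEL_ADDRESS(arch_debug_get_interrupt_pc());
}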
@@ -1291,11 +1247,13 @@ profiling_buffer_full(void*)
 
 	if (debugInfo.profile.samples != NULL && debugInfo.profile.buffer_full) {
 		int32 sampleCount = debugInfo.profile.sample_count;
+		int32 droppedTicks = debugInfo.profile.dropped_ticks;
 		int32 stackDepth = debugInfo.profile.stack_depth;
 		int32 imageEvent = debugInfo.profile.image_event;
 
 		// notify the debugger
 		debugInfo.profile.sample_count = 0;
+		debugInfo.profile.dropped_ticks = 0;
 
 		RELEASE_THREAD_LOCK();
 		enable_interrupts();
@@ -1303,6 +1261,7 @@ profiling_buffer_full(void*)
 		// prepare the message
 		debug_profiler_update message;
 		message.sample_count = sampleCount;
+		message.dropped_ticks = droppedTicks;
 		message.stack_depth = stackDepth;
 		message.image_event = imageEvent;
 		message.stopped = false;
@@ -1333,7 +1292,7 @@ profiling_event(timer* /*unused*/)
 	struct thread* thread = thread_get_current_thread();
 	thread_debug_info& debugInfo = thread->debug_info;
 
-	bool flushBuffer;
+	bool flushBuffer = false;
 	if (profiling_do_sample(flushBuffer)) {
 		if (flushBuffer) {
 			// The sample buffer needs to be flushed; we'll have to notify the
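The added initializer matters because of the new drop path above: profiling_do_sample() now returns true after incrementing dropped_ticks without assigning flushBuffer, so without the "= false" the caller would branch on an indeterminate value. Condensed from the two functions in this diff:

bool flushBuffer = false;	// was: bool flushBuffer;
if (profiling_do_sample(flushBuffer)) {
	// On the dropped-tick path profiling_do_sample() returns true
	// without touching flushBuffer; the initializer keeps this read
	// well defined.
	if (flushBuffer) {
		// notify the debugger, then continue profiling
	}
}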
@@ -1355,11 +1314,6 @@ profiling_event(timer* /*unused*/)
 void
 user_debug_thread_unscheduled(struct thread* thread)
 {
-	if (thread->debug_info.profile.disabled > 0) {
-		ASSERT(thread->debug_info.profile.installed_timer == NULL);
-		return;
-	}
-
 	// if running, cancel the profiling timer
 	struct timer* timer = thread->debug_info.profile.installed_timer;
 	if (timer != NULL) {
@@ -1377,9 +1331,6 @@ user_debug_thread_unscheduled(struct thread* thread)
 void
 user_debug_thread_scheduled(struct thread* thread)
 {
-	if (thread->debug_info.profile.disabled > 0)
-		return;
-
 	if (thread->debug_info.profile.samples != NULL
 		&& !thread->debug_info.profile.buffer_full) {
 		// install profiling timer
@@ -2321,7 +2272,12 @@ debug_nub_thread(void *)
 					threadDebugInfo.profile.samples = (addr_t*)samples;
 					threadDebugInfo.profile.max_samples
 						= areaInfo.size / sizeof(addr_t);
+					threadDebugInfo.profile.flush_threshold
+						= threadDebugInfo.profile.max_samples
+							* B_DEBUG_PROFILE_BUFFER_FLUSH_THRESHOLD
+							/ 100;
 					threadDebugInfo.profile.sample_count = 0;
+					threadDebugInfo.profile.dropped_ticks = 0;
 					threadDebugInfo.profile.stack_depth = stackDepth;
 					threadDebugInfo.profile.buffer_full = false;
 					threadDebugInfo.profile.interval_left = interval;
@@ -2369,6 +2325,7 @@ debug_nub_thread(void *)
 			int32 sampleCount = 0;
 			int32 stackDepth = 0;
 			int32 imageEvent = 0;
+			int32 droppedTicks = 0;
 
 			// get the thread and detach the profile info
 			cpu_status state = disable_interrupts();
@@ -2382,11 +2339,13 @@ debug_nub_thread(void *)
 					sampleArea = threadDebugInfo.profile.sample_area;
 					samples = threadDebugInfo.profile.samples;
 					sampleCount = threadDebugInfo.profile.sample_count;
+					droppedTicks = threadDebugInfo.profile.dropped_ticks;
 					stackDepth = threadDebugInfo.profile.stack_depth;
 					imageEvent = threadDebugInfo.profile.image_event;
 					threadDebugInfo.profile.sample_area = -1;
 					threadDebugInfo.profile.samples = NULL;
 					threadDebugInfo.profile.buffer_full = false;
+					threadDebugInfo.profile.dropped_ticks = 0;
 				} else
 					result = B_BAD_VALUE;
 			} else
@@ -2401,6 +2360,7 @@ debug_nub_thread(void *)
 			reply.profiler_update.image_event = imageEvent;
 			reply.profiler_update.stack_depth = stackDepth;
 			reply.profiler_update.sample_count = sampleCount;
+			reply.profiler_update.dropped_ticks = droppedTicks;
 			reply.profiler_update.stopped = true;
 		} else
 			reply.profiler_update.origin.thread = result;
|
Loading…
x
Reference in New Issue
Block a user