* arch_debug_get_stack_trace():
- Replaced the "userOnly" parameter by a "flags" parameter, that allows to specify kernel and userland stack traces individually. - x86, m68k: Don't always skip the first frame as that prevents the caller from being able to record its own address. * capture_tracing_stack_trace(): Replaced the "userOnly" parameter by "kernelOnly", since one is probably always interested in the kernel stack trace, but might not want the userland stack trace. * Added stack trace support for VM cache kernel tracing. git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34742 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 42f2c2d099 · commit a38f850360
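For callers, the migration is mechanical; a minimal sketch (not part of this commit;
"addresses" and "kMaxDepth" are placeholder names):

	// Before: "userOnly == false" meant both kernel and userland frames.
	//int32 depth = arch_debug_get_stack_trace(addresses, kMaxDepth, 0, 1, false);

	// After: the same behavior is requested explicitly via the new flags.
	int32 depth = arch_debug_get_stack_trace(addresses, kMaxDepth, 0, 1,
		STACK_TRACE_KERNEL | STACK_TRACE_USER);

	// Kernel frames only, e.g. for tracing entries:
	int32 kernelDepth = arch_debug_get_stack_trace(addresses, kMaxDepth, 0, 1,
		STACK_TRACE_KERNEL);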
@@ -49,6 +49,7 @@
 #define USER_MALLOC_TRACING 0
 #define VFS_PAGES_IO_TRACING 0
 #define VM_CACHE_TRACING 0
+#define VM_CACHE_TRACING_STACK_TRACE 0
 #define VM_PAGE_FAULT_TRACING 0
 #define WAIT_FOR_OBJECTS_TRACING 0
 
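To actually get stack traces for VM cache tracing entries, the switches above have to
be enabled in the build configuration (and tracing itself via ENABLE_TRACING). A sketch
of the settings; the concrete values are examples only, the second macro's value is the
maximum number of frames to capture, as used by the VMCacheTraceEntry hunk further down:

	#define VM_CACHE_TRACING 1
	#define VM_CACHE_TRACING_STACK_TRACE 10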
@@ -18,6 +18,11 @@ struct thread;
 struct thread;
 
 
+// arch_debug_get_stack_trace() flags
+#define STACK_TRACE_KERNEL 0x01
+#define STACK_TRACE_USER 0x02
+
+
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -26,7 +31,7 @@ status_t arch_debug_init(kernel_args *args);
 void arch_debug_stack_trace(void);
 void *arch_debug_get_caller(void);
 int32 arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
-	int32 skipIframes, int32 skipFrames, bool userOnly);
+	int32 skipIframes, int32 skipFrames, uint32 flags);
 void* arch_debug_get_interrupt_pc(bool* _isSyscall);
 bool arch_debug_contains_call(struct thread *thread, const char *symbol,
 	addr_t start, addr_t end);
@@ -208,7 +208,7 @@ uint8* alloc_tracing_buffer_memcpy(const void* source, size_t size, bool user);
 char* alloc_tracing_buffer_strcpy(const char* source, size_t maxSize,
 	bool user);
 struct tracing_stack_trace* capture_tracing_stack_trace(int32 maxCount,
-	int32 skipFrames, bool userOnly);
+	int32 skipFrames, bool kernelOnly);
 void lock_tracing_buffer();
 void unlock_tracing_buffer();
 status_t tracing_init(void);
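The new "kernelOnly" parameter is translated into arch_debug_get_stack_trace() flags
inside capture_tracing_stack_trace() (see the tracing.cpp hunk further down); the
intended mapping is roughly:

	// Kernel frames are always captured; userland frames only if !kernelOnly.
	uint32 flags = STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER);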
@@ -261,78 +261,10 @@ arch_debug_get_caller(void)
 }
 
 
-/*! Captures a stack trace (the return addresses) of the current thread.
-	\param returnAddresses The array the return address shall be written to.
-	\param maxCount The maximum number of return addresses to be captured.
-	\param skipIframes The number of interrupt frames that shall be skipped. If
-		greater than 0, \a skipFrames is ignored.
-	\param skipFrames The number of stack frames that shall be skipped.
-	\param userOnly If \c true, only userland return addresses are captured.
-	\return The number of return addresses written to the given array.
-*/
 int32
 arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
-	int32 skipIframes, int32 skipFrames, bool userOnly)
+	int32 skipIframes, int32 skipFrames, uint32 flags)
 {
-/* 	struct iframe_stack *frameStack;
-	addr_t framePointer;
-	int32 count = 0;
-	int32 i, num = 0, last = 0;
-
-	// Keep skipping normal stack frames until we've skipped the iframes we're
-	// supposed to skip.
-	if (skipIframes > 0) {
-		skipFrames = INT_MAX;
-	} else {
-		// always skip our own frame
-		skipFrames++;
-	}
-
-	struct thread* thread = thread_get_current_thread();
-	framePointer = (addr_t)get_current_stack_frame();
-
-	// We don't have a thread pointer early in the boot process
-	if (thread != NULL)
-		frameStack = &thread->arch_info.iframes;
-	else
-		frameStack = &gBootFrameStack;
-
-	while (framePointer != 0 && count < maxCount) {
-		// see if the frame pointer matches the iframe
-		struct iframe *frame = NULL;
-		for (i = 0; i < frameStack->index; i++) {
-			if (framePointer == (addr_t)frameStack->frames[i]) {
-				// it's an iframe
-				frame = frameStack->frames[i];
-				break;
-			}
-		}
-
-		addr_t ip;
-		addr_t nextFrame;
-
-		if (frame) {
-			ip = frame->cpu.pc;
-			nextFrame = frame->a[6];
-
-			if (skipIframes > 0) {
-				if (--skipIframes == 0)
-					skipFrames = 0;
-			}
-		} else {
-			if (get_next_frame(framePointer, &nextFrame, &ip) != B_OK)
-				break;
-		}
-
-		if (skipFrames <= 0 && (!userOnly || IS_USER_ADDRESS(framePointer)))
-			returnAddresses[count++] = ip;
-		else
-			skipFrames--;
-
-		framePointer = nextFrame;
-	}
-
-	return count;*/
 #warning ARM:IMPLEMENT
 
 	return 0;
@@ -279,18 +279,9 @@ arch_debug_get_caller(void)
 }
 
 
-/*! Captures a stack trace (the return addresses) of the current thread.
-	\param returnAddresses The array the return address shall be written to.
-	\param maxCount The maximum number of return addresses to be captured.
-	\param skipIframes The number of interrupt frames that shall be skipped. If
-		greater than 0, \a skipFrames is ignored.
-	\param skipFrames The number of stack frames that shall be skipped.
-	\param userOnly If \c true, only userland return addresses are captured.
-	\return The number of return addresses written to the given array.
-*/
 int32
 arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
-	int32 skipIframes, int32 skipFrames, bool userOnly)
+	int32 skipIframes, int32 skipFrames, uint32 flags)
 {
 	struct iframe_stack *frameStack;
 	addr_t framePointer;
@@ -299,15 +290,12 @@ arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
 
 	// Keep skipping normal stack frames until we've skipped the iframes we're
 	// supposed to skip.
-	if (skipIframes > 0) {
+	if (skipIframes > 0)
 		skipFrames = INT_MAX;
-	} else {
-		// always skip our own frame
-		skipFrames++;
-	}
 
 	struct thread* thread = thread_get_current_thread();
 	framePointer = (addr_t)get_current_stack_frame();
+	bool onKernelStack = true;
 
 	// We don't have a thread pointer early in the boot process
 	if (thread != NULL)
@@ -316,6 +304,12 @@ arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
 		frameStack = &gBootFrameStack;
 
 	while (framePointer != 0 && count < maxCount) {
+		onKernelStack = onKernelStack
+			&& IS_KERNEL_ADDRESS(thread, framePointer);
+			// TODO: Correctly determine whether this is a kernel address!
+		if (!onKernelStack && (flags & STACK_TRACE_USER) == 0)
+			break;
+
 		// see if the frame pointer matches the iframe
 		struct iframe *frame = NULL;
 		for (i = 0; i < frameStack->index; i++) {
@@ -342,9 +336,10 @@ arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
 				break;
 		}
 
-		if (skipFrames <= 0 && (!userOnly || IS_USER_ADDRESS(framePointer)))
+		if (skipFrames <= 0
+			&& ((flags & STACK_TRACE_KERNEL) != 0 || onKernelStack)) {
 			returnAddresses[count++] = ip;
-		else
+		} else
 			skipFrames--;
 
 		framePointer = nextFrame;
@@ -50,7 +50,7 @@ arch_debug_contains_call(struct thread* thread, const char* symbol,
 }
 
 
-void*
+void*
 arch_debug_get_caller(void)
 {
 #warning IMPLEMENT arch_debug_get_caller
@@ -60,7 +60,7 @@ arch_debug_get_caller(void)
 
 int32
 arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
-	int32 skipIframes, int32 skipFrames, bool userOnly)
+	int32 skipIframes, int32 skipFrames, uint32 flags)
 {
 #warning IMPLEMENT arch_debug_get_stack_trace
 	return 0;
@@ -284,7 +284,7 @@ arch_debug_get_caller(void)
 
 int32
 arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
-	int32 skipIframes, int32 skipFrames, bool userOnly)
+	int32 skipIframes, int32 skipFrames, uint32 flags)
 {
 	// TODO: Implement!
 	return 0;
@@ -956,21 +956,19 @@ arch_debug_get_caller(void)
 	\param skipIframes The number of interrupt frames that shall be skipped. If
 		greater than 0, \a skipFrames is ignored.
 	\param skipFrames The number of stack frames that shall be skipped.
-	\param userOnly If \c true, only userland return addresses are captured.
+	\param flags A combination of one or two of the following:
+		- \c STACK_TRACE_KERNEL: Capture kernel return addresses.
+		- \c STACK_TRACE_USER: Capture user return addresses.
 	\return The number of return addresses written to the given array.
 */
 int32
 arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
-	int32 skipIframes, int32 skipFrames, bool userOnly)
+	int32 skipIframes, int32 skipFrames, uint32 flags)
 {
 	// Keep skipping normal stack frames until we've skipped the iframes we're
 	// supposed to skip.
-	if (skipIframes > 0) {
+	if (skipIframes > 0)
 		skipFrames = INT_MAX;
-	} else {
-		// always skip our own frame
-		skipFrames++;
-	}
 
 	struct thread* thread = thread_get_current_thread();
 	int32 count = 0;
@@ -980,6 +978,8 @@ arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
 	while (ebp != 0 && count < maxCount) {
 		onKernelStack = onKernelStack
 			&& is_kernel_stack_address(thread, ebp);
+		if (!onKernelStack && (flags & STACK_TRACE_USER) == 0)
+			break;
 
 		addr_t eip;
 		addr_t nextEbp;
@@ -998,9 +998,10 @@ arch_debug_get_stack_trace(addr_t* returnAddresses, int32 maxCount,
 				break;
 		}
 
-		if (skipFrames <= 0 && (!userOnly || IS_USER_ADDRESS(ebp)))
+		if (skipFrames <= 0
+			&& ((flags & STACK_TRACE_KERNEL) != 0 || onKernelStack)) {
 			returnAddresses[count++] = eip;
-		else
+		} else
 			skipFrames--;
 
 		ebp = nextEbp;
@@ -1348,7 +1348,7 @@ SystemProfiler::_DoSample()
 
 	// get the samples
 	int32 count = arch_debug_get_stack_trace(cpuData.buffer, fStackDepth, 1,
-		0, false);
+		0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
 
 	InterruptsSpinLocker locker(fLock);
 
@@ -1529,7 +1529,7 @@ alloc_tracing_buffer_strcpy(const char* source, size_t maxSize, bool user)
 
 
 tracing_stack_trace*
-capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool userOnly)
+capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool kernelOnly)
 {
 #if ENABLE_TRACING
 	// TODO: page_fault_exception() doesn't allow us to gracefully handle
@@ -1544,7 +1544,7 @@ capture_tracing_stack_trace(int32 maxCount, int32 skipFrames, bool userOnly)
 	if (stackTrace != NULL) {
 		stackTrace->depth = arch_debug_get_stack_trace(
 			stackTrace->return_addresses, maxCount, 0, skipFrames + 1,
-			userOnly);
+			STACK_TRACE_KERNEL | (kernelOnly ? 0 : STACK_TRACE_USER));
 	}
 
 	return stackTrace;
@@ -1344,14 +1344,14 @@ profiling_do_sample(bool& flushBuffer)
 	if (debugInfo.profile.variable_stack_depth) {
 		// variable sample count per hit
 		*returnAddresses = arch_debug_get_stack_trace(returnAddresses + 1,
-			stackDepth - 1, 1, 0, false);
+			stackDepth - 1, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
 
 		debugInfo.profile.sample_count += *returnAddresses + 1;
 	} else {
 		// fixed sample count per hit
 		if (stackDepth > 1) {
 			int32 count = arch_debug_get_stack_trace(returnAddresses,
-				stackDepth, 1, 0, false);
+				stackDepth, 1, 0, STACK_TRACE_KERNEL | STACK_TRACE_USER);
 
 			for (int32 i = count; i < stackDepth; i++)
 				returnAddresses[i] = 0;
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  */
@@ -1322,7 +1322,8 @@ get_caller()
 	// this makes certain assumptions about how the code for the functions
 	// ends up in the kernel object.
 	addr_t returnAddresses[5];
-	int32 depth = arch_debug_get_stack_trace(returnAddresses, 5, 0, 1, false);
+	int32 depth = arch_debug_get_stack_trace(returnAddresses, 5, 0, 1,
+		STACK_TRACE_KERNEL | STACK_TRACE_USER);
 
 	// find the first return address inside the VIP allocator
 	int32 i = 0;
@@ -279,7 +279,8 @@ get_caller()
 	// this makes certain assumptions about how the code for the functions
 	// ends up in the kernel object.
 	addr_t returnAddresses[5];
-	int32 depth = arch_debug_get_stack_trace(returnAddresses, 5, 0, 1, false);
+	int32 depth = arch_debug_get_stack_trace(returnAddresses, 5, 0, 1,
+		STACK_TRACE_KERNEL);
 	for (int32 i = 0; i < depth; i++) {
 		if (returnAddresses[i] < (addr_t)&get_caller
 			|| returnAddresses[i] > (addr_t)&malloc_referenced_release) {
@@ -64,10 +64,26 @@ class VMCacheTraceEntry : public AbstractTraceEntry {
 		:
 		fCache(cache)
 	{
+#if VM_CACHE_TRACING_STACK_TRACE
+		fStackTrace = capture_tracing_stack_trace(
+			VM_CACHE_TRACING_STACK_TRACE, 0, true);
+			// Don't capture userland stack trace to avoid potential
+			// deadlocks.
+#endif
 	}
 
+#if VM_CACHE_TRACING_STACK_TRACE
+	virtual void DumpStackTrace(TraceOutput& out)
+	{
+		out.PrintStackTrace(fStackTrace);
+	}
+#endif
+
 	protected:
 		VMCache* fCache;
+#if VM_CACHE_TRACING_STACK_TRACE
+		tracing_stack_trace* fStackTrace;
+#endif
 };
 
 