* Revert r42319 as it introduces a race condition when entering the kernel
  debugger. As sInDebugger is already > 0 when the first CPU enters KDL, code
  from other CPUs might see debug_debugger_running() == true already before
  they enter the debugger.
* Instead, move the sDebuggerOnCPU setting out of the debugger loop and hold
  the value until after calling exit_kernel_debugger() so that the exit hooks
  still see debug_debugger_running() == true.
* Also avoid calling exit_kernel_debugger() when we've been called recursively
  (previousCPU != -1). Previously the exit hooks would've been called and the
  debugger state reset erroneously. To balance the missing decrement of
  sInDebugger in that case we decrement sInDebugger in enter_kernel_debugger()
  also when detecting the recursion case.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@42320 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
parent
3214710036
commit
14c345e147
|
@ -805,9 +805,6 @@ static void
|
|||
kernel_debugger_loop(const char* messagePrefix, const char* message,
|
||||
va_list args, int32 cpu)
|
||||
{
|
||||
int32 previousCPU = sDebuggerOnCPU;
|
||||
sDebuggerOnCPU = cpu;
|
||||
|
||||
DebugAllocPool* allocPool = create_debug_alloc_pool();
|
||||
|
||||
sCurrentKernelDebuggerMessagePrefix = messagePrefix;
|
||||
|
@ -936,8 +933,6 @@ kernel_debugger_loop(const char* messagePrefix, const char* message,
|
|||
va_end(sCurrentKernelDebuggerMessageArgs);
|
||||
|
||||
delete_debug_alloc_pool(allocPool);
|
||||
|
||||
sDebuggerOnCPU = previousCPU;
|
||||
}
|
||||
|
||||
|
||||
|
@ -945,6 +940,8 @@ static void
|
|||
enter_kernel_debugger(int32 cpu)
|
||||
{
|
||||
while (atomic_add(&sInDebugger, 1) > 0) {
|
||||
atomic_add(&sInDebugger, -1);
|
||||
|
||||
// The debugger is already running, find out where...
|
||||
if (sDebuggerOnCPU == cpu) {
|
||||
// We are re-entering the debugger on the same CPU.
|
||||
|
@ -955,7 +952,6 @@ enter_kernel_debugger(int32 cpu)
|
|||
// us. Process ICIs to ensure we get the halt request. Then we are
|
||||
// blocking there until everyone leaves the debugger and we can
|
||||
// try to enter it again.
|
||||
atomic_add(&sInDebugger, -1);
|
||||
smp_intercpu_int_handler(cpu);
|
||||
}
|
||||
|
||||
|
@ -1027,13 +1023,23 @@ kernel_debugger_internal(const char* messagePrefix, const char* message,
|
|||
} else
|
||||
enter_kernel_debugger(cpu);
|
||||
|
||||
// If we're called recursively sDebuggerOnCPU will be != -1.
|
||||
int32 previousCPU = sDebuggerOnCPU;
|
||||
sDebuggerOnCPU = cpu;
|
||||
|
||||
kernel_debugger_loop(messagePrefix, message, args, cpu);
|
||||
|
||||
if (sHandOverKDLToCPU < 0) {
|
||||
if (sHandOverKDLToCPU < 0 && previousCPU == -1) {
|
||||
// We're not handing over to a different CPU and we weren't
|
||||
// called recursively, so we'll exit the debugger.
|
||||
exit_kernel_debugger();
|
||||
break;
|
||||
}
|
||||
|
||||
sDebuggerOnCPU = previousCPU;
|
||||
|
||||
if (sHandOverKDLToCPU < 0)
|
||||
break;
|
||||
|
||||
hand_over_kernel_debugger();
|
||||
|
||||
debug_trap_cpu_in_kdl(cpu, true);
|
||||
|
@ -1617,7 +1623,7 @@ debug_stop_screen_debug_output(void)
|
|||
bool
|
||||
debug_debugger_running(void)
|
||||
{
|
||||
return sInDebugger > 0;
|
||||
return sDebuggerOnCPU != -1;
|
||||
}
|
||||
|
||||
|
||||
|
|
Loading…
Reference in New Issue