From 467fe4ca0c733b57bcf959e48c01108c7a6a644d Mon Sep 17 00:00:00 2001 From: Ingo Weinhold Date: Sun, 24 Apr 2016 18:22:14 +0200 Subject: [PATCH] kernel: Add core dump facility * Add function core_dump_write_core_file(). It writes a core file for the current thread's team. The file format is similar to that of other OSs (i.e. ELF with PT_LOAD segments and a PT_NOTE segment), but most of the notes are Haiku specific (infos for team, areas, images, threads). More data will probably need to be added. * Add team flag TEAM_FLAG_DUMP_CORE, thread flag THREAD_FLAGS_TRAP_FOR_CORE_DUMP, and Team property coreDumpCondition, a condition variable available while a core dump is in progress. A thread that finds its flag THREAD_FLAGS_TRAP_FOR_CORE_DUMP set before exiting the kernel to userland calls core_dump_trap_thread(), which blocks on the condition variable until the core dump has finished. We need the team's threads to stop so we can get their CPU state (and have a generally unchanging team state while writing the core file). * Add user debugger message B_DEBUG_WRITE_CORE_FILE. It causes core_dump_write_core_file() to be called for the team. * Dumping core as an immediate effect of a terminal signal has not been implemented yet, but that should be fairly straightforward. --- headers/os/kernel/debugger.h | 21 +- headers/os/kernel/elf.h | 124 +- headers/private/kernel/core_dump.h | 18 + headers/private/kernel/thread_types.h | 16 +- headers/private/system/elf_private.h | 7 +- src/system/kernel/arch/x86/32/interrupts.S | 16 +- src/system/kernel/arch/x86/64/interrupts.S | 17 +- src/system/kernel/cache/vnode_store.h | 7 +- src/system/kernel/debug/Jamfile | 1 + src/system/kernel/debug/core_dump.cpp | 1440 ++++++++++++++++++++ src/system/kernel/debug/user_debugger.cpp | 27 +- src/system/kernel/signal.cpp | 29 +- src/system/kernel/team.cpp | 4 +- 13 files changed, 1698 insertions(+), 29 deletions(-) create mode 100644 headers/private/kernel/core_dump.h create mode 100644 src/system/kernel/debug/core_dump.cpp diff --git a/headers/os/kernel/debugger.h b/headers/os/kernel/debugger.h index 770d0b7c00..d7589452dd 100644 --- a/headers/os/kernel/debugger.h +++ b/headers/os/kernel/debugger.h @@ -1,5 +1,5 @@ /* - * Copyright 2005, Ingo Weinhold, bonefish@users.sf.net. + * Copyright 2005-2016 Haiku, Inc. All rights reserved. * Distributed under the terms of the MIT License. 
*/ #ifndef _DEBUGGER_H @@ -163,7 +163,9 @@ typedef enum { // install_team_debugger() B_DEBUG_START_PROFILER, // start/stop sampling - B_DEBUG_STOP_PROFILER // + B_DEBUG_STOP_PROFILER, // + + B_DEBUG_WRITE_CORE_FILE // write a core file } debug_nub_message; // messages sent to the debugger @@ -412,6 +414,20 @@ typedef struct { thread_id thread; // thread to profile } debug_nub_stop_profiler; +// B_DEBUG_WRITE_CORE_FILE + +typedef struct { + port_id reply_port; // port to send the reply to + char path[B_PATH_NAME_LENGTH]; + // path of the core file; must not exist + // yet; must be absolute +} debug_nub_write_core_file; + +typedef struct { + status_t error; // B_OK on success +} debug_nub_write_core_file_reply; + + // reply is debug_profiler_update // union of all messages structures sent to the debug nub thread @@ -433,6 +449,7 @@ typedef union { debug_nub_get_signal_handler get_signal_handler; debug_nub_start_profiler start_profiler; debug_nub_stop_profiler stop_profiler; + debug_nub_write_core_file write_core_file; } debug_nub_message_data; diff --git a/headers/os/kernel/elf.h b/headers/os/kernel/elf.h index 6f486f6774..f25fc8b8b1 100644 --- a/headers/os/kernel/elf.h +++ b/headers/os/kernel/elf.h @@ -1,5 +1,5 @@ /* - * Copyright 2002-2015 Haiku, Inc. All rights reserved. + * Copyright 2002-2016 Haiku, Inc. All rights reserved. * Distributed under the terms of the MIT License. */ #ifndef _ELF_H @@ -153,6 +153,9 @@ typedef struct { #define ELFDATA2LSB 1 /* little endian */ #define ELFDATA2MSB 2 /* big endian */ +/* ELF version (EI_VERSION) */ +#define EV_NONE 0 /* invalid */ +#define EV_CURRENT 1 /* current version */ /*** section header ***/ @@ -578,6 +581,125 @@ typedef struct { #define VER_FLG_WEAK 0x2 /* weak version identifier */ +/*** core files ***/ + +/* note section header */ + +typedef struct { + Elf32_Word n_namesz; /* length of the note's name */ + Elf32_Word n_descsz; /* length of the note's descriptor */ + Elf32_Word n_type; /* note type */ +} Elf32_Nhdr; + +typedef struct { + Elf64_Word n_namesz; /* length of the note's name */ + Elf64_Word n_descsz; /* length of the note's descriptor */ + Elf64_Word n_type; /* note type */ +} Elf64_Nhdr; + +/* values for note name */ +#define ELF_NOTE_CORE "CORE" +#define ELF_NOTE_HAIKU "Haiku" + +/* values for note type (n_type) */ +/* ELF_NOTE_CORE/... */ +#define NT_FILE 0x46494c45 /* mapped files */ + +/* ELF_NOTE_HAIKU/... */ +#define NT_TEAM 0x7465616d /* team */ +#define NT_AREAS 0x61726561 /* areas */ +#define NT_IMAGES 0x696d6167 /* images */ +#define NT_THREADS 0x74687264 /* threads */ + +/* NT_TEAM: Elf32_Note_Team; char[] args */ +typedef struct { + int32 nt_id; /* team ID */ + int32 nt_uid; /* team owner ID */ + int32 nt_gid; /* team group ID */ +} Elf32_Note_Team; + +typedef Elf32_Note_Team Elf64_Note_Team; + +/* NT_AREAS: uint32 count; Elf32_Note_Area_Entry[count]; char[] names */ +typedef struct { + int32 na_id; /* area ID */ + uint32 na_lock; /* lock type (B_NO_LOCK, ...) */ + uint32 na_protection; /* protection (B_READ_AREA | ...) */ + uint32 na_base; /* area base address */ + uint32 na_size; /* area size */ + uint32 na_ram_size; /* physical memory used */ +} Elf32_Note_Area_Entry; + +/* NT_AREAS: uint64 count; Elf64_Note_Area_Entry[count]; char[] names */ +typedef struct { + int32 na_id; /* area ID */ + uint32 na_lock; /* lock type (B_NO_LOCK, ...) */ + uint32 na_protection; /* protection (B_READ_AREA | ...) 
*/ + uint32 na_pad1; + uint64 na_base; /* area base address */ + uint64 na_size; /* area size */ + uint64 na_ram_size; /* physical memory used */ +} Elf64_Note_Area_Entry; + +/* NT_IMAGES: uint32 count; Elf32_Note_Image_Entry[count]; char[] names */ +typedef struct { + int32 ni_id; /* image ID */ + int32 ni_type; /* image type (B_APP_IMAGE, ...) */ + uint32 ni_init_routine; /* address of init function */ + uint32 ni_term_routine; /* address of termination function */ + int32 ni_device; /* device ID of mapped file */ + int64 ni_node; /* node ID of mapped file */ + uint32 ni_text_base; /* base address of text segment */ + uint32 ni_text_size; /* size of text segment */ + uint32 ni_data_base; /* base address of data segment */ + uint32 ni_data_size; /* size of data segment */ +} Elf32_Note_Image_Entry; + +/* NT_IMAGES: uint64 count; Elf64_Note_Image_Entry[count]; char[] names */ +typedef struct { + int32 ni_id; /* image ID */ + int32 ni_type; /* image type (B_APP_IMAGE, ...) */ + uint64 ni_init_routine; /* address of init function */ + uint64 ni_term_routine; /* address of termination function */ + uint32 ni_pad1; + int32 ni_device; /* device ID of mapped file */ + int64 ni_node; /* node ID of mapped file */ + uint64 ni_text_base; /* base address of text segment */ + uint64 ni_text_size; /* size of text segment */ + uint64 ni_data_base; /* base address of data segment */ + uint64 ni_data_size; /* size of data segment */ +} Elf64_Note_Image_Entry; + +/* NT_THREADS: + * uint32 count; + * uint32 cpuStateSize; + * {Elf32_Note_Thread_Entry, uint8[cpuStateSize] cpuState}[count]; + * char[] names + */ +typedef struct { + int32 nth_id; /* thread ID */ + int32 nth_state; /* thread state (B_THREAD_RUNNING, ...) */ + int32 nth_priority; /* thread priority */ + uint32 nth_stack_base; /* thread stack base address */ + uint32 nth_stack_end; /* thread stack end address */ +} Elf32_Note_Thread_Entry; + +/* NT_THREADS: + * uint64 count; + * uint64 cpuStateSize; + * {Elf64_Note_Thread_Entry, uint8[cpuStateSize] cpuState}[count]; + * char[] names + */ +typedef struct { + int32 nth_id; /* thread ID */ + int32 nth_state; /* thread state (B_THREAD_RUNNING, ...) */ + int32 nth_priority; /* thread priority */ + uint32 nth_pad1; + uint64 nth_stack_base; /* thread stack base address */ + uint64 nth_stack_end; /* thread stack end address */ +} Elf64_Note_Thread_Entry; + + /*** inline functions ***/ #ifdef __cplusplus diff --git a/headers/private/kernel/core_dump.h b/headers/private/kernel/core_dump.h new file mode 100644 index 0000000000..65808c3116 --- /dev/null +++ b/headers/private/kernel/core_dump.h @@ -0,0 +1,18 @@ +/* + * Copyright 2016, Ingo Weinhold, ingo_weinhold@gmx.de. + * Distributed under the terms of the MIT License. + * + * Core dump support. + */ +#ifndef _KERNEL_CORE_DUMP_H +#define _KERNEL_CORE_DUMP_H + + +#include + + +status_t core_dump_write_core_file(const char* path, bool killTeam); +void core_dump_trap_thread(); + + +#endif // _KERNEL_CORE_DUMP_H diff --git a/headers/private/kernel/thread_types.h b/headers/private/kernel/thread_types.h index cf9e8fa872..e90e25829a 100644 --- a/headers/private/kernel/thread_types.h +++ b/headers/private/kernel/thread_types.h @@ -1,5 +1,5 @@ /* - * Copyright 2004-2011, Haiku, Inc. + * Copyright 2004-2016, Haiku, Inc. * Distributed under the terms of the MIT License. 
* * Thread definition and structures @@ -44,6 +44,9 @@ enum team_state { }; #define TEAM_FLAG_EXEC_DONE 0x01 + // team has executed exec*() +#define TEAM_FLAG_DUMP_CORE 0x02 + // a core dump is in progress typedef enum job_control_state { JOB_CONTROL_STATE_NONE, @@ -392,6 +395,11 @@ public: Thread* lockedThread = NULL) const; bigtime_t UserCPUTime() const; + ConditionVariable* CoreDumpCondition() const + { return fCoreDumpCondition; } + void SetCoreDumpCondition( + ConditionVariable* condition) + { fCoreDumpCondition = condition; } private: Team(team_id id, bool kernel); @@ -412,6 +420,9 @@ private: // protected by scheduler lock TeamUserTimeUserTimerList fUserTimeUserTimers; int32 fUserDefinedTimerCount; // accessed atomically + + ConditionVariable* fCoreDumpCondition; + // protected by fLock }; @@ -821,6 +832,9 @@ using BKernel::ProcessGroupList; // the thread is currently in a syscall; set/reset only for certain // functions (e.g. ioctl()) to allow inner functions to discriminate // whether e.g. parameters were passed from userland or kernel +#define THREAD_FLAGS_TRAP_FOR_CORE_DUMP 0x1000 + // core dump in progress; the thread shall not exit the kernel to userland, + // but shall invoke core_dump_trap_thread() instead. #endif /* _KERNEL_THREAD_TYPES_H */ diff --git a/headers/private/system/elf_private.h b/headers/private/system/elf_private.h index ce2f6e2f20..78da6ffcd4 100644 --- a/headers/private/system/elf_private.h +++ b/headers/private/system/elf_private.h @@ -1,5 +1,5 @@ /* - * Copyright 2002-2015 Haiku, Inc. All rights reserved. + * Copyright 2002-2016 Haiku, Inc. All rights reserved. * Distributed under the terms of the MIT License. * * Copyright 2001 Travis Geiselbrecht. All rights reserved. @@ -37,6 +37,11 @@ DEFINE_ELF_TYPE(Verdef, elf_verdef); DEFINE_ELF_TYPE(Verdaux, elf_verdaux); DEFINE_ELF_TYPE(Verneed, elf_verneed); DEFINE_ELF_TYPE(Vernaux, elf_vernaux); +DEFINE_ELF_TYPE(Nhdr, elf_nhdr); +DEFINE_ELF_TYPE(Note_Team, elf_note_team); +DEFINE_ELF_TYPE(Note_Area_Entry, elf_note_area_entry); +DEFINE_ELF_TYPE(Note_Image_Entry, elf_note_image_entry); +DEFINE_ELF_TYPE(Note_Thread_Entry, elf_note_thread_entry); #undef DEFINE_ELF_TYPE #undef _ELF_TYPE diff --git a/src/system/kernel/arch/x86/32/interrupts.S b/src/system/kernel/arch/x86/32/interrupts.S index d42f66ed34..a3fea32096 100644 --- a/src/system/kernel/arch/x86/32/interrupts.S +++ b/src/system/kernel/arch/x86/32/interrupts.S @@ -1,5 +1,5 @@ /* - * Copyright 2002-2011, The Haiku Team. All rights reserved. + * Copyright 2002-2016, The Haiku Team. All rights reserved. * Distributed under the terms of the MIT License. * * Copyright 2001, Travis Geiselbrecht. All rights reserved. 
@@ -571,7 +571,8 @@ STATIC_FUNCTION(int_bottom_user): jne 1f testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \ - | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED) \ + | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \ , THREAD_flags(%edi) jnz kernel_exit_work 1: @@ -654,6 +655,7 @@ STATIC_FUNCTION(handle_syscall): testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \ | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP \ | THREAD_FLAGS_64_BIT_SYSCALL_RETURN \ | THREAD_FLAGS_RESTART_SYSCALL | THREAD_FLAGS_SYSCALL_RESTARTED) \ , THREAD_flags(%edi) @@ -714,9 +716,10 @@ FUNCTION_END(handle_syscall) bad_syscall_number: STATIC_FUNCTION(kernel_exit_work): - // if no signals are pending and the thread shall not be debugged, we can - // use the quick kernel exit function - testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD) \ + // if no signals are pending and the thread shall not be debugged or stopped + // for a core dump, we can use the quick kernel exit function + testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \ , THREAD_flags(%edi) jnz kernel_exit_handle_signals cli // disable interrupts @@ -828,7 +831,8 @@ FUNCTION(x86_return_to_userland): // check, if any kernel exit work has to be done movl %gs:0, %edi testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \ - | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED) \ + | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \ , THREAD_flags(%edi) jnz kernel_exit_work diff --git a/src/system/kernel/arch/x86/64/interrupts.S b/src/system/kernel/arch/x86/64/interrupts.S index c94f0d8dc9..5d7d9ee90c 100644 --- a/src/system/kernel/arch/x86/64/interrupts.S +++ b/src/system/kernel/arch/x86/64/interrupts.S @@ -272,7 +272,8 @@ STATIC_FUNCTION(int_bottom_user): // If there are no signals pending or we're not debugging, we can avoid // most of the work here, just need to update the kernel time. testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \ - | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED) \ + | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \ , THREAD_flags(%r12) jnz .Lkernel_exit_work @@ -294,7 +295,8 @@ STATIC_FUNCTION(int_bottom_user): // Slow path for return to userland. // Do we need to handle signals? - testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD) \ + testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \ , THREAD_flags(%r12) jnz .Lkernel_exit_handle_signals cli @@ -421,7 +423,7 @@ FUNCTION(x86_64_syscall_entry): 2: testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \ | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \ - | THREAD_FLAGS_RESTART_SYSCALL) \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP | THREAD_FLAGS_RESTART_SYSCALL) \ , THREAD_flags(%r12) jnz .Lpost_syscall_work @@ -482,7 +484,8 @@ FUNCTION(x86_64_syscall_entry): addq $48, %rsp 1: // Do we need to handle signals? 
- testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD) \ + testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \ , THREAD_flags(%r12) jnz .Lpost_syscall_handle_signals cli @@ -578,7 +581,8 @@ FUNCTION(x86_return_to_userland): // Perform kernel exit work. movq %gs:0, %r12 testl $(THREAD_FLAGS_DEBUGGER_INSTALLED | THREAD_FLAGS_SIGNALS_PENDING \ - | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED) \ + | THREAD_FLAGS_DEBUG_THREAD | THREAD_FLAGS_BREAKPOINTS_DEFINED \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \ , THREAD_flags(%r12) jnz .Luserland_return_work @@ -593,7 +597,8 @@ FUNCTION(x86_return_to_userland): // Slow path for return to userland. // Do we need to handle signals? - testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD) \ + testl $(THREAD_FLAGS_SIGNALS_PENDING | THREAD_FLAGS_DEBUG_THREAD \ + | THREAD_FLAGS_TRAP_FOR_CORE_DUMP) \ , THREAD_flags(%r12) jnz .Luserland_return_handle_signals cli diff --git a/src/system/kernel/cache/vnode_store.h b/src/system/kernel/cache/vnode_store.h index 34799763dd..8f3b3ed4e2 100644 --- a/src/system/kernel/cache/vnode_store.h +++ b/src/system/kernel/cache/vnode_store.h @@ -1,5 +1,5 @@ /* - * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de. + * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de. * Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de. * Distributed under the terms of the MIT License. */ @@ -48,6 +48,11 @@ public: void VnodeDeleted() { fVnodeDeleted = true; } + dev_t DeviceId() const + { return fDevice; } + ino_t InodeId() const + { return fInode; } + protected: virtual void DeleteObject(); diff --git a/src/system/kernel/debug/Jamfile b/src/system/kernel/debug/Jamfile index 9973af2c66..6b17975b72 100644 --- a/src/system/kernel/debug/Jamfile +++ b/src/system/kernel/debug/Jamfile @@ -11,6 +11,7 @@ SubDirHdrs [ FDirName $(SUBDIR) $(DOTDOT) device_manager ] ; KernelMergeObject kernel_debug.o : blue_screen.cpp BreakpointManager.cpp + core_dump.cpp debug.cpp debug_builtin_commands.cpp debug_commands.cpp diff --git a/src/system/kernel/debug/core_dump.cpp b/src/system/kernel/debug/core_dump.cpp new file mode 100644 index 0000000000..69857dc0af --- /dev/null +++ b/src/system/kernel/debug/core_dump.cpp @@ -0,0 +1,1440 @@ +/* + * Copyright 2016, Ingo Weinhold, ingo_weinhold@gmx.de. + * Distributed under the terms of the MIT License. + */ + + +#include + +#include +#include + +#include +#include + +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../cache/vnode_store.h" +#include "../vm/VMAddressSpaceLocking.h" + + +//#define TRACE_CORE_DUMP +#ifdef TRACE_CORE_DUMP +# define TRACE(...) dprintf(__VA_ARGS__) +#else +# define TRACE(...) 
do {} while (false) +#endif + + +namespace { + + +static const size_t kBufferSize = 1024 * 1024; +static const char* const kCoreNote = ELF_NOTE_CORE; +static const char* const kHaikuNote = ELF_NOTE_HAIKU; + + +struct Allocator { + Allocator() + : + fAligned(NULL), + fStrings(NULL), + fAlignedCapacity(0), + fStringCapacity(0), + fAlignedSize(0), + fStringSize(0) + { + } + + ~Allocator() + { + free(fAligned); + } + + bool HasMissingAllocations() const + { + return fAlignedSize > fAlignedCapacity || fStringSize > fStringCapacity; + } + + bool Reallocate() + { + free(fAligned); + + fAlignedCapacity = fAlignedSize; + fStringCapacity = fStringSize; + fAlignedSize = 0; + fStringSize = 0; + + fAligned = (uint8*)malloc(fAlignedCapacity + fStringCapacity); + if (fAligned == NULL) + return false; + fStrings = (char*)(fAligned + fAlignedCapacity); + + return true; + } + + void* AllocateAligned(size_t size) + { + size_t offset = fAlignedSize; + fAlignedSize += (size + 7) / 8 * 8; + if (fAlignedSize <= fAlignedCapacity) + return fAligned + offset; + return NULL; + } + + char* AllocateString(size_t length) + { + size_t offset = fStringSize; + fStringSize += length + 1; + if (fStringSize <= fStringCapacity) + return fStrings + offset; + return NULL; + } + + template + Type* New() + { + void* buffer = AllocateAligned(sizeof(Type)); + if (buffer == NULL) + return NULL; + return new(buffer) Type; + } + + char* DuplicateString(const char* string) + { + if (string == NULL) + return NULL; + char* newString = AllocateString(strlen(string)); + if (newString != NULL) + strcpy(newString, string); + return newString; + } + +private: + uint8* fAligned; + char* fStrings; + size_t fAlignedCapacity; + size_t fStringCapacity; + size_t fAlignedSize; + size_t fStringSize; +}; + + +struct TeamInfo : team_info { +}; + + +struct ThreadState : DoublyLinkedListLinkImpl { + ThreadState() + : + fThread(NULL), + fComplete(false) + { + } + + ~ThreadState() + { + SetThread(NULL); + } + + static ThreadState* Create() + { + ThreadState* state = new(std::nothrow) ThreadState; + if (state == NULL) + return NULL; + return state; + } + + Thread* GetThread() const + { + return fThread; + } + + void SetThread(Thread* thread) + { + if (fThread != NULL) + fThread->ReleaseReference(); + + fThread = thread; + + if (fThread != NULL) + fThread->AcquireReference(); + } + + /*! Invoke with thread lock and scheduler lock being held. 
*/ + void GetState() + { + fState = fThread->state; + fPriority = fThread->priority; + fStackBase = fThread->user_stack_base; + fStackEnd = fStackBase + fThread->user_stack_size; + strlcpy(fName, fThread->name, sizeof(fName)); + if (arch_get_thread_debug_cpu_state(fThread, &fCpuState) != B_OK) + memset(&fCpuState, 0, sizeof(fCpuState)); + } + + bool IsComplete() const + { + return fComplete; + } + + void SetComplete(bool complete) + { + fComplete = complete; + } + + int32 State() const + { + return fState; + } + + int32 Priority() const + { + return fPriority; + } + + addr_t StackBase() const + { + return fStackBase; + } + + addr_t StackEnd() const + { + return fStackEnd; + } + + const char* Name() const + { + return fName; + } + + const debug_cpu_state* CpuState() const + { + return &fCpuState; + } + +private: + Thread* fThread; + int32 fState; + int32 fPriority; + addr_t fStackBase; + addr_t fStackEnd; + char fName[B_OS_NAME_LENGTH]; + debug_cpu_state fCpuState; + bool fComplete; +}; + + +typedef DoublyLinkedList ThreadStateList; + + +struct ImageInfo : DoublyLinkedListLinkImpl { + ImageInfo(struct image* image) + : + fId(image->info.id), + fType(image->info.type), + fDeviceId(image->info.device), + fNodeId(image->info.node), + fName(strdup(image->info.name)), + fInitRoutine((addr_t)image->info.init_routine), + fTermRoutine((addr_t)image->info.term_routine), + fText((addr_t)image->info.text), + fData((addr_t)image->info.data), + fTextSize(image->info.text_size), + fDataSize(image->info.data_size) + { + } + + ~ImageInfo() + { + free(fName); + } + + static ImageInfo* Create(struct image* image) + { + ImageInfo* imageInfo = new(std::nothrow) ImageInfo(image); + if (imageInfo == NULL || imageInfo->fName == NULL) { + delete imageInfo; + return NULL; + } + + return imageInfo; + } + + image_id Id() const + { + return fId; + } + + image_type Type() const + { + return fType; + } + + const char* Name() const + { + return fName; + } + + dev_t DeviceId() const + { + return fDeviceId; + } + + ino_t NodeId() const + { + return fNodeId; + } + + addr_t InitRoutine() const + { + return fInitRoutine; + } + + addr_t TermRoutine() const + { + return fTermRoutine; + } + + addr_t TextBase() const + { + return fText; + + } + + size_t TextSize() const + { + return fTextSize; + } + + addr_t DataBase() const + { + return fData; + } + + size_t DataSize() const + { + return fDataSize; + } + +private: + image_id fId; + image_type fType; + dev_t fDeviceId; + ino_t fNodeId; + char* fName; + addr_t fInitRoutine; + addr_t fTermRoutine; + addr_t fText; + addr_t fData; + size_t fTextSize; + size_t fDataSize; +}; + + +typedef DoublyLinkedList ImageInfoList; + + +struct AreaInfo : DoublyLinkedListLinkImpl { + static AreaInfo* Create(Allocator& allocator, VMArea* area, size_t ramSize, + dev_t deviceId, ino_t nodeId) + { + AreaInfo* areaInfo = allocator.New(); + const char* name = allocator.DuplicateString(area->name); + + if (areaInfo != NULL) { + areaInfo->fId = area->id; + areaInfo->fName = name; + areaInfo->fBase = area->Base(); + areaInfo->fSize = area->Size(); + areaInfo->fLock = B_FULL_LOCK; + areaInfo->fProtection = area->protection; + areaInfo->fRamSize = ramSize; + areaInfo->fDeviceId = deviceId; + areaInfo->fNodeId = nodeId; + areaInfo->fCacheOffset = area->cache_offset; + areaInfo->fImageInfo = NULL; + } + + return areaInfo; + } + + area_id Id() const + { + return fId; + } + + const char* Name() const + { + return fName; + } + + addr_t Base() const + { + return fBase; + } + + size_t Size() const + { + return 
fSize; + } + + uint32 Lock() const + { + return fLock; + } + + uint32 Protection() const + { + return fProtection; + } + + size_t RamSize() const + { + return fRamSize; + } + + off_t CacheOffset() const + { + return fCacheOffset; + } + + dev_t DeviceId() const + { + return fDeviceId; + } + + ino_t NodeId() const + { + return fNodeId; + } + + ImageInfo* GetImageInfo() const + { + return fImageInfo; + } + + void SetImageInfo(ImageInfo* imageInfo) + { + fImageInfo = imageInfo; + } + +private: + area_id fId; + const char* fName; + addr_t fBase; + size_t fSize; + uint32 fLock; + uint32 fProtection; + size_t fRamSize; + dev_t fDeviceId; + ino_t fNodeId; + off_t fCacheOffset; + ImageInfo* fImageInfo; +}; + + +typedef DoublyLinkedList AreaInfoList; + + +struct BufferedFile { + BufferedFile() + : + fFd(-1), + fBuffer(NULL), + fCapacity(0), + fOffset(0), + fBuffered(0), + fStatus(B_NO_INIT) + { + } + + ~BufferedFile() + { + if (fFd >= 0) + close(fFd); + + free(fBuffer); + } + + status_t Init(const char* path) + { + fCapacity = kBufferSize; + fBuffer = (uint8*)malloc(fCapacity); + if (fBuffer == NULL) + return B_NO_MEMORY; + + fFd = open(path, O_WRONLY | O_CREAT | O_EXCL, S_IRUSR); + if (fFd < 0) + return errno; + + fStatus = B_OK; + return B_OK; + } + + status_t Status() const + { + return fStatus; + } + + off_t EndOffset() const + { + return fOffset + (off_t)fBuffered; + } + + status_t Flush() + { + if (fStatus != B_OK) + return fStatus; + + if (fBuffered == 0) + return B_OK; + + ssize_t written = pwrite(fFd, fBuffer, fBuffered, fOffset); + if (written < 0) + return fStatus = errno; + if ((size_t)written != fBuffered) + return fStatus = B_IO_ERROR; + + fOffset += (off_t)fBuffered; + fBuffered = 0; + return B_OK; + } + + status_t Seek(off_t offset) + { + if (fStatus != B_OK) + return fStatus; + + if (fBuffered == 0) { + fOffset = offset; + } else if (offset != fOffset + (off_t)fBuffered) { + status_t error = Flush(); + if (error != B_OK) + return fStatus = error; + fOffset = offset; + } + + return B_OK; + } + + status_t Write(const void* data, size_t size) + { + if (fStatus != B_OK) + return fStatus; + + if (size == 0) + return B_OK; + + while (size > 0) { + size_t toWrite = std::min(size, fCapacity - fBuffered); + if (toWrite == 0) { + status_t error = Flush(); + if (error != B_OK) + return fStatus = error; + continue; + } + + memcpy(fBuffer + fBuffered, data, toWrite); + fBuffered += toWrite; + size -= toWrite; + } + + return B_OK; + } + + template + status_t Write(const Data& data) + { + return Write(&data, sizeof(data)); + } + + status_t WriteAt(off_t offset, const void* data, size_t size) + { + if (Seek(offset) != B_OK) + return fStatus; + + return Write(data, size); + } + + status_t WriteUserArea(addr_t base, size_t size) + { + uint8* data = (uint8*)base; + size = size / B_PAGE_SIZE * B_PAGE_SIZE; + + // copy the area page-wise into the buffer, flushing when necessary + while (size > 0) { + if (fBuffered + B_PAGE_SIZE > fCapacity) { + status_t error = Flush(); + if (error != B_OK) + return error; + } + + if (user_memcpy(fBuffer + fBuffered, data, B_PAGE_SIZE) != B_OK) + memset(fBuffer + fBuffered, 0, B_PAGE_SIZE); + + fBuffered += B_PAGE_SIZE; + data += B_PAGE_SIZE; + size -= B_PAGE_SIZE; + } + + return B_OK; + } + +private: + int fFd; + uint8* fBuffer; + size_t fCapacity; + off_t fOffset; + size_t fBuffered; + status_t fStatus; +}; + + +struct DummyWriter { + DummyWriter() + : + fWritten(0) + { + } + + status_t Status() const + { + return B_OK; + } + + size_t BytesWritten() const + { + return 
fWritten; + } + + status_t Write(const void* data, size_t size) + { + fWritten += size; + return B_OK; + } + + template + status_t Write(const Data& data) + { + return Write(&data, sizeof(data)); + } + +private: + size_t fWritten; +}; + + +struct CoreDumper { + CoreDumper() + : + fCurrentThread(thread_get_current_thread()), + fTeam(fCurrentThread->team), + fFile(), + fThreadCount(0), + fThreadStates(), + fPreAllocatedThreadStates(), + fAreaInfoAllocator(), + fAreaInfos(), + fImageInfos(), + fThreadBlockCondition() + { + fThreadBlockCondition.Init(this, "core dump"); + } + + ~CoreDumper() + { + while (ThreadState* state = fThreadStates.RemoveHead()) + delete state; + while (ThreadState* state = fPreAllocatedThreadStates.RemoveHead()) + delete state; + while (ImageInfo* info = fImageInfos.RemoveHead()) + delete info; + } + + status_t Dump(const char* path, bool killTeam) + { + // the path must be absolute + if (path[0] != '/') + return B_BAD_VALUE; + + AutoLocker teamLocker(fTeam); + + // indicate that we're dumping core + if ((atomic_or(&fTeam->flags, TEAM_FLAG_DUMP_CORE) + & TEAM_FLAG_DUMP_CORE) != 0) { + return B_BUSY; + } + + fTeam->SetCoreDumpCondition(&fThreadBlockCondition); + + int32 threadCount = _SetThreadsCoreDumpFlag(true); + + teamLocker.Unlock(); + + // write the core file + status_t error = _Dump(path, threadCount); + + // send kill signal, if requested + if (killTeam) + kill_team(fTeam->id); + + // clean up the team state and wake up waiting threads + teamLocker.Lock(); + + fTeam->SetCoreDumpCondition(NULL); + + atomic_and(&fTeam->flags, ~(int32)TEAM_FLAG_DUMP_CORE); + + _SetThreadsCoreDumpFlag(false); + + fThreadBlockCondition.NotifyAll(); + + return error; + } + +private: + status_t _Dump(const char* path, int32 threadCount) + { + status_t error = _GetTeamInfo(); + if (error != B_OK) + return error; + + // pre-allocate a list of thread states + if (!_AllocateThreadStates(threadCount)) + return B_NO_MEMORY; + + // collect the threads states + _GetThreadStates(); + + // collect the other team information + if (!_GetAreaInfos() || !_GetImageInfos()) + return B_NO_MEMORY; + + // open the file + error = fFile.Init(path); + if (error != B_OK) + return error; + + _PrepareCoreFileInfo(); + + // write ELF header + error = _WriteElfHeader(); + if (error != B_OK) + return error; + + // write note segment + error = _WriteNotes(); + if (error != B_OK) + return error; + + size_t notesEndOffset = (size_t)fFile.EndOffset(); + fNoteSegmentSize = notesEndOffset - fNoteSegmentOffset; + fFirstAreaSegmentOffset = (notesEndOffset + B_PAGE_SIZE - 1) + / B_PAGE_SIZE * B_PAGE_SIZE; + + error = _WriteProgramHeaders(); + if (error != B_OK) + return error; + + // write area segments + error = _WriteAreaSegments(); + if (error != B_OK) + return error; + + return _WriteElfHeader(); + } + + int32 _SetThreadsCoreDumpFlag(bool setFlag) + { + int32 count = 0; + + for (Thread* thread = fTeam->thread_list; thread != NULL; + thread = thread->team_next) { + count++; + if (setFlag) { + atomic_or(&thread->flags, THREAD_FLAGS_TRAP_FOR_CORE_DUMP); + } else { + atomic_and(&thread->flags, + ~(int32)THREAD_FLAGS_TRAP_FOR_CORE_DUMP); + } + } + + return count; + } + + status_t _GetTeamInfo() + { + return get_team_info(fTeam->id, &fTeamInfo); + } + + bool _AllocateThreadStates(int32 count) + { + if (!_PreAllocateThreadStates(count)) + return false; + + TeamLocker teamLocker(fTeam); + + for (;;) { + fThreadCount = 0; + int32 missing = 0; + + for (Thread* thread = fTeam->thread_list; thread != NULL; + thread = 
thread->team_next) { + fThreadCount++; + ThreadState* state = fPreAllocatedThreadStates.RemoveHead(); + if (state != NULL) { + state->SetThread(thread); + fThreadStates.Insert(state); + } else + missing++; + } + + if (missing == 0) + break; + + teamLocker.Unlock(); + + fPreAllocatedThreadStates.MoveFrom(&fThreadStates); + if (!_PreAllocateThreadStates(missing)) + return false; + + teamLocker.Lock(); + } + + return true; + } + + bool _PreAllocateThreadStates(int32 count) + { + for (int32 i = 0; i < count; i++) { + ThreadState* state = ThreadState::Create(); + if (state == NULL) + return false; + fPreAllocatedThreadStates.Insert(state); + } + + return true; + } + + void _GetThreadStates() + { + for (;;) { + bool missing = false; + for (ThreadStateList::Iterator it = fThreadStates.GetIterator(); + ThreadState* state = it.Next();) { + if (state->IsComplete()) + continue; + + Thread* thread = state->GetThread(); + AutoLocker threadLocker(thread); + if (thread->team != fTeam) { + // no longer in our team -- i.e. dying and transferred to + // the kernel team + threadLocker.Unlock(); + it.Remove(); + delete state; + fThreadCount--; + continue; + } + + InterruptsSpinLocker schedulerLocker(&thread->scheduler_lock); + if (thread != fCurrentThread + && thread->state == B_THREAD_RUNNING) { + missing = true; + continue; + } + + state->GetState(); + state->SetComplete(true); + } + + if (!missing) + break; + + // We still haven't got a state for all threads. Wait a moment and + // try again. + snooze(10000); + } + } + + bool _GetAreaInfos() + { + for (;;) { + AddressSpaceReadLocker addressSpaceLocker(fTeam->address_space, + true); + + for (VMAddressSpace::AreaIterator it + = addressSpaceLocker.AddressSpace()->GetAreaIterator(); + VMArea* area = it.Next();) { + + VMCache* cache = vm_area_get_locked_cache(area); + size_t ramSize = (size_t)cache->page_count * B_PAGE_SIZE; + // simplified, but what the kernel uses as well ATM + + // iterate to the root cache and, if it is a mapped file, get + // the file's node_ref + while (VMCache* source = cache->source) { + source->Lock(); + source->AcquireRefLocked(); + cache->ReleaseRefAndUnlock(); + cache = source; + } + + dev_t deviceId = -1; + ino_t nodeId = -1; + if (cache->type == CACHE_TYPE_VNODE) { + VMVnodeCache* vnodeCache = (VMVnodeCache*)cache; + deviceId = vnodeCache->DeviceId(); + nodeId = vnodeCache->InodeId(); + } + + cache->ReleaseRefAndUnlock(); + + AreaInfo* areaInfo = AreaInfo::Create(fAreaInfoAllocator, area, + ramSize, deviceId, nodeId); + + if (areaInfo != NULL) + fAreaInfos.Insert(areaInfo); + } + + addressSpaceLocker.Unlock(); + + if (!fAreaInfoAllocator.HasMissingAllocations()) + return true; + + if (!fAreaInfoAllocator.Reallocate()) + return false; + } + } + + bool _GetImageInfos() + { + return image_iterate_through_team_images(fTeam->id, + &_GetImageInfoCallback, this) == NULL; + } + + static bool _GetImageInfoCallback(struct image* image, void* cookie) + { + return ((CoreDumper*)cookie)->_GetImageInfo(image); + } + + bool _GetImageInfo(struct image* image) + { + ImageInfo* info = ImageInfo::Create(image); + if (info == NULL) + return true; + + fImageInfos.Insert(info); + return false; + } + + void _PrepareCoreFileInfo() + { + // assign image infos to area infos where possible + fAreaCount = 0; + fMappedFilesCount = 0; + for (AreaInfoList::Iterator it = fAreaInfos.GetIterator(); + AreaInfo* areaInfo = it.Next();) { + fAreaCount++; + dev_t deviceId = areaInfo->DeviceId(); + if (deviceId < 0) + continue; + ImageInfo* imageInfo = 
_FindImageInfo(deviceId, areaInfo->NodeId()); + if (imageInfo != NULL) { + areaInfo->SetImageInfo(imageInfo); + fMappedFilesCount++; + } + } + + fImageCount = fImageInfos.Count(); + fSegmentCount = 1 + fAreaCount; + fProgramHeadersOffset = sizeof(elf_ehdr); + fNoteSegmentOffset = fProgramHeadersOffset + + sizeof(elf_phdr) * fSegmentCount; + } + + ImageInfo* _FindImageInfo(dev_t deviceId, ino_t nodeId) const + { + for (ImageInfoList::ConstIterator it = fImageInfos.GetIterator(); + ImageInfo* info = it.Next();) { + if (info->DeviceId() == deviceId && info->NodeId() == nodeId) + return info; + } + + return NULL; + } + + status_t _WriteElfHeader() + { + elf_ehdr header; + memset(&header, 0, sizeof(header)); + + // e_ident + header.e_ident[EI_MAG0] = ELF_MAGIC[0]; + header.e_ident[EI_MAG1] = ELF_MAGIC[1]; + header.e_ident[EI_MAG2] = ELF_MAGIC[2]; + header.e_ident[EI_MAG3] = ELF_MAGIC[3]; +#ifdef B_HAIKU_64_BIT + header.e_ident[EI_CLASS] = ELFCLASS64; +#else + header.e_ident[EI_CLASS] = ELFCLASS32; +#endif +#if B_HOST_IS_LENDIAN + header.e_ident[EI_DATA] = ELFDATA2LSB; +#else + header.e_ident[EI_DATA] = ELFDATA2MSB; +#endif + header.e_ident[EI_VERSION] = EV_CURRENT; + + // e_type + header.e_type = ET_CORE; + + // e_machine +#if defined(__HAIKU_ARCH_X86) + header.e_machine = EM_386; +#elif defined(__HAIKU_ARCH_X86_64) + header.e_machine = EM_X86_64; +#elif defined(__HAIKU_ARCH_PPC) + header.e_machine = EM_PPC64; +#elif defined(__HAIKU_ARCH_M68K) + header.e_machine = EM_68K; +#elif defined(__HAIKU_ARCH_MIPSEL) + header.e_machine = EM_MIPS; +#elif defined(__HAIKU_ARCH_ARM) + header.e_machine = EM_ARM; +#else +# error Unsupported architecture! +#endif + + header.e_version = EV_CURRENT; + header.e_entry = 0; + header.e_phoff = sizeof(header); + header.e_shoff = 0; + header.e_flags = 0; + header.e_ehsize = sizeof(header); + header.e_phentsize = sizeof(elf_phdr); + header.e_phnum = fSegmentCount; + header.e_shentsize = sizeof(elf_shdr); + header.e_shnum = 0; + header.e_shstrndx = SHN_UNDEF; + + return fFile.WriteAt(0, &header, sizeof(header)); + } + + status_t _WriteProgramHeaders() + { + fFile.Seek(fProgramHeadersOffset); + + // write the header for the notes segment + elf_phdr header; + memset(&header, 0, sizeof(header)); + header.p_type = PT_NOTE; + header.p_flags = 0; + header.p_offset = fNoteSegmentOffset; + header.p_vaddr = 0; + header.p_paddr = 0; + header.p_filesz = fNoteSegmentSize; + header.p_memsz = 0; + header.p_align = 0; + fFile.Write(header); + + // write the headers for the area segments + size_t segmentOffset = fFirstAreaSegmentOffset; + for (AreaInfoList::Iterator it = fAreaInfos.GetIterator(); + AreaInfo* areaInfo = it.Next();) { + memset(&header, 0, sizeof(header)); + header.p_type = PT_LOAD; + header.p_flags = 0; + uint32 protection = areaInfo->Protection(); + if ((protection & B_READ_AREA) != 0) + header.p_flags |= PF_READ; + if ((protection & B_WRITE_AREA) != 0) + header.p_flags |= PF_WRITE; + if ((protection & B_EXECUTE_AREA) != 0) + header.p_flags |= PF_EXECUTE; + header.p_offset = segmentOffset; + header.p_vaddr = areaInfo->Base(); + header.p_paddr = 0; + header.p_filesz = areaInfo->Size(); + header.p_memsz = areaInfo->Size(); + header.p_align = 0; + fFile.Write(header); + + segmentOffset += areaInfo->Size(); + } + + return fFile.Status(); + } + + status_t _WriteAreaSegments() + { + fFile.Seek(fFirstAreaSegmentOffset); + + for (AreaInfoList::Iterator it = fAreaInfos.GetIterator(); + AreaInfo* areaInfo = it.Next();) { + status_t error = fFile.WriteUserArea(areaInfo->Base(), + 
areaInfo->Size()); + if (error != B_OK) + return error; + } + + return fFile.Status(); + } + + status_t _WriteNotes() + { + status_t error = fFile.Seek((off_t)fNoteSegmentOffset); + if (error != B_OK) + return error; + + error = _WriteFilesNote(); + if (error != B_OK) + return error; + + error = _WriteTeamNote(); + if (error != B_OK) + return error; + + error = _WriteAreasNote(); + if (error != B_OK) + return error; + + error = _WriteImagesNote(); + if (error != B_OK) + return error; + + error = _WriteThreadsNote(); + if (error != B_OK) + return error; + + return B_OK; + } + + template + void _WriteTeamNote(Writer& writer) + { + elf_note_team note; + memset(¬e, 0, sizeof(note)); + note.nt_id = fTeamInfo.team; + note.nt_uid = fTeamInfo.uid; + note.nt_gid = fTeamInfo.gid; + writer.Write(¬e, sizeof(note)); + + // write args + const char* args = fTeamInfo.args; + writer.Write(args, strlen(args) + 1); + } + + status_t _WriteTeamNote() + { + // determine needed size for the note's data + DummyWriter dummyWriter; + _WriteTeamNote(dummyWriter); + size_t dataSize = dummyWriter.BytesWritten(); + + // write the note header + _WriteNoteHeader(kHaikuNote, NT_TEAM, dataSize); + + // write the note data + _WriteTeamNote(fFile); + + // padding + _WriteNotePadding(dataSize); + + return fFile.Status(); + } + + template + void _WriteFilesNote(Writer& writer) + { + // file count and table size + writer.Write(fMappedFilesCount); + writer.Write((size_t)B_PAGE_SIZE); + + // write table + for (AreaInfoList::Iterator it = fAreaInfos.GetIterator(); + AreaInfo* areaInfo = it.Next();) { + if (areaInfo->GetImageInfo() == NULL) + continue; + + // start address, end address, and file offset in pages + writer.Write(areaInfo->Base()); + writer.Write(areaInfo->Base() + areaInfo->Size()); + writer.Write(size_t(areaInfo->CacheOffset() / B_PAGE_SIZE)); + } + + // write strings + for (AreaInfoList::Iterator it = fAreaInfos.GetIterator(); + AreaInfo* areaInfo = it.Next();) { + ImageInfo* imageInfo = areaInfo->GetImageInfo(); + if (imageInfo == NULL) + continue; + + const char* name = imageInfo->Name(); + writer.Write(name, strlen(name) + 1); + } + } + + status_t _WriteFilesNote() + { + // determine needed size for the note's data + DummyWriter dummyWriter; + _WriteFilesNote(dummyWriter); + size_t dataSize = dummyWriter.BytesWritten(); + + // write the note header + _WriteNoteHeader(kCoreNote, NT_FILE, dataSize); + + // write the note data + _WriteFilesNote(fFile); + + // padding + _WriteNotePadding(dataSize); + + return fFile.Status(); + } + + template + void _WriteAreasNote(Writer& writer) + { + // area count + writer.Write(fAreaCount); + + // write table + for (AreaInfoList::Iterator it = fAreaInfos.GetIterator(); + AreaInfo* areaInfo = it.Next();) { + elf_note_area_entry entry; + memset(&entry, 0, sizeof(entry)); + entry.na_id = areaInfo->Id(); + entry.na_lock = areaInfo->Lock(); + entry.na_protection = areaInfo->Protection(); + entry.na_base = areaInfo->Base(); + entry.na_size = areaInfo->Size(); + entry.na_ram_size = areaInfo->RamSize(); + writer.Write(&entry, sizeof(entry)); + } + + // write strings + for (AreaInfoList::Iterator it = fAreaInfos.GetIterator(); + AreaInfo* areaInfo = it.Next();) { + const char* name = areaInfo->Name(); + writer.Write(name, strlen(name) + 1); + } + } + + status_t _WriteAreasNote() + { + // determine needed size for the note's data + DummyWriter dummyWriter; + _WriteAreasNote(dummyWriter); + size_t dataSize = dummyWriter.BytesWritten(); + + // write the note header + 
_WriteNoteHeader(kHaikuNote, NT_AREAS, dataSize); + + // write the note data + _WriteAreasNote(fFile); + + // padding + _WriteNotePadding(dataSize); + + return fFile.Status(); + } + + template + void _WriteImagesNote(Writer& writer) + { + // image count + writer.Write(fImageCount); + + // write table + for (ImageInfoList::Iterator it = fImageInfos.GetIterator(); + ImageInfo* imageInfo = it.Next();) { + elf_note_image_entry entry; + memset(&entry, 0, sizeof(entry)); + entry.ni_id = imageInfo->Id(); + entry.ni_type = imageInfo->Type(); + entry.ni_init_routine = imageInfo->InitRoutine(); + entry.ni_term_routine = imageInfo->TermRoutine(); + entry.ni_device = imageInfo->DeviceId(); + entry.ni_node = imageInfo->NodeId(); + entry.ni_text_base = imageInfo->TextBase(); + entry.ni_text_size = imageInfo->TextSize(); + entry.ni_data_base = imageInfo->DataBase(); + entry.ni_data_size = imageInfo->DataSize(); + writer.Write(&entry, sizeof(entry)); + } + + // write strings + for (ImageInfoList::Iterator it = fImageInfos.GetIterator(); + ImageInfo* imageInfo = it.Next();) { + const char* name = imageInfo->Name(); + writer.Write(name, strlen(name) + 1); + } + } + + status_t _WriteImagesNote() + { + // determine needed size for the note's data + DummyWriter dummyWriter; + _WriteImagesNote(dummyWriter); + size_t dataSize = dummyWriter.BytesWritten(); + + // write the note header + _WriteNoteHeader(kHaikuNote, NT_IMAGES, dataSize); + + // write the note data + _WriteImagesNote(fFile); + + // padding + _WriteNotePadding(dataSize); + + return fFile.Status(); + } + + template + void _WriteThreadsNote(Writer& writer) + { + // thread count and size of CPU state + writer.Write(fThreadCount); + writer.Write(sizeof(debug_cpu_state)); + + // write table + for (ThreadStateList::Iterator it = fThreadStates.GetIterator(); + ThreadState* state = it.Next();) { + elf_note_thread_entry entry; + memset(&entry, 0, sizeof(entry)); + entry.nth_id = state->GetThread()->id; + entry.nth_state = state->State(); + entry.nth_priority = state->Priority(); + entry.nth_stack_base = state->StackBase(); + entry.nth_stack_end = state->StackEnd(); + writer.Write(&entry, sizeof(entry)); + writer.Write(state->CpuState(), sizeof(debug_cpu_state)); + } + + // write strings + for (ThreadStateList::Iterator it = fThreadStates.GetIterator(); + ThreadState* state = it.Next();) { + const char* name = state->Name(); + writer.Write(name, strlen(name) + 1); + } + } + + status_t _WriteThreadsNote() + { + // determine needed size for the note's data + DummyWriter dummyWriter; + _WriteThreadsNote(dummyWriter); + size_t dataSize = dummyWriter.BytesWritten(); + + // write the note header + _WriteNoteHeader(kHaikuNote, NT_THREADS, dataSize); + + // write the note data + _WriteThreadsNote(fFile); + + // padding + _WriteNotePadding(dataSize); + + return fFile.Status(); + } + + status_t _WriteNoteHeader(const char* name, uint32 type, uint32 dataSize) + { + // prepare and write the header + Elf32_Nhdr noteHeader; + memset(¬eHeader, 0, sizeof(noteHeader)); + size_t nameSize = strlen(name) + 1; + noteHeader.n_namesz = nameSize; + noteHeader.n_descsz = dataSize; + noteHeader.n_type = type; + fFile.Write(noteHeader); + + // write the name + fFile.Write(name, nameSize); + // pad the name to 4 byte alignment + _WriteNotePadding(nameSize); + return fFile.Status(); + } + + status_t _WriteNotePadding(size_t sizeToPad) + { + if (sizeToPad % 4 != 0) { + uint8 pad[3] = {}; + fFile.Write(&pad, 4 - sizeToPad % 4); + } + return fFile.Status(); + } + +private: + Thread* 
fCurrentThread; + Team* fTeam; + BufferedFile fFile; + TeamInfo fTeamInfo; + size_t fThreadCount; + ThreadStateList fThreadStates; + ThreadStateList fPreAllocatedThreadStates; + Allocator fAreaInfoAllocator; + AreaInfoList fAreaInfos; + ImageInfoList fImageInfos; + ConditionVariable fThreadBlockCondition; + size_t fSegmentCount; + size_t fProgramHeadersOffset; + size_t fNoteSegmentOffset; + size_t fNoteSegmentSize; + size_t fFirstAreaSegmentOffset; + size_t fAreaCount; + size_t fImageCount; + size_t fMappedFilesCount; +}; + + +} // unnamed namespace + + +status_t +core_dump_write_core_file(const char* path, bool killTeam) +{ + TRACE("core_dump_write_core_file(\"%s\", %d): team: %" B_PRId32 "\n", path, + killTeam, team_get_current_team_id()); + + CoreDumper* coreDumper = new(std::nothrow) CoreDumper(); + if (coreDumper == NULL) + return B_NO_MEMORY; + ObjectDeleter coreDumperDeleter(coreDumper); + return coreDumper->Dump(path, killTeam); +} + + +void +core_dump_trap_thread() +{ + Thread* thread = thread_get_current_thread(); + ConditionVariableEntry conditionVariableEntry; + TeamLocker teamLocker(thread->team); + + while ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP) + != 0) { + thread->team->CoreDumpCondition()->Add(&conditionVariableEntry); + teamLocker.Unlock(); + conditionVariableEntry.Wait(); + teamLocker.Lock(); + } +} diff --git a/src/system/kernel/debug/user_debugger.cpp b/src/system/kernel/debug/user_debugger.cpp index 0d74fcd881..fb8fa5f870 100644 --- a/src/system/kernel/debug/user_debugger.cpp +++ b/src/system/kernel/debug/user_debugger.cpp @@ -1,5 +1,5 @@ /* - * Copyright 2005-2011, Ingo Weinhold, ingo_weinhold@gmx.de. + * Copyright 2005-2016, Ingo Weinhold, ingo_weinhold@gmx.de. * Copyright 2015, Rene Gollent, rene@gollent.com. * Distributed under the terms of the MIT License. */ @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -1696,6 +1697,7 @@ debug_nub_thread(void *) debug_nub_get_signal_handler_reply get_signal_handler; debug_nub_start_profiler_reply start_profiler; debug_profiler_update profiler_update; + debug_nub_write_core_file_reply write_core_file; } reply; int32 replySize = 0; port_id replyPort = -1; @@ -2416,6 +2418,29 @@ debug_nub_thread(void *) delete_area(sampleArea); } } + + break; + } + + case B_DEBUG_WRITE_CORE_FILE: + { + // get the parameters + replyPort = message.write_core_file.reply_port; + char* path = message.write_core_file.path; + path[sizeof(message.write_core_file.path) - 1] = '\0'; + + TRACE(("nub thread %" B_PRId32 ": B_DEBUG_WRITE_CORE_FILE" + ": path: %s\n", nubThread->id, path)); + + // write the core file + status_t result = core_dump_write_core_file(path, false); + + // prepare the reply + reply.write_core_file.error = result; + replySize = sizeof(reply.write_core_file); + sendReply = true; + + break; } } diff --git a/src/system/kernel/signal.cpp b/src/system/kernel/signal.cpp index 8305e0bf95..4191c4f4c3 100644 --- a/src/system/kernel/signal.cpp +++ b/src/system/kernel/signal.cpp @@ -1,6 +1,6 @@ /* * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org. - * Copyright 2011, Ingo Weinhold, ingo_weinhold@gmx.de. + * Copyright 2011-2016, Ingo Weinhold, ingo_weinhold@gmx.de. * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de. * Copyright 2002, Angelo Mottola, a.mottola@libero.it. * @@ -21,6 +21,7 @@ #include #include +#include #include #include #include @@ -957,15 +958,25 @@ handle_signals(Thread* thread) } // Unless SIGKILL[THR] are pending, check, if the thread shall stop for - // debugging. 
- if ((signalMask & KILL_SIGNALS) == 0 - && (atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) - != 0) { - locker.Unlock(); - teamLocker.Unlock(); + // a core dump or for debugging. + if ((signalMask & KILL_SIGNALS) == 0) { + if ((atomic_get(&thread->flags) & THREAD_FLAGS_TRAP_FOR_CORE_DUMP) + != 0) { + locker.Unlock(); + teamLocker.Unlock(); - user_debug_stop_thread(); - continue; + core_dump_trap_thread(); + continue; + } + + if ((atomic_get(&thread->debug_info.flags) & B_THREAD_DEBUG_STOP) + != 0) { + locker.Unlock(); + teamLocker.Unlock(); + + user_debug_stop_thread(); + continue; + } } // We're done, if there aren't any pending signals anymore. diff --git a/src/system/kernel/team.cpp b/src/system/kernel/team.cpp index 83fa90024c..c55b23957e 100644 --- a/src/system/kernel/team.cpp +++ b/src/system/kernel/team.cpp @@ -1,6 +1,6 @@ /* * Copyright 2014, Paweł Dziepak, pdziepak@quarnos.org. - * Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de. + * Copyright 2008-2016, Ingo Weinhold, ingo_weinhold@gmx.de. * Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de. * Distributed under the terms of the MIT License. * @@ -503,6 +503,8 @@ Team::Team(team_id id, bool kernel) memset(fSignalActions, 0, sizeof(fSignalActions)); fUserDefinedTimerCount = 0; + + fCoreDumpCondition = NULL; }
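
Reviewer note (not part of the patch): a minimal userland sketch of how a debugger might use the new B_DEBUG_WRITE_CORE_FILE nub message. It assumes the caller is already installed as the target team's debugger and holds the team's nub port (as returned by install_team_debugger()), and it uses raw ports rather than the private debug_support helpers; request_core_file() and its error handling are illustrative only.

#include <debugger.h>
#include <OS.h>
#include <string.h>

static status_t
request_core_file(port_id nubPort, const char* path)
{
	// port on which the nub thread will post its reply
	port_id replyPort = create_port(1, "core file reply");
	if (replyPort < 0)
		return replyPort;

	debug_nub_write_core_file message;
	memset(&message, 0, sizeof(message));
	message.reply_port = replyPort;
	strlcpy(message.path, path, sizeof(message.path));
		// must be an absolute path that does not exist yet

	// send the request to the team's debug nub port
	status_t error = write_port(nubPort, B_DEBUG_WRITE_CORE_FILE, &message,
		sizeof(message));
	if (error == B_OK) {
		debug_nub_write_core_file_reply reply;
		int32 code;
		ssize_t bytesRead = read_port(replyPort, &code, &reply, sizeof(reply));
		error = bytesRead < 0 ? (status_t)bytesRead : reply.error;
	}

	delete_port(replyPort);
	return error;
}

The reply's error field carries the result of core_dump_write_core_file(); the message code of the reply is not checked here.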
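
Also not part of the patch: a hedged reader-side sketch of walking the PT_NOTE segment the patch produces. Both the note name and the descriptor are padded to 4-byte boundaries, matching _WriteNoteHeader()/_WriteNotePadding() above; the 64-bit note header has the same layout, so only the 32-bit structs are shown. iterate_core_notes(), its parameters, the include of the patched elf.h definitions, and the omission of bounds/error checks are all assumptions for illustration.

#include <elf.h>
	// assumed to provide the patched definitions (Elf32_Nhdr, ELF_NOTE_HAIKU, NT_TEAM, ...)
#include <SupportDefs.h>
#include <stdio.h>
#include <string.h>

static void
iterate_core_notes(const uint8* segment, size_t segmentSize)
{
	size_t offset = 0;
	while (offset + sizeof(Elf32_Nhdr) <= segmentSize) {
		const Elf32_Nhdr* header = (const Elf32_Nhdr*)(segment + offset);
		offset += sizeof(Elf32_Nhdr);

		const char* name = (const char*)(segment + offset);
		offset += (header->n_namesz + 3) / 4 * 4;
			// the writer pads the note name to 4-byte alignment

		const uint8* data = segment + offset;
		offset += (header->n_descsz + 3) / 4 * 4;
			// the descriptor is padded to 4-byte alignment as well

		if (strcmp(name, ELF_NOTE_HAIKU) == 0 && header->n_type == NT_TEAM) {
			const Elf32_Note_Team* team = (const Elf32_Note_Team*)data;
			printf("core dump of team %" B_PRId32 " (uid %" B_PRId32 ")\n",
				team->nt_id, team->nt_uid);
		}
	}
}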