mmlr + bonefish:
* Add optional stack trace capturing for slab memory manager tracing.
* Add allocation tracking for the slab allocator (enabled via
  SLAB_ALLOCATION_TRACKING). The allocation tracking requires tracing with
  stack traces to be enabled for object caches and/or the memory manager.
  - Add class AllocationTrackingInfo that associates an allocation with its
    respective tracing entry. The structure is added to the end of an
    allocation done by the memory manager. For the object caches there's a
    separate array for each slab.
  - Add code range markers to the slab code, so that the first caller into
    the slab code can be retrieved from the stack traces.
  - Add KDL command "allocations_per_caller" that lists all allocations
    summarized by caller.
* Move debug definitions from slab_private.h to slab_debug.h.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@43072 a95241bf-73f2-0310-859d-f6bbb57e9c96
Commit: e1c6140eaa
Parent: 97ac7257f6
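Enabling the new tracking takes more than flipping SLAB_ALLOCATION_TRACKING: as the slab_debug.h hunks further below spell out, the corresponding tracing also has to be enabled with a non-zero stack trace depth. A minimal sketch of a local debug configuration that makes both object cache and memory manager tracking available (the values are illustrative only; the defaults added by this commit leave everything at 0):

    #define SLAB_ALLOCATION_TRACKING                 1   /* kernel_debug_config.h */

    #define SLAB_OBJECT_CACHE_TRACING                1   /* tracing_config.h */
    #define SLAB_OBJECT_CACHE_TRACING_STACK_TRACE    10  /* stack trace depth */
    #define SLAB_MEMORY_MANAGER_TRACING              1
    #define SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE  10  /* stack trace depth */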
@@ -42,7 +42,7 @@
 #define DEBUG_FILE_MAP KDEBUG_LEVEL_1
 
-// heap
+// heap / slab
 
 // Initialize newly allocated memory with something non zero.
 #define PARANOID_KERNEL_MALLOC KDEBUG_LEVEL_2
@@ -57,6 +57,9 @@
 // Enables the "allocations*" debugger commands.
 #define KERNEL_HEAP_LEAK_CHECK 0
 
+// Enables the "allocations*" debugger commands for the slab.
+#define SLAB_ALLOCATION_TRACKING 0
+
 
 // interrupts
 
@@ -47,6 +47,7 @@
 #define SCHEDULING_ANALYSIS_TRACING 0
 #define SIGNAL_TRACING 0
 #define SLAB_MEMORY_MANAGER_TRACING 0
+#define SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE 0 /* stack trace depth */
 #define SLAB_OBJECT_CACHE_TRACING 0
 #define SLAB_OBJECT_CACHE_TRACING_STACK_TRACE 0 /* stack trace depth */
 #define SWAP_TRACING 0
@@ -12,6 +12,9 @@
 #include "slab_private.h"
 
 
+RANGE_MARKER_FUNCTION_BEGIN(SlabHashedObjectCache)
+
+
 static inline int
 __fls0(size_t value)
 {
@@ -109,8 +112,9 @@ HashedObjectCache::CreateSlab(uint32 flags)
 
     HashedSlab* slab = allocate_slab(flags);
     if (slab != NULL) {
-        void* pages;
-        if (MemoryManager::Allocate(this, flags, pages) == B_OK) {
+        void* pages = NULL;
+        if (MemoryManager::Allocate(this, flags, pages) == B_OK
+            && AllocateTrackingInfos(slab, slab_size, flags) == B_OK) {
             Lock();
             if (InitSlab(slab, pages, slab_size, flags)) {
                 hash_table.InsertUnchecked(slab);
@@ -118,9 +122,12 @@ HashedObjectCache::CreateSlab(uint32 flags)
                 return slab;
             }
             Unlock();
-            MemoryManager::Free(pages, flags);
+            FreeTrackingInfos(slab, flags);
         }
 
+        if (pages != NULL)
+            MemoryManager::Free(pages, flags);
+
         free_slab(slab, flags);
     }
 
@@ -140,6 +147,7 @@ HashedObjectCache::ReturnSlab(slab* _slab, uint32 flags)
     UninitSlab(slab);
 
     Unlock();
+    FreeTrackingInfos(slab, flags);
     MemoryManager::Free(slab->pages, flags);
     free_slab(slab, flags);
     Lock();
@@ -180,3 +188,6 @@ HashedObjectCache::_ResizeHashTableIfNeeded(uint32 flags)
         }
     }
 }
+
+
+RANGE_MARKER_FUNCTION_END(SlabHashedObjectCache)
@@ -22,6 +22,7 @@
 #include "kernel_debug_config.h"
 
 #include "ObjectCache.h"
+#include "slab_debug.h"
 #include "slab_private.h"
 
 
@@ -58,6 +59,9 @@ MemoryManager::AllocationEntry* MemoryManager::sAllocationEntryDontWait;
 bool MemoryManager::sMaintenanceNeeded;
 
 
+RANGE_MARKER_FUNCTION_BEGIN(SlabMemoryManager)
+
+
 // #pragma mark - kernel tracing
 
 
@@ -67,9 +71,12 @@ bool MemoryManager::sMaintenanceNeeded;
 //namespace SlabMemoryManagerCacheTracing {
 struct MemoryManager::Tracing {
 
-class MemoryManagerTraceEntry : public AbstractTraceEntry {
+class MemoryManagerTraceEntry
+    : public TRACE_ENTRY_SELECTOR(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE) {
 public:
     MemoryManagerTraceEntry()
+        :
+        TraceEntryBase(SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE, 0, true)
     {
     }
 };
@@ -592,7 +599,14 @@ MemoryManager::Free(void* pages, uint32 flags)
 /*static*/ status_t
 MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
 {
+#if SLAB_MEMORY_MANAGER_TRACING
+#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
+    AbstractTraceEntryWithStackTrace* traceEntry = T(AllocateRaw(size, flags));
+    size += sizeof(AllocationTrackingInfo);
+#else
     T(AllocateRaw(size, flags));
+#endif
+#endif
 
     size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
 
@@ -619,8 +633,13 @@ MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
         &virtualRestrictions, &physicalRestrictions, &_pages);
 
     status_t result = area >= 0 ? B_OK : area;
-    if (result == B_OK)
+    if (result == B_OK) {
         fill_allocated_block(_pages, size);
+#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
+        _AddTrackingInfo(_pages, size, traceEntry);
+#endif
+    }
 
     return result;
 }
@@ -661,6 +680,9 @@ MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
     _pages = (void*)chunkAddress;
 
     fill_allocated_block(_pages, size);
+#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
+    _AddTrackingInfo(_pages, size, traceEntry);
+#endif
 
     TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
         _pages, int(metaChunk - area->metaChunks),
@@ -1959,3 +1981,20 @@ MemoryManager::_DumpAreas(int argc, char** argv)
 
     return 0;
 }
+
+
+#if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
+
+void
+MemoryManager::_AddTrackingInfo(void* allocation, size_t size,
+    AbstractTraceEntryWithStackTrace* traceEntry)
+{
+    AllocationTrackingInfo* info = (AllocationTrackingInfo*)
+        ((uint8*)allocation + size - sizeof(AllocationTrackingInfo));
+    info->Init(traceEntry);
+}
+
+#endif // SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
+
+
+RANGE_MARKER_FUNCTION_END(SlabMemoryManager)
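The two AllocateRaw() hunks above work together: the request is grown by sizeof(AllocationTrackingInfo) up front, and _AddTrackingInfo() later finds the record again at the very end of the returned block. A standalone sketch of that tail-of-allocation arithmetic, with illustrative stand-in types (not part of the commit itself):

    #include <stddef.h>
    #include <stdint.h>

    // Stand-in for the AllocationTrackingInfo introduced by this commit:
    // a trace entry pointer plus the entry's timestamp (see slab_debug.h below).
    struct tracking_info {
        void*   trace_entry;
        int64_t trace_entry_timestamp;
    };

    // AllocateRaw() enlarges the request so the record fits behind the payload...
    static inline size_t
    tracked_size(size_t size)
    {
        return size + sizeof(tracking_info);
    }

    // ...and _AddTrackingInfo() locates it again at the end of the block.
    static inline tracking_info*
    tracking_info_of(void* allocation, size_t size)
    {
        return (tracking_info*)
            ((uint8_t*)allocation + size - sizeof(tracking_info));
    }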
@@ -14,7 +14,11 @@
 #include <util/DoublyLinkedList.h>
 #include <util/OpenHashTable.h>
 
+#include "kernel_debug_config.h"
+#include "tracing_config.h"
+
+class AbstractTraceEntryWithStackTrace;
 struct kernel_args;
 struct ObjectCache;
 struct VMArea;
 
@@ -191,6 +195,11 @@ private:
     static int      _DumpArea(int argc, char** argv);
     static int      _DumpAreas(int argc, char** argv);
 
+#if SLAB_ALLOCATION_TRACKING && SLAB_MEMORY_MANAGER_TRACING
+    static void     _AddTrackingInfo(void* allocation, size_t size,
+                        AbstractTraceEntryWithStackTrace* entry);
+#endif
+
 private:
     static const size_t kAreaAdminSize
                         = ROUNDUP(sizeof(Area), B_PAGE_SIZE);
@@ -14,9 +14,13 @@
 #include <vm/vm.h>
 #include <vm/VMAddressSpace.h>
 
 #include "MemoryManager.h"
 #include "slab_private.h"
 
 
+RANGE_MARKER_FUNCTION_BEGIN(SlabObjectCache)
+
+
 static void
 object_cache_return_object_wrapper(object_depot* depot, void* cookie,
     void* object, uint32 flags)
@@ -137,6 +141,7 @@ ObjectCache::InitSlab(slab* slab, void* pages, size_t byteCount, uint32 flags)
 
     CREATE_PARANOIA_CHECK_SET(slab, "slab");
 
+
     for (size_t i = 0; i < slab->size; i++) {
         status_t status = B_OK;
         if (constructor)
@@ -267,3 +272,43 @@ ObjectCache::AssertObjectNotFreed(void* object)
 }
 
 #endif // PARANOID_KERNEL_FREE
+
+
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+
+status_t
+ObjectCache::AllocateTrackingInfos(slab* slab, size_t byteCount, uint32 flags)
+{
+    void* pages;
+    size_t objectCount = byteCount / object_size;
+    status_t result = MemoryManager::AllocateRaw(
+        objectCount * sizeof(AllocationTrackingInfo), flags, pages);
+    if (result == B_OK) {
+        slab->tracking = (AllocationTrackingInfo*)pages;
+        for (size_t i = 0; i < objectCount; i++)
+            slab->tracking[i].Clear();
+    }
+
+    return result;
+}
+
+
+void
+ObjectCache::FreeTrackingInfos(slab* slab, uint32 flags)
+{
+    MemoryManager::FreeRawOrReturnCache(slab->tracking, flags);
+}
+
+
+AllocationTrackingInfo*
+ObjectCache::TrackingInfoFor(void* object) const
+{
+    slab* objectSlab = ObjectSlab(object);
+    return &objectSlab->tracking[((addr_t)object - objectSlab->offset
+        - (addr_t)objectSlab->pages) / object_size];
+}
+
+#endif // SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+
+
+RANGE_MARKER_FUNCTION_END(SlabObjectCache)
@@ -14,7 +14,11 @@
 #include <slab/Slab.h>
 #include <util/DoublyLinkedList.h>
 
+#include "kernel_debug_config.h"
+#include "slab_debug.h"
+
+class AllocationTrackingInfo;
 struct ResizeRequest;
 
 
@@ -28,6 +32,9 @@ struct slab : DoublyLinkedListLinkImpl<slab> {
     size_t          count;      // free objects
     size_t          offset;
     object_link*    free;
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+    AllocationTrackingInfo* tracking;
+#endif
 };
 
 typedef DoublyLinkedList<slab> SlabList;
@@ -111,6 +118,15 @@
 #if PARANOID_KERNEL_FREE
             bool        AssertObjectNotFreed(void* object);
 #endif
+
+            status_t    AllocateTrackingInfos(slab* slab,
+                            size_t byteCount, uint32 flags);
+            void        FreeTrackingInfos(slab* slab, uint32 flags);
+
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+            AllocationTrackingInfo*
+                        TrackingInfoFor(void* object) const;
+#endif
 };
 
 
@@ -146,4 +162,20 @@ check_cache_quota(ObjectCache* cache)
 }
 
 
+#if !SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+
+inline status_t
+ObjectCache::AllocateTrackingInfos(slab* slab, size_t byteCount, uint32 flags)
+{
+    return B_OK;
+}
+
+
+inline void
+ObjectCache::FreeTrackingInfos(slab* slab, uint32 flags)
+{
+}
+
+#endif // !SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+
 #endif // OBJECT_CACHE_H
@@ -16,6 +16,7 @@
 #include <smp.h>
 #include <util/AutoLock.h>
 
+#include "slab_debug.h"
 #include "slab_private.h"
 
 
@@ -44,6 +45,9 @@ struct depot_cpu_store {
 };
 
 
+RANGE_MARKER_FUNCTION_BEGIN(SlabObjectDepot)
+
+
 bool
 DepotMagazine::IsEmpty() const
 {
@@ -458,3 +462,6 @@ dump_depot_magazine(int argCount, char** args)
 
     return 0;
 }
+
+
+RANGE_MARKER_FUNCTION_END(SlabObjectDepot)
@@ -17,6 +17,7 @@
 #include <KernelExport.h>
 
 #include <condition_variable.h>
+#include <elf.h>
 #include <kernel.h>
 #include <low_resource_manager.h>
 #include <slab/ObjectDepot.h>
@@ -30,6 +31,7 @@
 
 #include "HashedObjectCache.h"
 #include "MemoryManager.h"
+#include "slab_debug.h"
 #include "slab_private.h"
 #include "SmallObjectCache.h"
 
@@ -49,6 +51,47 @@ static MaintenanceQueue sMaintenanceQueue;
 static ConditionVariable sMaintenanceCondition;
 
 
+#if SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+struct caller_info {
+    addr_t  caller;
+    size_t  count;
+    size_t  size;
+};
+
+static const int32 kCallerInfoTableSize = 1024;
+static caller_info sCallerInfoTable[kCallerInfoTableSize];
+static int32 sCallerInfoCount = 0;
+
+
+RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
+RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
+RANGE_MARKER_FUNCTION_PROTOTYPES(SlabMemoryManager)
+RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectCache)
+RANGE_MARKER_FUNCTION_PROTOTYPES(SlabObjectDepot)
+RANGE_MARKER_FUNCTION_PROTOTYPES(Slab)
+RANGE_MARKER_FUNCTION_PROTOTYPES(SlabSmallObjectCache)
+
+
+static const addr_t kSlabCodeAddressRanges[] = {
+    RANGE_MARKER_FUNCTION_ADDRESS_RANGE(slab_allocator),
+    RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabHashedObjectCache),
+    RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabMemoryManager),
+    RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectCache),
+    RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabObjectDepot),
+    RANGE_MARKER_FUNCTION_ADDRESS_RANGE(Slab),
+    RANGE_MARKER_FUNCTION_ADDRESS_RANGE(SlabSmallObjectCache)
+};
+
+static const uint32 kSlabCodeAddressRangeCount
+    = sizeof(kSlabCodeAddressRanges) / sizeof(kSlabCodeAddressRanges[0]) / 2;
+
+#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+
+RANGE_MARKER_FUNCTION_BEGIN(Slab)
+
+
 #if SLAB_OBJECT_CACHE_TRACING
 
 
@@ -284,6 +327,205 @@ dump_cache_info(int argc, char* argv[])
 }
 
 
+#if SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+    // until memory manager tracking is analyzed
+
+static caller_info*
+get_caller_info(addr_t caller)
+{
+    // find the caller info
+    for (int32 i = 0; i < sCallerInfoCount; i++) {
+        if (caller == sCallerInfoTable[i].caller)
+            return &sCallerInfoTable[i];
+    }
+
+    // not found, add a new entry, if there are free slots
+    if (sCallerInfoCount >= kCallerInfoTableSize)
+        return NULL;
+
+    caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
+    info->caller = caller;
+    info->count = 0;
+    info->size = 0;
+
+    return info;
+}
+
+#endif // SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+
+
+static int
+caller_info_compare_size(const void* _a, const void* _b)
+{
+    const caller_info* a = (const caller_info*)_a;
+    const caller_info* b = (const caller_info*)_b;
+    return (int)(b->size - a->size);
+}
+
+
+static int
+caller_info_compare_count(const void* _a, const void* _b)
+{
+    const caller_info* a = (const caller_info*)_a;
+    const caller_info* b = (const caller_info*)_b;
+    return (int)(b->count - a->count);
+}
+
+
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+
+static bool
+analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
+    size_t& _totalAllocationSize, size_t& _totalAllocationCount)
+{
+    for (SlabList::ConstIterator it = slabList.GetIterator();
+            slab* slab = it.Next();) {
+        for (uint32 i = 0; i < slab->size; i++) {
+            AllocationTrackingInfo* info = &slab->tracking[i];
+            if (!info->IsInitialized())
+                continue;
+
+            _totalAllocationSize += cache->object_size;
+            _totalAllocationCount++;
+
+            addr_t caller = 0;
+            AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
+
+            if (traceEntry != NULL && info->IsTraceEntryValid()) {
+                caller = tracing_find_caller_in_stack_trace(
+                    traceEntry->StackTrace(), kSlabCodeAddressRanges,
+                    kSlabCodeAddressRangeCount);
+            }
+
+            caller_info* callerInfo = get_caller_info(caller);
+            if (callerInfo == NULL) {
+                kprintf("out of space for caller infos\n");
+                return false;
+            }
+
+            callerInfo->count++;
+            callerInfo->size += cache->object_size;
+        }
+    }
+
+    return true;
+}
+
+
+static bool
+analyze_allocation_callers(ObjectCache* cache, size_t& _totalAllocationSize,
+    size_t& _totalAllocationCount)
+{
+    return analyze_allocation_callers(cache, cache->full, _totalAllocationSize,
+            _totalAllocationCount)
+        && analyze_allocation_callers(cache, cache->partial,
+            _totalAllocationSize, _totalAllocationCount);
+}
+
+#endif // SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+
+
+static int
+dump_allocations_per_caller(int argc, char **argv)
+{
+    bool sortBySize = true;
+    ObjectCache* cache = NULL;
+
+    for (int32 i = 1; i < argc; i++) {
+        if (strcmp(argv[i], "-c") == 0) {
+            sortBySize = false;
+        } else if (strcmp(argv[i], "-o") == 0) {
+            uint64 cacheAddress;
+            if (++i >= argc
+                || !evaluate_debug_expression(argv[i], &cacheAddress, true)) {
+                print_debugger_command_usage(argv[0]);
+                return 0;
+            }
+
+            cache = (ObjectCache*)(addr_t)cacheAddress;
+        } else {
+            print_debugger_command_usage(argv[0]);
+            return 0;
+        }
+    }
+
+    sCallerInfoCount = 0;
+
+    size_t totalAllocationSize = 0;
+    size_t totalAllocationCount = 0;
+    if (cache != NULL) {
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+        analyze_allocation_callers(cache, totalAllocationSize,
+            totalAllocationCount);
+#else
+        kprintf("Object cache allocation tracking not available. "
+            "SLAB_OBJECT_CACHE_TRACING (%d) and "
+            "SLAB_OBJECT_CACHE_TRACING_STACK_TRACE (%d) must be enabled.\n",
+            SLAB_OBJECT_CACHE_TRACING, SLAB_OBJECT_CACHE_TRACING_STACK_TRACE);
+        return 0;
+#endif
+    } else {
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+        ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
+
+        while (it.HasNext()) {
+            analyze_allocation_callers(it.Next(), totalAllocationSize,
+                totalAllocationCount);
+        }
+#endif
+    }
+
+    // sort the array
+    qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
+        sortBySize ? &caller_info_compare_size : &caller_info_compare_count);
+
+    kprintf("%ld different callers, sorted by %s...\n\n", sCallerInfoCount,
+        sortBySize ? "size" : "count");
+
+    kprintf(" count size caller\n");
+    kprintf("----------------------------------\n");
+    for (int32 i = 0; i < sCallerInfoCount; i++) {
+        caller_info& info = sCallerInfoTable[i];
+        kprintf("%10" B_PRIuSIZE " %10" B_PRIuSIZE " %p", info.count,
+            info.size, (void*)info.caller);
+
+        const char *symbol;
+        const char *imageName;
+        bool exactMatch;
+        addr_t baseAddress;
+
+        if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
+                &imageName, &exactMatch) == B_OK) {
+            kprintf(" %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
+                info.caller - baseAddress, imageName,
+                exactMatch ? "" : " (nearest)");
+        } else
+            kprintf("\n");
+    }
+
+    kprintf("\ntotal allocations: %" B_PRIuSIZE ", %" B_PRIuSIZE " bytes\n",
+        totalAllocationCount, totalAllocationSize);
+
+    return 0;
+}
+
+#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+
+void
+add_alloc_tracing_entry(ObjectCache* cache, uint32 flags, void* object)
+{
+#if SLAB_OBJECT_CACHE_TRACING
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+    cache->TrackingInfoFor(object)->Init(T(Alloc(cache, flags, object)));
+#else
+    T(Alloc(cache, flags, object));
+#endif
+#endif
+}
+
+
 // #pragma mark -
 
 
@@ -669,7 +911,7 @@ object_cache_alloc(object_cache* cache, uint32 flags)
     if (!(cache->flags & CACHE_NO_DEPOT)) {
         void* object = object_depot_obtain(&cache->depot);
         if (object) {
-            T(Alloc(cache, flags, object));
+            add_alloc_tracing_entry(cache, flags, object);
             return fill_allocated_block(object, cache->object_size);
         }
     }
@@ -718,7 +960,7 @@ object_cache_alloc(object_cache* cache, uint32 flags)
     }
 
     void* object = link_to_object(link, cache->object_size);
-    T(Alloc(cache, flags, object));
+    add_alloc_tracing_entry(cache, flags, object);
     return fill_allocated_block(object, cache->object_size);
 }
 
@@ -748,6 +990,10 @@ object_cache_free(object_cache* cache, void* object, uint32 flags)
     fill_freed_block(object, cache->object_size);
 #endif
 
+#if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
+    cache->TrackingInfoFor(object)->Clear();
+#endif
+
     if ((cache->flags & CACHE_NO_DEPOT) == 0) {
         object_depot_store(&cache->depot, object, flags);
         return;
@@ -802,6 +1048,17 @@ slab_init_post_area()
         "dump contents of an object depot");
     add_debugger_command("slab_magazine", dump_depot_magazine,
         "dump contents of a depot magazine");
+#if SLAB_ALLOCATION_TRACKING_AVAILABLE
+    add_debugger_command_etc("allocations_per_caller",
+        &dump_allocations_per_caller,
+        "Dump current heap allocations summed up per caller",
+        "[ \"-c\" ] [ -o <object cache> ]\n"
+        "The current allocations will by summed up by caller (their count and\n"
+        "size) printed in decreasing order by size or, if \"-c\" is\n"
+        "specified, by allocation count. If given <object cache> specifies\n"
+        "the address of the object cache for which to print the allocations.\n",
+        0);
+#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE
 }
 
 
@@ -832,3 +1089,6 @@ slab_init_post_thread()
 
     resume_thread(objectCacheResizer);
 }
+
+
+RANGE_MARKER_FUNCTION_END(Slab)
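Once tracking is available, the command registered in slab_init_post_area() above can be driven from the kernel debugger; a few illustrative invocations (the cache address is made up):

    allocations_per_caller                  # all object caches, sorted by total size
    allocations_per_caller -c               # sort by allocation count instead
    allocations_per_caller -o 0x82345678    # restrict to one object cache

Each output line shows the count, total size and caller address, resolved to a symbol when elf_debug_lookup_symbol_address() succeeds.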
@@ -12,6 +12,9 @@
 #include "slab_private.h"
 
 
+RANGE_MARKER_FUNCTION_BEGIN(SlabSmallObjectCache)
+
+
 static inline slab *
 slab_in_pages(const void *pages, size_t slab_size)
 {
@@ -73,8 +76,14 @@ SmallObjectCache::CreateSlab(uint32 flags)
     if (error != B_OK)
         return NULL;
 
-    return InitSlab(slab_in_pages(pages, slab_size), pages,
-        slab_size - sizeof(slab), flags);
+    slab* newSlab = slab_in_pages(pages, slab_size);
+    size_t byteCount = slab_size - sizeof(slab);
+    if (AllocateTrackingInfos(newSlab, byteCount, flags) != B_OK) {
+        MemoryManager::Free(pages, flags);
+        return NULL;
+    }
+
+    return InitSlab(newSlab, pages, byteCount, flags);
 }
 
 
@@ -84,6 +93,7 @@ SmallObjectCache::ReturnSlab(slab* slab, uint32 flags)
     UninitSlab(slab);
 
     Unlock();
+    FreeTrackingInfos(slab, flags);
     MemoryManager::Free(slab->pages, flags);
     Lock();
 }
@@ -94,3 +104,6 @@ SmallObjectCache::ObjectSlab(void* object) const
 {
     return slab_in_pages(lower_boundary(object, slab_size), slab_size);
 }
+
+
+RANGE_MARKER_FUNCTION_END(SlabSmallObjectCache)
@@ -44,6 +44,9 @@ static size_t sBootStrapMemorySize = 0;
 static size_t sUsedBootStrapMemory = 0;
 
 
+RANGE_MARKER_FUNCTION_BEGIN(slab_allocator)
+
+
 static int
 size_to_index(size_t size)
 {
@@ -272,3 +275,6 @@ realloc(void* address, size_t newSize)
 
 
 #endif // USE_SLAB_ALLOCATOR_FOR_MALLOC
+
+
+RANGE_MARKER_FUNCTION_END(slab_allocator)
src/system/kernel/slab/slab_debug.h (new file, 135 lines)
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2011, Michael Lotz <mmlr@mlotz.ch>.
+ * Copyright 2011, Ingo Weinhold <ingo_weinhold@gmx.de>.
+ *
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef SLAB_DEBUG_H
+#define SLAB_DEBUG_H
+
+
+#include <debug.h>
+#include <slab/Slab.h>
+#include <tracing.h>
+
+#include "kernel_debug_config.h"
+
+
+//#define TRACE_SLAB
+#ifdef TRACE_SLAB
+#define TRACE_CACHE(cache, format, args...) \
+    dprintf("Cache[%p, %s] " format "\n", cache, cache->name , ##args)
+#else
+#define TRACE_CACHE(cache, format, bananas...) do { } while (0)
+#endif
+
+
+#define COMPONENT_PARANOIA_LEVEL OBJECT_CACHE_PARANOIA
+#include <debug_paranoia.h>
+
+
+// Macros determining whether allocation tracking is actually available.
+#define SLAB_OBJECT_CACHE_ALLOCATION_TRACKING (SLAB_ALLOCATION_TRACKING != 0 \
+    && SLAB_OBJECT_CACHE_TRACING != 0 \
+    && SLAB_OBJECT_CACHE_TRACING_STACK_TRACE > 0)
+    // The object cache code needs to do allocation tracking.
+
+#define SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING (SLAB_ALLOCATION_TRACKING != 0 \
+    && SLAB_MEMORY_MANAGER_TRACING != 0 \
+    && SLAB_MEMORY_MANAGER_TRACING_STACK_TRACE > 0)
+    // The memory manager code needs to do allocation tracking.
+
+#define SLAB_ALLOCATION_TRACKING_AVAILABLE \
+    (SLAB_OBJECT_CACHE_ALLOCATION_TRACKING \
+        || SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING)
+    // Guards code that is needed for either object cache or memory manager
+    // allocation tracking.
+
+
+struct object_depot;
+
+
+void dump_object_depot(object_depot* depot);
+int  dump_object_depot(int argCount, char** args);
+int  dump_depot_magazine(int argCount, char** args);
+
+
+#if SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+class AllocationTrackingInfo {
+public:
+    AbstractTraceEntryWithStackTrace*   traceEntry;
+    bigtime_t                           traceEntryTimestamp;
+
+public:
+    void Init(AbstractTraceEntryWithStackTrace* entry)
+    {
+        traceEntry = entry;
+        traceEntryTimestamp = entry != NULL ? entry->Time() : -1;
+            // Note: this is a race condition, if the tracing buffer wrapped and
+            // got overwritten once, we would access an invalid trace entry
+            // here. Obviously this is rather unlikely.
+    }
+
+    void Clear()
+    {
+        traceEntry = NULL;
+        traceEntryTimestamp = 0;
+    }
+
+    bool IsInitialized() const
+    {
+        return traceEntryTimestamp != 0;
+    }
+
+    AbstractTraceEntryWithStackTrace* TraceEntry() const
+    {
+        return traceEntry;
+    }
+
+    bool IsTraceEntryValid() const
+    {
+        return tracing_is_entry_valid(traceEntry, traceEntryTimestamp);
+    }
+};
+
+#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+
+#if PARANOID_KERNEL_MALLOC || PARANOID_KERNEL_FREE
+static inline void*
+fill_block(void* buffer, size_t size, uint32 pattern)
+{
+    if (buffer == NULL)
+        return NULL;
+
+    size &= ~(sizeof(pattern) - 1);
+    for (size_t i = 0; i < size / sizeof(pattern); i++)
+        ((uint32*)buffer)[i] = pattern;
+
+    return buffer;
+}
+#endif
+
+
+static inline void*
+fill_allocated_block(void* buffer, size_t size)
+{
+#if PARANOID_KERNEL_MALLOC
+    return fill_block(buffer, size, 0xcccccccc);
+#else
+    return buffer;
+#endif
+}
+
+
+static inline void*
+fill_freed_block(void* buffer, size_t size)
+{
+#if PARANOID_KERNEL_FREE
+    return fill_block(buffer, size, 0xdeadbeef);
+#else
+    return buffer;
+#endif
+}
+
+
+#endif // SLAB_DEBUG_H
@@ -14,27 +14,9 @@
 #include <slab/Slab.h>
 
 
-//#define TRACE_SLAB
-#ifdef TRACE_SLAB
-#define TRACE_CACHE(cache, format, args...) \
-    dprintf("Cache[%p, %s] " format "\n", cache, cache->name , ##args)
-#else
-#define TRACE_CACHE(cache, format, bananas...) do { } while (0)
-#endif
-
-
-#define COMPONENT_PARANOIA_LEVEL OBJECT_CACHE_PARANOIA
-#include <debug_paranoia.h>
-
-
 static const size_t kMinObjectAlignment = 8;
 
 struct ObjectCache;
 struct object_depot;
 
 void request_memory_manager_maintenance();
 
 void* block_alloc(size_t size, size_t alignment, uint32 flags);
@@ -43,10 +25,6 @@ void block_free(void* block, uint32 flags);
 void block_allocator_init_boot();
 void block_allocator_init_rest();
 
-void dump_object_depot(object_depot* depot);
-int  dump_object_depot(int argCount, char** args);
-int  dump_depot_magazine(int argCount, char** args);
-
 
 template<typename Type>
 static inline Type*
@@ -84,41 +62,4 @@ slab_internal_free(void* buffer, uint32 flags)
 }
 
 
-#if PARANOID_KERNEL_MALLOC || PARANOID_KERNEL_FREE
-static inline void*
-fill_block(void* buffer, size_t size, uint32 pattern)
-{
-    if (buffer == NULL)
-        return NULL;
-
-    size &= ~(sizeof(pattern) - 1);
-    for (size_t i = 0; i < size / sizeof(pattern); i++)
-        ((uint32*)buffer)[i] = pattern;
-
-    return buffer;
-}
-#endif
-
-
-static inline void*
-fill_allocated_block(void* buffer, size_t size)
-{
-#if PARANOID_KERNEL_MALLOC
-    return fill_block(buffer, size, 0xcccccccc);
-#else
-    return buffer;
-#endif
-}
-
-
-static inline void*
-fill_freed_block(void* buffer, size_t size)
-{
-#if PARANOID_KERNEL_FREE
-    return fill_block(buffer, size, 0xdeadbeef);
-#else
-    return buffer;
-#endif
-}
-
 #endif // SLAB_PRIVATE_H