mmlr + bonefish:

Refactor the "allocations_per_caller" KDL command related functions.
They expect an instance of a class implementing the new
AllocationTrackingCallback interface, now. The only implementation ATM
is AllocationCollectorCallback, which does the work the now removed
slab_debug_add_allocation_for_caller() did before.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@43082 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2011-11-01 21:16:14 +00:00
parent 12a1034a7d
commit 50175c99c4
4 changed files with 99 additions and 57 deletions
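
In short: where these functions used to take a bool
(resetAllocationInfos) and call the fixed helper
slab_debug_add_allocation_for_caller() themselves, the KDL command now
creates one callback object and hands it to every traversal function.
Condensed from the hunks below (a sketch, not a verbatim excerpt):

    // One collector instance aggregates the per-caller statistics for
    // the whole command invocation; resetAllocationInfos is the
    // command's existing reset flag.
    AllocationCollectorCallback callback(resetAllocationInfos);

    // Either a single object cache ...
    if (!analyze_allocation_callers(cache, callback))
        return 0;

    // ... or all object caches plus the memory manager's raw allocations.
    for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
            it.HasNext();) {
        if (!analyze_allocation_callers(it.Next(), callback))
            return 0;
    }
    if (!MemoryManager::AnalyzeAllocationCallers(callback))
        return 0;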

View File

@@ -869,7 +869,7 @@ MemoryManager::PerformMaintenance()

 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING

 /*static*/ bool
-MemoryManager::AnalyzeAllocationCallers(bool resetAllocationInfos)
+MemoryManager::AnalyzeAllocationCallers(AllocationTrackingCallback& callback)
 {
 	for (AreaTable::Iterator it = sAreaTable.GetIterator();
 			Area* area = it.Next();) {
@@ -892,9 +892,10 @@ MemoryManager::AnalyzeAllocationCallers(bool resetAllocationInfos)
 			addr_t chunkAddress = _ChunkAddress(metaChunk, chunk);
 			size_t size = reference - chunkAddress + 1;

-			slab_debug_add_allocation_for_caller(
-				_TrackingInfoFor((void*)chunkAddress, size), size,
-				resetAllocationInfos);
+			if (!callback.ProcessTrackingInfo(
+					_TrackingInfoFor((void*)chunkAddress, size), size)) {
+				return false;
+			}
 		}
 	}
 }
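
Note the contract visible in this hunk: ProcessTrackingInfo() returns
a bool, and returning false aborts the whole traversal (here by
returning false from AnalyzeAllocationCallers()). That is how
AllocationCollectorCallback propagates its "out of space for caller
infos" condition. A hypothetical implementation, not part of this
commit, could use the same mechanism to stop early:

    // Hypothetical callback (illustration only): print the first N
    // live allocations, then abort the walk by returning false.
    class FirstNAllocationsCallback : public AllocationTrackingCallback {
    public:
        FirstNAllocationsCallback(uint32 max)	// assumes max >= 1
            :
            fRemaining(max)
        {
        }

        virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
            size_t allocationSize)
        {
            if (!info->IsInitialized())
                return true;

            kprintf("allocation: %" B_PRIuSIZE " bytes\n", allocationSize);
            return --fRemaining > 0;
        }

    private:
        uint32	fRemaining;
    };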

View File

@@ -61,7 +61,7 @@ public:
 	static	void				PerformMaintenance();

 	static	bool				AnalyzeAllocationCallers(
-									bool resetAllocationInfos);
+									AllocationTrackingCallback& callback);

 private:
 			struct Tracing;

View File

@@ -63,6 +63,8 @@ static const int32 kCallerInfoTableSize = 1024;
 static caller_info sCallerInfoTable[kCallerInfoTableSize];
 static int32 sCallerInfoCount = 0;

+static caller_info* get_caller_info(addr_t caller);
+
 RANGE_MARKER_FUNCTION_PROTOTYPES(slab_allocator)
 RANGE_MARKER_FUNCTION_PROTOTYPES(SlabHashedObjectCache)
@@ -327,8 +329,69 @@ dump_cache_info(int argc, char* argv[])
 }


+// #pragma mark - AllocationTrackingCallback
+
+
+#if SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+AllocationTrackingCallback::~AllocationTrackingCallback()
+{
+}
+
+#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+
 // #pragma mark -
+
+
+#if SLAB_ALLOCATION_TRACKING_AVAILABLE
+
+namespace {
+
+class AllocationCollectorCallback : public AllocationTrackingCallback {
+public:
+	AllocationCollectorCallback(bool resetInfos)
+		:
+		fResetInfos(resetInfos)
+	{
+	}
+
+	virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
+		size_t allocationSize)
+	{
+		if (!info->IsInitialized())
+			return true;
+
+		addr_t caller = 0;
+		AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
+
+		if (traceEntry != NULL && info->IsTraceEntryValid()) {
+			caller = tracing_find_caller_in_stack_trace(
+				traceEntry->StackTrace(), kSlabCodeAddressRanges,
+				kSlabCodeAddressRangeCount);
+		}
+
+		caller_info* callerInfo = get_caller_info(caller);
+		if (callerInfo == NULL) {
+			kprintf("out of space for caller infos\n");
+			return false;
+		}
+
+		callerInfo->count++;
+		callerInfo->size += allocationSize;
+
+		if (fResetInfos)
+			info->Clear();
+
+		return true;
+	}
+
+private:
+	bool	fResetInfos;
+};
+
+}	// unnamed namespace
+
+
 static caller_info*
 get_caller_info(addr_t caller)
 {
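
AllocationCollectorCallback is the direct replacement for the removed
slab_debug_add_allocation_for_caller(): the body is the same, with the
former resetAllocationInfos parameter captured as fResetInfos at
construction time. Every initialized tracking info is funneled into the
per-caller table via get_caller_info(), whose body this commit leaves
untouched (it only gains the forward declaration above). In outline it
behaves like this; a sketch inferred from the surrounding declarations,
not the verbatim Haiku code:

    // Map a caller address to its aggregate entry in the fixed-size
    // sCallerInfoTable (kCallerInfoTableSize entries), appending a new
    // entry while room remains.
    static caller_info*
    get_caller_info(addr_t caller)
    {
        // reuse the existing entry for this caller, if any
        for (int32 i = 0; i < sCallerInfoCount; i++) {
            if (sCallerInfoTable[i].caller == caller)
                return &sCallerInfoTable[i];
        }

        // table full: the collector prints "out of space for caller
        // infos" and aborts the traversal
        if (sCallerInfoCount >= kCallerInfoTableSize)
            return NULL;

        caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
        info->caller = caller;
        info->count = 0;
        info->size = 0;
        return info;
    }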
@@ -369,49 +432,17 @@ caller_info_compare_count(const void* _a, const void* _b)
 }


-bool
-slab_debug_add_allocation_for_caller(AllocationTrackingInfo* info,
-	size_t allocationSize, bool resetAllocationInfos)
-{
-	if (!info->IsInitialized())
-		return true;
-
-	addr_t caller = 0;
-	AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
-
-	if (traceEntry != NULL && info->IsTraceEntryValid()) {
-		caller = tracing_find_caller_in_stack_trace(
-			traceEntry->StackTrace(), kSlabCodeAddressRanges,
-			kSlabCodeAddressRangeCount);
-	}
-
-	caller_info* callerInfo = get_caller_info(caller);
-	if (callerInfo == NULL) {
-		kprintf("out of space for caller infos\n");
-		return false;
-	}
-
-	callerInfo->count++;
-	callerInfo->size += allocationSize;
-
-	if (resetAllocationInfos)
-		info->Clear();
-
-	return true;
-}
-
-
 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING

 static bool
 analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,
-	bool resetAllocationInfos)
+	AllocationTrackingCallback& callback)
 {
 	for (SlabList::ConstIterator it = slabList.GetIterator();
 			slab* slab = it.Next();) {
 		for (uint32 i = 0; i < slab->size; i++) {
-			if (!slab_debug_add_allocation_for_caller(&slab->tracking[i],
-					cache->object_size, resetAllocationInfos)) {
+			if (!callback.ProcessTrackingInfo(&slab->tracking[i],
+					cache->object_size)) {
 				return false;
 			}
 		}
@@ -422,11 +453,11 @@ analyze_allocation_callers(ObjectCache* cache, const SlabList& slabList,

 static bool
-analyze_allocation_callers(ObjectCache* cache, bool resetAllocationInfos)
+analyze_allocation_callers(ObjectCache* cache,
+	AllocationTrackingCallback& callback)
 {
-	return analyze_allocation_callers(cache, cache->full, resetAllocationInfos)
-		&& analyze_allocation_callers(cache, cache->partial,
-			resetAllocationInfos);
+	return analyze_allocation_callers(cache, cache->full, callback)
+		&& analyze_allocation_callers(cache, cache->partial, callback);
 }


 #endif // SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
@@ -463,7 +494,8 @@ dump_allocations_per_caller(int argc, char **argv)
 	if (cache != NULL) {
 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
-		if (!analyze_allocation_callers(cache, resetAllocationInfos))
+		AllocationCollectorCallback callback(resetAllocationInfos);
+		if (!analyze_allocation_callers(cache, callback))
 			return 0;
 #else
 		kprintf("Object cache allocation tracking not available. "
@@ -473,17 +505,18 @@ dump_allocations_per_caller(int argc, char **argv)
 			return 0;
 #endif
 	} else {
+		AllocationCollectorCallback callback(resetAllocationInfos);
 #if SLAB_OBJECT_CACHE_ALLOCATION_TRACKING
-		ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
-		while (it.HasNext()) {
-			if (!analyze_allocation_callers(it.Next(), resetAllocationInfos))
+		for (ObjectCacheList::Iterator it = sObjectCaches.GetIterator();
+				it.HasNext();) {
+			if (!analyze_allocation_callers(it.Next(), callback))
 				return 0;
 		}
 #endif

 #if SLAB_MEMORY_MANAGER_ALLOCATION_TRACKING
-		if (!MemoryManager::AnalyzeAllocationCallers(resetAllocationInfos))
+		if (!MemoryManager::AnalyzeAllocationCallers(callback))
 			return 0;
 #endif
 	}

View File

@@ -86,6 +86,22 @@ public:
 	}
 };

+
+namespace BKernel {
+
+class AllocationTrackingCallback {
+public:
+	virtual						~AllocationTrackingCallback();
+
+	virtual	bool				ProcessTrackingInfo(
+									AllocationTrackingInfo* info,
+									size_t allocationSize) = 0;
+};
+
+}
+
+using BKernel::AllocationTrackingCallback;
+
 #endif // SLAB_ALLOCATION_TRACKING_AVAILABLE
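
The interface itself is minimal: one pure virtual hook plus a virtual
destructor (defined out of line in Slab.cpp, above, so the class has a
single translation unit to anchor its vtable). A subclass only needs to
override ProcessTrackingInfo(); the skeleton below is a hypothetical
illustration, not code from this commit:

    class MyTrackingCallback : public AllocationTrackingCallback {
    public:
        virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
            size_t allocationSize)
        {
            // inspect info and allocationSize here; return true to
            // continue the traversal, false to abort it
            return true;
        }
    };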
@@ -93,14 +109,6 @@ void dump_object_depot(object_depot* depot);
 int dump_object_depot(int argCount, char** args);
 int dump_depot_magazine(int argCount, char** args);

-#if SLAB_ALLOCATION_TRACKING_AVAILABLE
-
-bool slab_debug_add_allocation_for_caller(
-	AllocationTrackingInfo* info, size_t allocationSize,
-	bool resetAllocationInfos);
-
-#endif // SLAB_ALLOCATION_TRACKING_AVAILABLE
-
 #if PARANOID_KERNEL_MALLOC || PARANOID_KERNEL_FREE

 static inline void*