Add VM page allocation tracking, similar to what the slab allocator already does.

Introduces "page_allocation_infos" and "page_allocations_per_caller" KDL
commands.
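
For illustration, typical invocations of the new commands in KDL might look
like the following (the address and IDs are made up; the flags are the ones
parsed by the commands in the diff below):

  page_allocations_per_caller -r
  page_allocations_per_caller -d 0x80123456
  page_allocation_infos --team 42 --stacktrace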


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@43190 a95241bf-73f2-0310-859d-f6bbb57e9c96
Author: Michael Lotz
Date:   2011-11-04 18:07:07 +00:00
Commit: 90c6930ebb (parent 905c75a595)

3 changed files with 416 additions and 0 deletions

File: kernel_debug_config.h

@@ -110,5 +110,8 @@
// When set, limits the amount of available RAM (in MB).
//#define LIMIT_AVAILABLE_MEMORY 256
// Enables tracking of page allocations.
#define VM_PAGE_ALLOCATION_TRACKING 0
#endif // KERNEL_DEBUG_CONFIG_H
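
Note that setting VM_PAGE_ALLOCATION_TRACKING to 1 is not sufficient by
itself: the VM_PAGE_ALLOCATION_TRACKING_AVAILABLE macro introduced below also
requires page allocation tracing with stack traces. A sketch of a debug
configuration that satisfies it (assuming the PAGE_ALLOCATION_TRACING*
macros from this same header; the stack trace depth is illustrative):

  #define VM_PAGE_ALLOCATION_TRACKING 1
  #define PAGE_ALLOCATION_TRACING 1
  #define PAGE_ALLOCATION_TRACING_STACK_TRACE 16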

File: vm_page.h

@@ -12,6 +12,7 @@
#include <new>
#include <AllocationTracking.h>
#include <arch/vm_types.h>
#include <condition_variable.h>
#include <kernel.h>
@@ -25,6 +26,11 @@
#include "kernel_debug_config.h"
#define VM_PAGE_ALLOCATION_TRACKING_AVAILABLE \
(VM_PAGE_ALLOCATION_TRACKING && PAGE_ALLOCATION_TRACING != 0 \
&& PAGE_ALLOCATION_TRACING_STACK_TRACE > 0)
class AsyncIOCallback;
struct vm_page_mapping;
struct VMCache;
@@ -133,6 +139,10 @@ public:
vint32 accessing_thread;
#endif
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
AllocationTrackingInfo allocation_tracking_info;
#endif
private:
uint8 state : 3;
public:

File: vm_page.cpp

@@ -23,6 +23,7 @@
#include <block_cache.h>
#include <boot/kernel_args.h>
#include <condition_variable.h>
#include <elf.h>
#include <heap.h>
#include <kernel.h>
#include <low_resource_manager.h>
@@ -141,6 +142,32 @@ static page_num_t sNextPageUsagePageCount;
#endif
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
struct caller_info {
addr_t caller;
size_t count;
};
static const int32 kCallerInfoTableSize = 1024;
static caller_info sCallerInfoTable[kCallerInfoTableSize];
static int32 sCallerInfoCount = 0;
static caller_info* get_caller_info(addr_t caller);
RANGE_MARKER_FUNCTION_PROTOTYPES(vm_page)
static const addr_t kVMPageCodeAddressRange[] = {
RANGE_MARKER_FUNCTION_ADDRESS_RANGE(vm_page)
};
#endif
RANGE_MARKER_FUNCTION_BEGIN(vm_page)
struct page_stats {
int32 totalFreePages;
int32 unsatisfiedReservations;
@@ -574,6 +601,165 @@ class SetPageState : public AbstractTraceEntry {
#endif // PAGE_STATE_TRACING
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
namespace BKernel {
class AllocationTrackingCallback {
public:
virtual ~AllocationTrackingCallback();
virtual bool ProcessTrackingInfo(
AllocationTrackingInfo* info,
page_num_t pageNumber) = 0;
};
}
using BKernel::AllocationTrackingCallback;
class AllocationCollectorCallback : public AllocationTrackingCallback {
public:
AllocationCollectorCallback(bool resetInfos)
:
fResetInfos(resetInfos)
{
}
virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
page_num_t pageNumber)
{
if (!info->IsInitialized())
return true;
addr_t caller = 0;
AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
if (traceEntry != NULL && info->IsTraceEntryValid()) {
caller = tracing_find_caller_in_stack_trace(
traceEntry->StackTrace(), kVMPageCodeAddressRange, 1);
}
caller_info* callerInfo = get_caller_info(caller);
if (callerInfo == NULL) {
kprintf("out of space for caller infos\n");
return false;
}
callerInfo->count++;
if (fResetInfos)
info->Clear();
return true;
}
private:
bool fResetInfos;
};
class AllocationInfoPrinterCallback : public AllocationTrackingCallback {
public:
AllocationInfoPrinterCallback(bool printStackTrace, page_num_t pageFilter,
team_id teamFilter, thread_id threadFilter)
:
fPrintStackTrace(printStackTrace),
fPageFilter(pageFilter),
fTeamFilter(teamFilter),
fThreadFilter(threadFilter)
{
}
virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
page_num_t pageNumber)
{
if (!info->IsInitialized())
return true;
if (fPageFilter != 0 && pageNumber != fPageFilter)
return true;
AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
if (traceEntry != NULL && !info->IsTraceEntryValid())
traceEntry = NULL;
if (traceEntry != NULL) {
if (fTeamFilter != -1 && traceEntry->TeamID() != fTeamFilter)
return true;
if (fThreadFilter != -1 && traceEntry->ThreadID() != fThreadFilter)
return true;
} else {
// we need the info if we have filters set
if (fTeamFilter != -1 || fThreadFilter != -1)
return true;
}
kprintf("page number %#" B_PRIxPHYSADDR, pageNumber);
if (traceEntry != NULL) {
kprintf(", team: %" B_PRId32 ", thread %" B_PRId32
", time %" B_PRId64 "\n", traceEntry->TeamID(),
traceEntry->ThreadID(), traceEntry->Time());
if (fPrintStackTrace)
tracing_print_stack_trace(traceEntry->StackTrace());
} else
kprintf("\n");
return true;
}
private:
bool fPrintStackTrace;
page_num_t fPageFilter;
team_id fTeamFilter;
thread_id fThreadFilter;
};
class AllocationDetailPrinterCallback : public AllocationTrackingCallback {
public:
AllocationDetailPrinterCallback(addr_t caller)
:
fCaller(caller)
{
}
virtual bool ProcessTrackingInfo(AllocationTrackingInfo* info,
page_num_t pageNumber)
{
if (!info->IsInitialized())
return true;
addr_t caller = 0;
AbstractTraceEntryWithStackTrace* traceEntry = info->TraceEntry();
if (traceEntry != NULL && !info->IsTraceEntryValid())
traceEntry = NULL;
if (traceEntry != NULL) {
caller = tracing_find_caller_in_stack_trace(
traceEntry->StackTrace(), kVMPageCodeAddressRange, 1);
}
if (caller != fCaller)
return true;
kprintf("page %#" B_PRIxPHYSADDR "\n", pageNumber);
if (traceEntry != NULL)
tracing_print_stack_trace(traceEntry->StackTrace());
return true;
}
private:
addr_t fCaller;
};
#endif // VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
static int
find_page(int argc, char **argv)
{
@@ -966,6 +1152,171 @@ dump_page_stats(int argc, char **argv)
}
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
static caller_info*
get_caller_info(addr_t caller)
{
// find the caller info
for (int32 i = 0; i < sCallerInfoCount; i++) {
if (caller == sCallerInfoTable[i].caller)
return &sCallerInfoTable[i];
}
// not found, add a new entry, if there are free slots
if (sCallerInfoCount >= kCallerInfoTableSize)
return NULL;
caller_info* info = &sCallerInfoTable[sCallerInfoCount++];
info->caller = caller;
info->count = 0;
return info;
}
static int
caller_info_compare_count(const void* _a, const void* _b)
{
const caller_info* a = (const caller_info*)_a;
const caller_info* b = (const caller_info*)_b;
// sort larger counts first; compare explicitly instead of casting the
// difference of the unsigned counts, which could wrap around
if (a->count != b->count)
return a->count < b->count ? 1 : -1;
return 0;
}
static int
dump_page_allocations_per_caller(int argc, char** argv)
{
bool resetAllocationInfos = false;
bool printDetails = false;
addr_t caller = 0;
for (int32 i = 1; i < argc; i++) {
if (strcmp(argv[i], "-d") == 0) {
uint64 callerAddress;
if (++i >= argc
|| !evaluate_debug_expression(argv[i], &callerAddress, true)) {
print_debugger_command_usage(argv[0]);
return 0;
}
caller = callerAddress;
printDetails = true;
} else if (strcmp(argv[i], "-r") == 0) {
resetAllocationInfos = true;
} else {
print_debugger_command_usage(argv[0]);
return 0;
}
}
sCallerInfoCount = 0;
AllocationCollectorCallback collectorCallback(resetAllocationInfos);
AllocationDetailPrinterCallback detailsCallback(caller);
AllocationTrackingCallback& callback = printDetails
? (AllocationTrackingCallback&)detailsCallback
: (AllocationTrackingCallback&)collectorCallback;
for (page_num_t i = 0; i < sNumPages; i++)
callback.ProcessTrackingInfo(&sPages[i].allocation_tracking_info, i);
if (printDetails)
return 0;
// sort the array
qsort(sCallerInfoTable, sCallerInfoCount, sizeof(caller_info),
&caller_info_compare_count);
kprintf("%" B_PRId32 " different callers\n\n", sCallerInfoCount);
size_t totalAllocationCount = 0;
kprintf(" count caller\n");
kprintf("----------------------------------\n");
for (int32 i = 0; i < sCallerInfoCount; i++) {
caller_info& info = sCallerInfoTable[i];
kprintf("%10" B_PRIuSIZE " %p", info.count, (void*)info.caller);
const char* symbol;
const char* imageName;
bool exactMatch;
addr_t baseAddress;
if (elf_debug_lookup_symbol_address(info.caller, &baseAddress, &symbol,
&imageName, &exactMatch) == B_OK) {
kprintf(" %s + %#" B_PRIxADDR " (%s)%s\n", symbol,
info.caller - baseAddress, imageName,
exactMatch ? "" : " (nearest)");
} else
kprintf("\n");
totalAllocationCount += info.count;
}
kprintf("\ntotal page allocations: %" B_PRIuSIZE "\n",
totalAllocationCount);
return 0;
}
static int
dump_page_allocation_infos(int argc, char** argv)
{
page_num_t pageFilter = 0;
team_id teamFilter = -1;
thread_id threadFilter = -1;
bool printStackTraces = false;
for (int32 i = 1; i < argc; i++) {
if (strcmp(argv[i], "--stacktrace") == 0)
printStackTraces = true;
else if (strcmp(argv[i], "-p") == 0) {
uint64 pageNumber;
if (++i >= argc
|| !evaluate_debug_expression(argv[i], &pageNumber, true)) {
print_debugger_command_usage(argv[0]);
return 0;
}
pageFilter = pageNumber;
} else if (strcmp(argv[i], "--team") == 0) {
uint64 team;
if (++i >= argc
|| !evaluate_debug_expression(argv[i], &team, true)) {
print_debugger_command_usage(argv[0]);
return 0;
}
teamFilter = team;
} else if (strcmp(argv[i], "--thread") == 0) {
uint64 thread;
if (++i >= argc
|| !evaluate_debug_expression(argv[i], &thread, true)) {
print_debugger_command_usage(argv[0]);
return 0;
}
threadFilter = thread;
} else {
print_debugger_command_usage(argv[0]);
return 0;
}
}
AllocationInfoPrinterCallback callback(printStackTraces, pageFilter,
teamFilter, threadFilter);
for (page_num_t i = 0; i < sNumPages; i++)
callback.ProcessTrackingInfo(&sPages[i].allocation_tracking_info, i);
return 0;
}
#endif // VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
#ifdef TRACK_PAGE_USAGE_STATS
static void
@@ -1178,6 +1529,10 @@ free_page(vm_page* page, bool clear)
TA(FreePage(page->physical_page_number));
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
page->allocation_tracking_info.Clear();
#endif
ReadLocker locker(sFreePageQueuesLock);
DEBUG_PAGE_ACCESS_END(page);
@@ -2922,6 +3277,10 @@ vm_page_init(kernel_args *args)
for (uint32 i = 0; i < sNumPages; i++) {
sPages[i].Init(sPhysicalPageOffset + i);
sFreePageQueue.Append(&sPages[i]);
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
sPages[i].allocation_tracking_info.Clear();
#endif
}
sUnreservedFreePages = sNumPages;
@@ -3007,6 +3366,32 @@ vm_page_init_post_area(kernel_args *args)
B_KDEBUG_DONT_PARSE_ARGUMENTS);
#endif
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
add_debugger_command_etc("page_allocations_per_caller",
&dump_page_allocations_per_caller,
"Dump current page allocations summed up per caller",
"[ -d <caller> ] [ -r ]\n"
"The current allocations will by summed up by caller (their count)\n"
"printed in decreasing order by count.\n"
"If \"-d\" is given, each allocation for caller <caller> is printed\n"
"including the respective stack trace.\n"
"If \"-r\" is given, the allocation infos are reset after gathering\n"
"the information, so the next command invocation will only show the\n"
"allocations made after the reset.\n", 0);
add_debugger_command_etc("page_allocation_infos",
&dump_page_allocation_infos,
"Dump current page allocations",
"[ --stacktrace ] [ -p <page number> ] [ --team <team ID> ] "
"[ --thread <thread ID> ]\n"
"The current allocations filtered by optional values will be printed.\n"
"The optional \"-p\" page number filters for a specific page,\n"
"with \"--team\" and \"--thread\" allocations by specific teams\n"
"and/or threads can be filtered (these only work if a corresponding\n"
"tracing entry is still available).\n"
"If \"--stacktrace\" is given, then stack traces of the allocation\n"
"callers are printed, where available.\n", 0);
#endif
return B_OK;
}
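
Going by the kprintf() calls in dump_page_allocations_per_caller() above, the
summary output of the new command takes roughly the following shape (the
counts, addresses, and symbol names are invented for illustration):

  2 different callers

       count caller
  ----------------------------------
       12345 0x80123456 vm_soft_fault + 0x1a2 (kernel_x86)
         678 0x80234567 object_cache_alloc + 0x2b (kernel_x86)

  total page allocations: 13023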
@@ -3189,7 +3574,13 @@ vm_page_allocate_page(vm_page_reservation* reservation, uint32 flags)
if ((flags & VM_PAGE_ALLOC_CLEAR) != 0 && oldPageState != PAGE_STATE_CLEAR)
clear_page(page);
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
page->allocation_tracking_info.Init(
TA(AllocatePage(page->physical_page_number)));
#else
TA(AllocatePage(page->physical_page_number));
#endif
return page;
}
@@ -3367,7 +3758,16 @@ allocate_page_run(page_num_t start, page_num_t length, uint32 flags,
// Note: We don't unreserve the pages since we pulled them out of the
// free/clear queues without adjusting sUnreservedFreePages.
#if VM_PAGE_ALLOCATION_TRACKING_AVAILABLE
AbstractTraceEntryWithStackTrace* traceEntry
= TA(AllocatePageRun(start, length));
for (page_num_t i = start; i < start + length; i++)
sPages[i].allocation_tracking_info.Init(traceEntry);
#else
TA(AllocatePageRun(start, length));
#endif
return length;
}
@@ -3689,3 +4089,6 @@ vm_page_max_address()
{
return ((phys_addr_t)sPhysicalPageOffset + sNumPages) * B_PAGE_SIZE - 1;
}
RANGE_MARKER_FUNCTION_END(vm_page)