* Added method VMCache::TransferAreas(), which moves the areas from one cache
  to another. The code originates from vm_copy_on_write_area(), but now also
  generates the VM cache tracing entries.
* count_writable_areas() -> VMCache::CountWritableAreas()
* Added debugger command "cache_stack", available when VM cache tracing is
  enabled. It prints the chain of source caches of a given cache or area at
  the time of a specified tracing entry.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34751 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2009-12-22 22:00:35 +00:00
parent ff48991114
commit 2e74d74f4f
3 changed files with 189 additions and 30 deletions

View File

@@ -101,6 +101,8 @@ public:
	status_t InsertAreaLocked(VMArea* area);
	status_t RemoveArea(VMArea* area);
	void TransferAreas(VMCache* fromCache);
	uint32 CountWritableAreas(VMArea* ignoreArea) const;
	status_t WriteModified();
	status_t SetMinimalCommitment(off_t commitment);
@@ -204,6 +206,7 @@ extern "C" {
#endif

status_t vm_cache_init(struct kernel_args *args);
void vm_cache_init_post_heap();
struct VMCache *vm_cache_acquire_locked_page_cache(struct vm_page *page,
	bool dontWait);

View File

@@ -79,6 +79,11 @@ class VMCacheTraceEntry : public AbstractTraceEntry {
		}
#endif

		VMCache* Cache() const
		{
			return fCache;
		}

	protected:
		VMCache* fCache;
#if VM_CACHE_TRACING_STACK_TRACE
@@ -182,6 +187,11 @@ class AddConsumer : public VMCacheTraceEntry {
				fConsumer);
		}

		VMCache* Consumer() const
		{
			return fConsumer;
		}

	private:
		VMCache* fConsumer;
};
@@ -245,6 +255,11 @@ class InsertArea : public VMCacheTraceEntry {
				fArea);
		}

		VMArea* Area() const
		{
			return fArea;
		}

	private:
		VMArea* fArea;
};
@@ -333,6 +348,115 @@ class RemovePage : public VMCacheTraceEntry {
#endif


// #pragma mark - debugger commands


#if VM_CACHE_TRACING

static void*
cache_stack_find_area_cache(const TraceEntryIterator& baseIterator, void* area)
{
	using namespace VMCacheTracing;

	// find the previous "insert area" entry for the given area
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (InsertArea* insertAreaEntry = dynamic_cast<InsertArea*>(entry)) {
			if (insertAreaEntry->Area() == area)
				return insertAreaEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static void*
cache_stack_find_consumer(const TraceEntryIterator& baseIterator, void* cache)
{
	using namespace VMCacheTracing;

	// find the previous "add consumer" or "create" entry for the given cache
	TraceEntryIterator iterator = baseIterator;
	TraceEntry* entry = iterator.Current();
	while (entry != NULL) {
		if (Create* createEntry = dynamic_cast<Create*>(entry)) {
			if (createEntry->Cache() == cache)
				return NULL;
		} else if (AddConsumer* addEntry = dynamic_cast<AddConsumer*>(entry)) {
			if (addEntry->Consumer() == cache)
				return addEntry->Cache();
		}

		entry = iterator.Previous();
	}

	return NULL;
}


static int
command_cache_stack(int argc, char** argv)
{
	if (argc < 3 || argc > 4) {
		print_debugger_command_usage(argv[0]);
		return 0;
	}

	bool isArea = false;

	int argi = 1;
	if (argc == 4) {
		if (strcmp(argv[argi], "area") != 0) {
			print_debugger_command_usage(argv[0]);
			return 0;
		}

		argi++;
		isArea = true;
	}

	uint64 addressValue;
	uint64 debugEntryIndex;
	if (!evaluate_debug_expression(argv[argi++], &addressValue, false)
		|| !evaluate_debug_expression(argv[argi++], &debugEntryIndex, false)) {
		return 0;
	}

	TraceEntryIterator baseIterator;
	if (baseIterator.MoveTo((int32)debugEntryIndex) == NULL) {
		kprintf("Invalid tracing entry index %" B_PRIu64 "\n", debugEntryIndex);
		return 0;
	}

	void* address = (void*)(addr_t)addressValue;

	kprintf("cache stack for %s %p at %" B_PRIu64 ":\n",
		isArea ? "area" : "cache", address, debugEntryIndex);

	if (isArea) {
		address = cache_stack_find_area_cache(baseIterator, address);
		if (address == NULL) {
			kprintf("  cache not found\n");
			return 0;
		}
	}

	while (address != NULL) {
		kprintf("  %p\n", address);
		address = cache_stack_find_consumer(baseIterator, address);
	}

	return 0;
}


#endif // VM_CACHE_TRACING


// #pragma mark -
@@ -343,6 +467,23 @@ vm_cache_init(kernel_args* args)
}


void
vm_cache_init_post_heap()
{
#if VM_CACHE_TRACING
	add_debugger_command_etc("cache_stack", &command_cache_stack,
		"List the ancestors (sources) of a VMCache at the time given by "
			"tracing entry index",
		"[ \"area\" ] <address> <tracing entry index>\n"
		"All ancestors (sources) of a given VMCache at the time given by the\n"
		"tracing entry index are listed. If \"area\" is given the supplied\n"
		"address is an area instead of a cache address. The listing will\n"
		"start with the area's cache at that point.\n",
		0);
#endif // VM_CACHE_TRACING
}
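
A hypothetical KDL session (all values invented; the output format follows the
kprintf() calls in command_cache_stack() above):

	kdebug> cache_stack area 0x80123456 1234
	cache stack for area 0x80123456 at 1234:
	  0xd0012340
	  0xd0009870
	  0xd0001200

Each printed address is a cache: the listing starts with the cache the area
belonged to at entry 1234 and follows the "add consumer" trace entries back to
the root source cache.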
VMCache*
vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
{
@@ -750,6 +891,46 @@ VMCache::RemoveArea(VMArea* area)
}


/*!	Transfers the areas from \a fromCache to this cache. This cache must not
	have areas yet. Both caches must be locked.
*/
void
VMCache::TransferAreas(VMCache* fromCache)
{
	AssertLocked();
	fromCache->AssertLocked();
	ASSERT(areas == NULL);

	areas = fromCache->areas;
	fromCache->areas = NULL;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		area->cache = this;
		AcquireRefLocked();
		fromCache->ReleaseRefLocked();

		T(RemoveArea(fromCache, area));
		T(InsertArea(this, area));
	}
}
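
A minimal caller sketch of the contract documented above (toCache/fromCache
are invented names; the real call site is vm_copy_on_write_area() in vm.cpp
below, where both caches are already locked):

	// Both caches must be locked, and the receiving cache must not have
	// any areas yet. TransferAreas() rewires each area's cache pointer
	// and moves the corresponding cache references.
	toCache->Lock();
	fromCache->Lock();

	toCache->TransferAreas(fromCache);

	fromCache->Unlock();
	toCache->Unlock();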


uint32
VMCache::CountWritableAreas(VMArea* ignoreArea) const
{
	uint32 count = 0;

	for (VMArea* area = areas; area != NULL; area = area->cache_next) {
		if (area != ignoreArea
			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
			count++;
		}
	}

	return count;
}


status_t
VMCache::WriteModified()
{

View File

@@ -1638,17 +1638,7 @@ vm_copy_on_write_area(VMCache* lowerCache)
	// transfer the lower cache areas to the upper cache
	mutex_lock(&sAreaCacheLock);

-	upperCache->areas = lowerCache->areas;
-	lowerCache->areas = NULL;
-
-	for (VMArea* tempArea = upperCache->areas; tempArea != NULL;
-			tempArea = tempArea->cache_next) {
-		tempArea->cache = upperCache;
-		upperCache->AcquireRefLocked();
-		lowerCache->ReleaseRefLocked();
-	}
+	upperCache->TransferAreas(lowerCache);

	mutex_unlock(&sAreaCacheLock);

	lowerCache->AddConsumer(upperCache);
@@ -1744,23 +1734,6 @@ vm_copy_area(team_id team, const char* name, void** _address,
}

-//! You need to hold the cache lock when calling this function
-static int32
-count_writable_areas(VMCache* cache, VMArea* ignoreArea)
-{
-	struct VMArea* area = cache->areas;
-	uint32 count = 0;
-
-	for (; area != NULL; area = area->cache_next) {
-		if (area != ignoreArea
-			&& (area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0)
-			count++;
-	}
-
-	return count;
-}

static status_t
vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
	bool kernel)
@@ -1799,7 +1772,7 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
		// writable -> !writable

		if (cache->source != NULL && cache->temporary) {
-			if (count_writable_areas(cache, area) == 0) {
+			if (cache->CountWritableAreas(area) == 0) {
				// Since this cache now lives from the pages in its source cache,
				// we can change the cache's commitment to take only those pages
				// into account that really are in this cache.
@@ -3325,6 +3298,8 @@ vm_init(kernel_args* args)
	TRACE(("vm_init: exit\n"));

+	vm_cache_init_post_heap();
+
	return err;
}
@@ -4744,7 +4719,7 @@ transfer_area(area_id id, void** _address, uint32 addressSpec, team_id target,
	if (info.team != thread_get_current_thread()->team->id)
		return B_PERMISSION_DENIED;

	area_id clonedArea = vm_clone_area(target, info.name, _address,
		addressSpec, info.protection, REGION_NO_PRIVATE_MAP, id, kernel);
	if (clonedArea < 0)