diff --git a/build/config_headers/tracing_config.h b/build/config_headers/tracing_config.h
index 4b76b2b0aa..7c4c11f42f 100644
--- a/build/config_headers/tracing_config.h
+++ b/build/config_headers/tracing_config.h
@@ -28,6 +28,8 @@
 #define KERNEL_HEAP_TRACING			0
 #define KTRACE_PRINTF_STACK_TRACE		0	/* stack trace depth */
 #define PAGE_ALLOCATION_TRACING		0
+#define PAGE_DAEMON_TRACING			0
+#define PAGE_WRITER_TRACING			0
 #define PARANOIA_TRACING				0
 #define PARANOIA_TRACING_STACK_TRACE	0	/* stack trace depth */
 #define OBJECT_CACHE_TRACING			0
diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp
index eb2b2aa47c..31d4584b7a 100644
--- a/src/system/kernel/vm/vm.cpp
+++ b/src/system/kernel/vm/vm.cpp
@@ -1529,6 +1529,9 @@ map_backing_store(vm_address_space *addressSpace, vm_cache *cache,
 	// grab a ref to the address space (the area holds this)
 	atomic_add(&addressSpace->ref_count, 1);
 
+//	ktrace_printf("map_backing_store: cache: %p (source: %p), \"%s\" -> %p",
+//		cache, sourceCache, areaName, area);
+
 	*_area = area;
 	return B_OK;
 
@@ -4717,6 +4720,10 @@ vm_soft_fault(vm_address_space *addressSpace, addr_t originalAddress,
 		return B_BAD_ADDRESS;
 	}
 
+//	ktrace_printf("page fault: %s %#lx, %s, area: %p",
+//		isWrite ? "write" : "read", originalAddress, isUser ? "user" : "kernel",
+//		area);
+
 	// check permissions
 	uint32 protection = get_area_page_protection(area, address);
 	if (isUser && (protection & B_USER_PROTECTION) == 0) {
diff --git a/src/system/kernel/vm/vm_daemons.cpp b/src/system/kernel/vm/vm_daemons.cpp
index f28f85a7db..a640821f25 100644
--- a/src/system/kernel/vm/vm_daemons.cpp
+++ b/src/system/kernel/vm/vm_daemons.cpp
@@ -12,6 +12,7 @@
 #include 
 
+#include 
 #include 
 #include 
 #include 
@@ -109,6 +110,84 @@ PageCacheLocker::Unlock()
 // #pragma mark -
 
 
+#if PAGE_DAEMON_TRACING
+
+namespace PageDaemonTracing {
+
+class ActivatePage : public AbstractTraceEntry {
+	public:
+		ActivatePage(vm_page* page)
+			:
+			fCache(page->cache),
+			fPage(page)
+		{
+			Initialized();
+		}
+
+		virtual void AddDump(TraceOutput& out)
+		{
+			out.Print("page activated: %p, cache: %p", fPage, fCache);
+		}
+
+	private:
+		VMCache*	fCache;
+		vm_page*	fPage;
+};
+
+
+class DeactivatePage : public AbstractTraceEntry {
+	public:
+		DeactivatePage(vm_page* page)
+			:
+			fCache(page->cache),
+			fPage(page)
+		{
+			Initialized();
+		}
+
+		virtual void AddDump(TraceOutput& out)
+		{
+			out.Print("page deactivated: %p, cache: %p", fPage, fCache);
+		}
+
+	private:
+		VMCache*	fCache;
+		vm_page*	fPage;
+};
+
+
+class FreedPageSwap : public AbstractTraceEntry {
+	public:
+		FreedPageSwap(vm_page* page)
+			:
+			fCache(page->cache),
+			fPage(page)
+		{
+			Initialized();
+		}
+
+		virtual void AddDump(TraceOutput& out)
+		{
+			out.Print("page swap freed: %p, cache: %p", fPage, fCache);
+		}
+
+	private:
+		VMCache*	fCache;
+		vm_page*	fPage;
+};
+
+}	// namespace PageDaemonTracing
+
+#	define T(x)	new(std::nothrow) PageDaemonTracing::x
+
+#else
+#	define T(x)
+#endif	// PAGE_DAEMON_TRACING
+
+
+// #pragma mark -
+
+
 #ifdef TRACK_PAGE_USAGE_STATS
 
 static void
@@ -205,6 +284,7 @@ check_page_activation(int32 index)
 		vm_page_set_state(page, PAGE_STATE_ACTIVE);
 		page->usage_count = 1;
 		TRACE(("page %p -> move to active\n", page));
+		T(ActivatePage(page));
 	} else if (page->usage_count < 127)
 		page->usage_count++;
 
@@ -237,6 +317,7 @@ check_page_activation(int32 index)
 		else
 			vm_page_set_state(page, PAGE_STATE_INACTIVE);
 		TRACE(("page %p -> move to inactive\n", page));
+		T(DeactivatePage(page));
 	}
 
 	return true;
@@ -260,6 +341,7 @@ free_page_swap_space(int32 index)
 			// We need to mark the page modified, since otherwise it could be
 			// stolen and we'd lose its data.
 			vm_page_set_state(page, PAGE_STATE_MODIFIED);
+			T(FreedPageSwap(page));
 			return true;
 		}
 	}
@@ -299,7 +381,7 @@ page_daemon(void* /*unused*/)
 			* pagesLeft / sLowPagesCount;
 		uint32 leftToFree = sLowPagesCount - pagesLeft;
 		TRACE(("wait interval %Ld, scan pages %lu, free %lu, "
-			"target %lu\n", scanWaitInterval, scanPagesCount,
+			"target %lu\n", scanWaitInterval, scanPagesCount, pagesLeft,
 			leftToFree));
 
 		for (uint32 i = 0; i < scanPagesCount && leftToFree > 0; i++) {
diff --git a/src/system/kernel/vm/vm_page.cpp b/src/system/kernel/vm/vm_page.cpp
index a63545cd78..68f2ae7443 100644
--- a/src/system/kernel/vm/vm_page.cpp
+++ b/src/system/kernel/vm/vm_page.cpp
@@ -230,6 +230,39 @@ class StolenPage : public AbstractTraceEntry {
 #endif	// PAGE_ALLOCATION_TRACING
 
 
+#if PAGE_WRITER_TRACING
+
+namespace PageWriterTracing {
+
+class WritePage : public AbstractTraceEntry {
+	public:
+		WritePage(vm_page* page)
+			:
+			fCache(page->cache),
+			fPage(page)
+		{
+			Initialized();
+		}
+
+		virtual void AddDump(TraceOutput& out)
+		{
+			out.Print("page write: %p, cache: %p", fPage, fCache);
+		}
+
+	private:
+		VMCache*	fCache;
+		vm_page*	fPage;
+};
+
+}	// namespace PageWriterTracing
+
+#	define TPW(x)	new(std::nothrow) PageWriterTracing::x
+
+#else
+#	define TPW(x)
+#endif	// PAGE_WRITER_TRACING
+
+
 /*!	Dequeues a page from the head of the given queue */
 static vm_page *
 dequeue_page(page_queue *queue)
@@ -1222,6 +1255,8 @@ page_writer(void* /*unused*/)
 		locker.Unlock();
 
 //dprintf("write page %p, cache %p (%ld)\n", page, page->cache, page->cache->ref_count);
+		TPW(WritePage(page));
+
 		vm_clear_map_flags(page, PAGE_MODIFIED);
 		cache->AcquireRefLocked();
 		numPages++;