From ebdb9dfebfb9db7d46625c73ba8ca35185a3f1aa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Axel=20D=C3=B6rfler?= <axeld@pinc-software.de>
Date: Thu, 15 Dec 2005 21:35:19 +0000
Subject: [PATCH] Blocks are now put onto an unused list once they are no
 longer referenced.

When memory gets low, the blocks on that list are freed as well - but right
now, that only happens when a block is put back, not directly on demand
(i.e. via a low memory handler).

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15552 a95241bf-73f2-0310-859d-f6bbb57e9c96
---
 src/system/kernel/cache/block_allocator.cpp   | 48 ++++++++++++-
 src/system/kernel/cache/block_cache.cpp       | 68 +++++++++++++++++--
 src/system/kernel/cache/block_cache_private.h | 13 ++--
 3 files changed, 118 insertions(+), 11 deletions(-)
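Before the diff itself, here is the shape of the change: once a block's last
reference is put back (and it belongs to no transaction), it is parked on a
new unused_blocks list instead of being freed; every put then trims that list
by 1, 5, or 20 blocks for the B_LOW_MEMORY_NOTE/WARNING/CRITICAL states, and
by nothing when memory is fine. The following is a minimal user-space sketch
of that policy, not the kernel code - Pressure, Block, Cache and their
members are illustrative stand-ins:

// Sketch only: models the policy this patch adds to the block cache.
#include <cstdint>
#include <list>

enum Pressure { kNoLowMemory, kNote, kWarning, kCritical };

struct Block {
	int64_t number;
	int32_t refCount = 1;
	bool unused = false;
};

struct Cache {
	std::list<Block*> unusedBlocks;	// oldest entries at the front

	// Mirrors put_cached_block(): park the block once the last
	// reference is gone, then trim according to memory pressure.
	void Put(Block* block, Pressure pressure)
	{
		if (--block->refCount == 0) {
			block->unused = true;
			unusedBlocks.push_back(block);
		}

		int32_t trim = 0;
		switch (pressure) {
			case kNoLowMemory:
				return;			// enough memory left, free nothing
			case kNote:
				trim = 1;
				break;
			case kWarning:
				trim = 5;
				break;
			case kCritical:
				trim = 20;
				break;
		}
		RemoveUnusedBlocks(trim);
	}

	// Mirrors block_cache::RemoveUnusedBlocks(): oldest blocks go first.
	void RemoveUnusedBlocks(int32_t count)
	{
		while (count-- > 0 && !unusedBlocks.empty()) {
			Block* block = unusedBlocks.front();
			unusedBlocks.pop_front();
			delete block;
		}
	}
};

Note the consequence the commit message warns about: trimming only runs
inside Put(), so a cache that nobody touches never shrinks, no matter how low
memory gets - hooking this into a low memory handler is still open.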
diff --git a/src/system/kernel/cache/block_allocator.cpp b/src/system/kernel/cache/block_allocator.cpp
index 3271058761..66862b0f39 100644
--- a/src/system/kernel/cache/block_allocator.cpp
+++ b/src/system/kernel/cache/block_allocator.cpp
@@ -182,7 +182,7 @@ block_range::Hash(void *_blockRange, const void *_address, uint32 range)
 
 /* static */
 status_t
-block_range::NewBlockRange(block_cache *cache, block_range **_range)
+block_range::New(block_cache *cache, block_range **_range)
 {
 	addr_t address = sBlockAddressPool.Get();
 	if (address == NULL)
@@ -208,6 +208,37 @@ block_range::NewBlockRange(block_cache *cache, block_range **_range)
 }
 
 
+/*static*/
+void
+block_range::Delete(block_cache *cache, block_range *range)
+{
+	// unmap the memory
+
+	vm_address_space *addressSpace = vm_get_kernel_aspace();
+	vm_translation_map *map = &addressSpace->translation_map;
+
+	map->ops->lock(map);
+	map->ops->unmap(map, range->base, range->base + kBlockRangeSize - 1);
+	map->ops->unlock(map);
+
+	vm_put_aspace(addressSpace);
+
+	sBlockAddressPool.Put(range->base);
+
+	// free pages
+
+	uint32 numPages = kBlockRangeSize / B_PAGE_SIZE;
+	for (uint32 i = 0; i < numPages; i++) {
+		if (range->pages[i] == NULL)
+			continue;
+
+		vm_page_set_state(range->pages[i], PAGE_STATE_FREE);
+	}
+
+	free(range);
+}
+
+
 uint32
 block_range::BlockIndex(block_cache *cache, void *address)
 {
@@ -390,6 +421,21 @@ block_range::Free(block_cache *cache, void *address)
 }
 
 
+bool
+block_range::Unused(const block_cache *cache) const
+{
+	if (used_mask != 0)
+		return false;
+
+	for (int32 chunk = cache->chunks_per_range; chunk-- > 0;) {
+		if (chunks[chunk].used_mask != 0)
+			return false;
+	}
+
+	return true;
+}
+
+
 // #pragma mark -
 
 
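The new block_range::Unused() above decides whether a whole range can be torn
down (unmapped and its pages freed) by checking that neither the range's own
used_mask nor any chunk's used_mask has a bit set. A self-contained sketch of
that check - Chunk, Range, and the fixed chunk count are simplified stand-ins
for the kernel's per-cache layout:

// Sketch only: the "is every allocation bit clear?" test from above.
#include <cstdint>

struct Chunk {
	uint32_t usedMask;			// one bit per allocated block
};

struct Range {
	uint32_t usedMask;			// blocks allocated directly in the range
	Chunk chunks[4];			// fixed here; per-cache in the real code

	// Same shape as block_range::Unused(): the range may only be
	// deleted when no allocation bit is set anywhere.
	bool IsUnused(int32_t chunksPerRange) const
	{
		if (usedMask != 0)
			return false;

		for (int32_t chunk = chunksPerRange; chunk-- > 0;) {
			if (chunks[chunk].usedMask != 0)
				return false;
		}
		return true;
	}
};

block_cache::Free() and block_cache::FreeBlock() in the next file call this
after every deallocation, so a range is handed back to the address pool as
soon as its last block goes away.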
diff --git a/src/system/kernel/cache/block_cache.cpp b/src/system/kernel/cache/block_cache.cpp
index 061de92eb2..b8cb742a17 100644
--- a/src/system/kernel/cache/block_cache.cpp
+++ b/src/system/kernel/cache/block_cache.cpp
@@ -11,6 +11,7 @@
 
 #include
 #include
+#include <vm_low_memory.h>
 #include
 #include
 #include
@@ -208,7 +209,7 @@ block_cache::GetFreeRange()
 
 	// we need to allocate a new range
 	block_range *range;
-	if (block_range::NewBlockRange(this, &range) != B_OK) {
+	if (block_range::New(this, &range) != B_OK) {
 		// ToDo: free up space in existing ranges
 		// We may also need to free ranges from other caches to get a free one
 		// (if not, an active volume might have stolen all free ranges already)
@@ -236,6 +237,9 @@ block_cache::Free(void *address)
 	block_range *range = GetRange(address);
 	ASSERT(range != NULL);
 	range->Free(this, address);
+
+	if (range->Unused(this))
+		block_range::Delete(this, range);
 }
 
 
@@ -263,6 +267,9 @@ block_cache::FreeBlock(cached_block *block)
 	Free(block->compare);
#endif
 
+	if (range->Unused(this))
+		block_range::Delete(this, range);
+
 	free(block);
 }
 
 
@@ -283,12 +290,13 @@ block_cache::NewBlock(off_t blockNumber)
 	range->Allocate(this, block);
 
 	block->block_number = blockNumber;
-	block->lock = 0;
+	block->ref_count = 0;
 	block->transaction_next = NULL;
 	block->transaction = block->previous_transaction = NULL;
 	block->original = NULL;
 	block->parent_data = NULL;
 	block->is_dirty = false;
+	block->unused = false;
 #ifdef DEBUG_CHANGED
 	block->compare = NULL;
 #endif
@@ -299,6 +307,23 @@ block_cache::NewBlock(off_t blockNumber)
 }
 
 
+void
+block_cache::RemoveUnusedBlocks(int32 count)
+{
+	while (count-- >= 0) {
+		cached_block *block = unused_blocks.First();
+		if (block == NULL)
+			break;
+
+		// remove block from lists
+		unused_blocks.Remove(block);
+		hash_remove(hash, block);
+
+		FreeBlock(block);
+	}
+}
+
+
 #ifdef DEBUG_CHANGED
 
 #define DUMPED_BLOCK_SIZE 16
@@ -357,9 +382,34 @@ put_cached_block(block_cache *cache, cached_block *block)
 	}
 #endif
 
-	if (--block->lock == 0)
-		;
+	if (--block->ref_count == 0
+		&& block->transaction == NULL
+		&& block->previous_transaction == NULL) {
+		// put this block in the list of unused blocks
+		block->unused = true;
+		cache->unused_blocks.Add(block);
 //		block->data = cache->allocator->Release(block->data);
+	}
+
+	// free some blocks according to the low memory state
+	// (if there is enough memory left, we don't free any)
+
+	int32 free = 1;
+	switch (vm_low_memory_state()) {
+		case B_NO_LOW_MEMORY:
+			return;
+		case B_LOW_MEMORY_NOTE:
+			free = 1;
+			break;
+		case B_LOW_MEMORY_WARNING:
+			free = 5;
+			break;
+		case B_LOW_MEMORY_CRITICAL:
+			free = 20;
+			break;
+	}
+
+	cache->RemoveUnusedBlocks(free);
 }
 
 
@@ -387,7 +437,7 @@ get_cached_block(block_cache *cache, off_t blockNumber, bool &allocated, bool re
 		allocated = true;
 	} else {
 /*
-		if (block->lock == 0 && block->data != NULL) {
+		if (block->ref_count == 0 && block->data != NULL) {
 			// see if the old block can be resurrected
 			block->data = cache->allocator->Acquire(block->data);
 		}
@@ -412,7 +462,13 @@ get_cached_block(block_cache *cache, off_t blockNumber, bool &allocated, bool re
 		}
 	}
 
-	block->lock++;
+	if (block->unused) {
+		TRACE(("remove block %Ld from unused\n", blockNumber));
+		block->unused = false;
+		cache->unused_blocks.Remove(block);
+	}
+
+	block->ref_count++;
 
 	return block;
 }
diff --git a/src/system/kernel/cache/block_cache_private.h b/src/system/kernel/cache/block_cache_private.h
index c42b5cbc3d..2ef8e2c44d 100644
--- a/src/system/kernel/cache/block_cache_private.h
+++ b/src/system/kernel/cache/block_cache_private.h
@@ -31,7 +31,7 @@ typedef DoublyLinkedListLink<cached_block> block_link;
 struct cached_block {
 	cached_block	*next;			// next in hash
 	cached_block	*transaction_next;
-	block_link		previous_transaction_link;
+	block_link		link;
 	cached_block	*chunk_next;
 	block_chunk		*chunk;
 	off_t			block_number;
@@ -42,10 +42,10 @@ struct cached_block {
 	void			*compare;
 #endif
 	int32			ref_count;
-	int32			lock;
 	bool			busy : 1;
 	bool			is_writing : 1;
 	bool			is_dirty : 1;
+	bool			unused : 1;
 	bool			unmapped : 1;
 	cache_transaction *transaction;
 	cache_transaction *previous_transaction;
@@ -56,7 +56,7 @@
 
 typedef DoublyLinkedList<cached_block,
 	DoublyLinkedListMemberGetLink<cached_block,
-		&cached_block::previous_transaction_link> > block_list;
+		&cached_block::link> > block_list;
 
 struct block_chunk {
 	cached_block	*blocks;
@@ -72,7 +72,8 @@ struct block_range {
 	vm_page			*pages[kNumBlockRangePages];
 	block_chunk		chunks[0];
 
-	static status_t NewBlockRange(block_cache *cache, block_range **_range);
+	static status_t New(block_cache *cache, block_range **_range);
+	static void Delete(block_cache *cache, block_range *range);
 
 	status_t Allocate(block_cache *cache, cached_block *block);
 	void Free(block_cache *cache, cached_block *block);
@@ -84,6 +85,8 @@ struct block_range {
 	uint32 ChunkIndex(block_cache *cache, void *address);
 	block_chunk *Chunk(block_cache *cache, void *address);
 
+	bool Unused(const block_cache *cache) const;
+
 	static int Compare(void *_blockRange, const void *_address);
 	static uint32 Hash(void *_blockRange, const void *_address, uint32 range);
 
@@ -107,6 +110,7 @@ struct block_cache {
 	uint32			range_mask;
 	uint32			chunk_mask;
 	block_list		unmapped_blocks;
+	block_list		unused_blocks;
 
 	block_cache(int fd, off_t numBlocks, size_t blockSize);
 	~block_cache();
@@ -115,6 +119,7 @@ struct block_cache {
 	block_range *GetFreeRange();
 	block_range *GetRange(void *address);
+	void RemoveUnusedBlocks(int32 count = LONG_MAX);
 	void FreeBlock(cached_block *block);
 	cached_block *NewBlock(off_t blockNumber);
 	void Free(void *address);
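A closing note on the header changes: previous_transaction_link was renamed
to the general-purpose link, so the same cached_block can now be queued
either on a transaction's list or on the new unused_blocks list - never both
at once, since put_cached_block() only parks blocks that belong to no
transaction. For readers unfamiliar with the pattern, here is a hand-rolled
sketch of such an intrusive member-link list; it assumes nothing from
Haiku's private DoublyLinkedList templates and only shows why one embedded
link per block is enough:

// Sketch only: an intrusive doubly linked list via an embedded link member.
struct Node;

struct Link {
	Node* next = nullptr;
	Node* previous = nullptr;
};

struct Node {
	long number;
	Link link;				// plays the role of cached_block::link
};

struct List {
	Node* head = nullptr;
	Node* tail = nullptr;

	// Append at the tail; no allocation needed, the node carries its link.
	void Add(Node* node)
	{
		node->link.previous = tail;
		node->link.next = nullptr;
		if (tail != nullptr)
			tail->link.next = node;
		else
			head = node;
		tail = node;
	}

	// Unlink from anywhere in O(1), as unused_blocks.Remove() does.
	void Remove(Node* node)
	{
		if (node->link.previous != nullptr)
			node->link.previous->link.next = node->link.next;
		else
			head = node->link.next;
		if (node->link.next != nullptr)
			node->link.next->link.previous = node->link.previous;
		else
			tail = node->link.previous;
	}

	Node* First() const { return head; }
};

Because the link lives inside the block, adding a block to unused_blocks in
put_cached_block() and removing it again in get_cached_block() cannot fail
for lack of memory - a useful property on the low-memory path this patch is
about.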