Blocks are now put into an unused list when they are no longer in use.

When memory gets low, the blocks on that list are freed as well - but right
now, that only happens when a block is put back (not directly on demand,
i.e. via a low memory handler).


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15552 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler, 2005-12-15 21:35:19 +00:00
commit ebdb9dfebf (parent 1534aa236c)
3 changed files with 118 additions and 11 deletions
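The message above describes the whole mechanism: a block whose last reference
is put back is parked on a per-cache unused list, and the put-back path then
frees a number of parked blocks that depends on the current low memory level.
Below is a minimal, self-contained sketch of that policy; the names
(LowMemoryState, ToyBlock, sUnusedBlocks, put_back_and_trim) are illustrative
stand-ins, not the kernel's API.

#include <cstdint>
#include <cstdio>
#include <deque>

// Illustrative stand-ins for the kernel's low memory levels.
enum class LowMemoryState { None, Note, Warning, Critical };

struct ToyBlock { int64_t number; };

// Blocks land here when their last reference is put back.
static std::deque<ToyBlock*> sUnusedBlocks;

// Mirrors the escalation policy added to put_cached_block():
// free nothing while memory is fine, progressively more under pressure.
static void put_back_and_trim(ToyBlock* block, LowMemoryState state)
{
    sUnusedBlocks.push_back(block);    // park the now-unreferenced block

    int32_t free = 1;
    switch (state) {
        case LowMemoryState::None:
            return;                    // enough memory left: free nothing
        case LowMemoryState::Note:
            free = 1;
            break;
        case LowMemoryState::Warning:
            free = 5;
            break;
        case LowMemoryState::Critical:
            free = 20;
            break;
    }

    while (free-- > 0 && !sUnusedBlocks.empty()) {
        ToyBlock* victim = sUnusedBlocks.front();
        sUnusedBlocks.pop_front();
        std::printf("freeing unused block %lld\n", (long long)victim->number);
        delete victim;
    }
}

As the message notes, the sketch shares the commit's stated limitation: the
freeing step only ever runs inside the put-back path, so nothing is released
on demand when memory actually runs low between put-backs.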

[File 1 of 3: block range allocator]

@@ -182,7 +182,7 @@ block_range::Hash(void *_blockRange, const void *_address, uint32 range)
 /* static */
 status_t
-block_range::NewBlockRange(block_cache *cache, block_range **_range)
+block_range::New(block_cache *cache, block_range **_range)
 {
     addr_t address = sBlockAddressPool.Get();
     if (address == NULL)
@@ -208,6 +208,37 @@ block_range::NewBlockRange(block_cache *cache, block_range **_range)
 }
 
+/*static*/
+void
+block_range::Delete(block_cache *cache, block_range *range)
+{
+    // unmap the memory
+    vm_address_space *addressSpace = vm_get_kernel_aspace();
+    vm_translation_map *map = &addressSpace->translation_map;
+
+    map->ops->lock(map);
+    map->ops->unmap(map, range->base, range->base + kBlockRangeSize - 1);
+    map->ops->unlock(map);
+
+    vm_put_aspace(addressSpace);
+    sBlockAddressPool.Put(range->base);
+
+    // free pages
+    uint32 numPages = kBlockRangeSize / B_PAGE_SIZE;
+    for (uint32 i = 0; i < numPages; i++) {
+        if (range->pages[i] == NULL)
+            continue;
+
+        vm_page_set_state(range->pages[i], PAGE_STATE_FREE);
+    }
+
+    free(range);
+}
+
 uint32
 block_range::BlockIndex(block_cache *cache, void *address)
 {
@@ -390,6 +421,21 @@ block_range::Free(block_cache *cache, void *address)
 }
 
+bool
+block_range::Unused(const block_cache *cache) const
+{
+    if (used_mask != 0)
+        return false;
+
+    for (int32 chunk = cache->chunks_per_range; chunk-- > 0;) {
+        if (chunks[chunk].used_mask != 0)
+            return false;
+    }
+
+    return true;
+}
+
 // #pragma mark -
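block_range::Unused() above answers "can this whole range be torn down?" from
occupancy bitmasks alone, with no block traversal. Here is a toy version of
the same check; the sizes and field layout (ToyChunk, ToyRange, four chunks)
are invented for illustration.

#include <cstdint>

// Toy occupancy check in the style of block_range::Unused(): a range
// may be torn down only if no range-level bit and no per-chunk bit is
// still set.
struct ToyChunk { uint32_t used_mask = 0; };

struct ToyRange {
    uint32_t used_mask = 0;   // range-level occupancy bits
    ToyChunk chunks[4];       // per-chunk occupancy bits

    bool Unused(int32_t chunksPerRange) const
    {
        if (used_mask != 0)
            return false;

        for (int32_t chunk = chunksPerRange; chunk-- > 0;) {
            if (chunks[chunk].used_mask != 0)
                return false;
        }
        return true;
    }
};

Because Allocate() and Free() keep the masks current, Free() and FreeBlock()
can afford to run this test on every release and call block_range::Delete()
the moment a range empties.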

[File 2 of 3: block cache implementation]

@@ -11,6 +11,7 @@
 #include <block_cache.h>
 #include <lock.h>
+#include <vm_low_memory.h>
 
 #include <util/kernel_cpp.h>
 #include <util/DoublyLinkedList.h>
 #include <util/AutoLock.h>
@@ -208,7 +209,7 @@ block_cache::GetFreeRange()
     // we need to allocate a new range
     block_range *range;
-    if (block_range::NewBlockRange(this, &range) != B_OK) {
+    if (block_range::New(this, &range) != B_OK) {
         // ToDo: free up space in existing ranges
         // We may also need to free ranges from other caches to get a free one
         // (if not, an active volume might have stolen all free ranges already)
@@ -236,6 +237,9 @@ block_cache::Free(void *address)
     block_range *range = GetRange(address);
     ASSERT(range != NULL);
     range->Free(this, address);
+
+    if (range->Unused(this))
+        block_range::Delete(this, range);
 }
@@ -263,6 +267,9 @@ block_cache::FreeBlock(cached_block *block)
     Free(block->compare);
 #endif
 
+    if (range->Unused(this))
+        block_range::Delete(this, range);
+
     free(block);
 }
@@ -283,12 +290,13 @@ block_cache::NewBlock(off_t blockNumber)
     range->Allocate(this, block);
 
     block->block_number = blockNumber;
-    block->lock = 0;
+    block->ref_count = 0;
     block->transaction_next = NULL;
     block->transaction = block->previous_transaction = NULL;
     block->original = NULL;
     block->parent_data = NULL;
     block->is_dirty = false;
+    block->unused = false;
 #ifdef DEBUG_CHANGED
     block->compare = NULL;
 #endif
@@ -299,6 +307,23 @@ block_cache::NewBlock(off_t blockNumber)
 }
 
+void
+block_cache::RemoveUnusedBlocks(int32 count)
+{
+    while (count-- >= 0) {
+        cached_block *block = unused_blocks.First();
+        if (block == NULL)
+            break;
+
+        // remove block from lists
+        unused_blocks.Remove(block);
+        hash_remove(hash, block);
+
+        FreeBlock(block);
+    }
+}
+
 #ifdef DEBUG_CHANGED
 #define DUMPED_BLOCK_SIZE 16
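RemoveUnusedBlocks() above unlinks a block from every lookup structure (the
unused list and the block hash) before handing it to FreeBlock(), so no stale
pointer survives the free. A toy version of that ordering follows, using
standard containers in place of the kernel's intrusive list and hash table;
all names here are made up for illustration.

#include <cstdint>
#include <list>
#include <unordered_map>

struct ToyBlock { int64_t number; };

// Free up to 'count' parked blocks, making sure each one disappears
// from every lookup structure before it is actually freed.
void remove_unused_blocks(std::list<ToyBlock*>& unusedList,
    std::unordered_map<int64_t, ToyBlock*>& hash, int32_t count)
{
    while (count-- > 0) {
        if (unusedList.empty())
            break;

        ToyBlock* block = unusedList.front();
        unusedList.pop_front();        // off the unused list...
        hash.erase(block->number);     // ...and out of the hash...
        delete block;                  // ...only then freed
    }
}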
@@ -357,9 +382,34 @@ put_cached_block(block_cache *cache, cached_block *block)
     }
 #endif
 
-    if (--block->lock == 0)
-        ;
+    if (--block->ref_count == 0
+        && block->transaction == NULL
+        && block->previous_transaction == NULL) {
+        // put this block in the list of unused blocks
+        block->unused = true;
+        cache->unused_blocks.Add(block);
 //      block->data = cache->allocator->Release(block->data);
+    }
+
+    // free some blocks according to the low memory state
+    // (if there is enough memory left, we don't free any)
+    int32 free = 1;
+    switch (vm_low_memory_state()) {
+        case B_NO_LOW_MEMORY:
+            return;
+        case B_LOW_MEMORY_NOTE:
+            free = 1;
+            break;
+        case B_LOW_MEMORY_WARNING:
+            free = 5;
+            break;
+        case B_LOW_MEMORY_CRITICAL:
+            free = 20;
+            break;
+    }
+
+    cache->RemoveUnusedBlocks(free);
 }
@@ -387,7 +437,7 @@ get_cached_block(block_cache *cache, off_t blockNumber, bool &allocated, bool re
         allocated = true;
     } else {
 /*
-        if (block->lock == 0 && block->data != NULL) {
+        if (block->ref_count == 0 && block->data != NULL) {
             // see if the old block can be resurrected
             block->data = cache->allocator->Acquire(block->data);
         }
@@ -412,7 +462,13 @@ get_cached_block(block_cache *cache, off_t blockNumber, bool &allocated, bool re
         }
     }
 
-    block->lock++;
+    if (block->unused) {
+        TRACE(("remove block %Ld from unused\n", blockNumber));
+        block->unused = false;
+        cache->unused_blocks.Remove(block);
+    }
+
+    block->ref_count++;
     return block;
 }
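In get_cached_block() above, the new unused flag records list membership on
the block itself, so a cache hit can tell in O(1) whether the block first has
to be pulled off the unused list before its reference count goes back up. A
sketch of that revival step, with illustrative types (std::list standing in
for the kernel's intrusive block_list):

#include <cstdint>
#include <list>

struct ToyCachedBlock {
    int64_t number = 0;
    int32_t ref_count = 0;
    bool unused = false;   // true while the block sits on the unused list
};

// On a cache hit, revive a parked block before referencing it again.
void revive_on_hit(std::list<ToyCachedBlock*>& unusedList,
    ToyCachedBlock* block)
{
    if (block->unused) {
        block->unused = false;
        unusedList.remove(block);   // O(n) here; O(1) with an intrusive list
    }
    block->ref_count++;
}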

[File 3 of 3: block cache private header]

@@ -31,7 +31,7 @@ typedef DoublyLinkedListLink<cached_block> block_link;
 struct cached_block {
     cached_block *next;         // next in hash
     cached_block *transaction_next;
-    block_link link; // previous_transaction_link
+    block_link link;
     cached_block *chunk_next;
     block_chunk *chunk;
     off_t block_number;
@@ -42,10 +42,10 @@ struct cached_block {
     void *compare;
 #endif
     int32 ref_count;
-    int32 lock;
     bool busy : 1;
     bool is_writing : 1;
     bool is_dirty : 1;
+    bool unused : 1;
     bool unmapped : 1;
 
     cache_transaction *transaction;
     cache_transaction *previous_transaction;
@@ -56,7 +56,7 @@ struct cached_block {
 typedef DoublyLinkedList<cached_block,
     DoublyLinkedListMemberGetLink<cached_block,
-        &cached_block::previous_transaction_link> > block_list;
+        &cached_block::link> > block_list;
 
 struct block_chunk {
     cached_block *blocks;
@@ -72,7 +72,8 @@ struct block_range {
     vm_page *pages[kNumBlockRangePages];
     block_chunk chunks[0];
 
-    static status_t NewBlockRange(block_cache *cache, block_range **_range);
+    static status_t New(block_cache *cache, block_range **_range);
+    static void Delete(block_cache *cache, block_range *range);
 
     status_t Allocate(block_cache *cache, cached_block *block);
     void Free(block_cache *cache, cached_block *block);
@@ -84,6 +85,8 @@ struct block_range {
     uint32 ChunkIndex(block_cache *cache, void *address);
     block_chunk *Chunk(block_cache *cache, void *address);
 
+    bool Unused(const block_cache *cache) const;
+
     static int Compare(void *_blockRange, const void *_address);
     static uint32 Hash(void *_blockRange, const void *_address, uint32 range);
@@ -107,6 +110,7 @@ struct block_cache {
     uint32 range_mask;
     uint32 chunk_mask;
     block_list unmapped_blocks;
+    block_list unused_blocks;
 
     block_cache(int fd, off_t numBlocks, size_t blockSize);
     ~block_cache();
@@ -115,6 +119,7 @@ struct block_cache {
     block_range *GetFreeRange();
     block_range *GetRange(void *address);
+    void RemoveUnusedBlocks(int32 count = LONG_MAX);
     void FreeBlock(cached_block *block);
     cached_block *NewBlock(off_t blockNumber);
     void Free(void *address);
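A note on the header changes: renaming previous_transaction_link to the
generic link suggests the single embedded link now serves whichever list
currently owns the block (transaction, unmapped, or unused), which works
because those memberships are mutually exclusive; put_cached_block() only
parks a block as unused when it belongs to no transaction. Below is a
simplified sketch of this member-link intrusive list pattern, modeled loosely
on DoublyLinkedListMemberGetLink but not Haiku's actual implementation.

#include <cstdio>

// Simplified member-link intrusive list, in the spirit of
// DoublyLinkedList<T, DoublyLinkedListMemberGetLink<...> >.
template<typename T>
struct Link { T *next = nullptr, *prev = nullptr; };

template<typename T, Link<T> T::*LinkMember>
struct IntrusiveList {
    T *head = nullptr, *tail = nullptr;

    void Add(T *item)                  // append at the tail
    {
        Link<T> &link = item->*LinkMember;
        link.prev = tail;
        link.next = nullptr;
        if (tail != nullptr)
            (tail->*LinkMember).next = item;
        else
            head = item;
        tail = item;
    }

    void Remove(T *item)               // O(1): no traversal needed
    {
        Link<T> &link = item->*LinkMember;
        if (link.prev != nullptr)
            (link.prev->*LinkMember).next = link.next;
        else
            head = link.next;
        if (link.next != nullptr)
            (link.next->*LinkMember).prev = link.prev;
        else
            tail = link.prev;
    }

    T *First() { return head; }
};

struct ToyBlock {
    long number = 0;
    Link<ToyBlock> link;   // one link, reused by mutually exclusive lists
};

int main()
{
    IntrusiveList<ToyBlock, &ToyBlock::link> unusedBlocks;
    ToyBlock a, b;
    a.number = 1;
    b.number = 2;
    unusedBlocks.Add(&a);
    unusedBlocks.Add(&b);
    unusedBlocks.Remove(&a);
    std::printf("first unused block: %ld\n", unusedBlocks.First()->number);
    return 0;
}

The embedded link is what makes cache->unused_blocks.Remove(block) in
get_cached_block() cheap: the block carries its own list pointers, so no
search is needed to unlink it.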