block_range::Delete() did not remove the block_range from the free list,
so a deleted range could be handed out again, which led to a crash.
Changed the free ranges list from a singly linked list to a doubly linked
list, so that the whole list no longer has to be searched for the range
that is being removed.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15696 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2005-12-28 16:40:05 +00:00
parent 3dcaf063c2
commit 529bf1c6d7
3 changed files with 17 additions and 25 deletions
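
For readers unfamiliar with the pattern, here is a minimal standalone sketch
of the intrusive doubly linked list idea this commit adopts. This is not
Haiku's actual DoublyLinkedList.h implementation, and the types here only
loosely mirror the ones in the diff; it just shows why Remove() no longer
needs to walk the list: each element embeds its own previous/next pointers.

// Minimal sketch (not Haiku's DoublyLinkedList.h) of an intrusive doubly
// linked free list: each element carries its own link, so Remove()
// unlinks in O(1) instead of searching from the head of the list.
#include <cstdio>

struct block_range;

struct range_link {
	block_range	*previous;
	block_range	*next;
};

struct block_range {
	range_link	link;
	int			id;	// stand-in for the real fields (base, used_mask, ...)
};

struct range_list {
	block_range	*head = nullptr;

	bool IsEmpty() const { return head == nullptr; }
	block_range *First() const { return head; }

	void Add(block_range *range)
	{
		// push to the front, as the old free_next code did
		range->link.previous = nullptr;
		range->link.next = head;
		if (head != nullptr)
			head->link.previous = range;
		head = range;
	}

	void Remove(block_range *range)
	{
		// O(1): no traversal needed, unlike the loop removed from Allocate()
		if (range->link.previous != nullptr)
			range->link.previous->link.next = range->link.next;
		else
			head = range->link.next;
		if (range->link.next != nullptr)
			range->link.next->link.previous = range->link.previous;
	}
};

int main()
{
	block_range a = {{}, 1}, b = {{}, 2}, c = {{}, 3};
	range_list freeRanges;
	freeRanges.Add(&a);
	freeRanges.Add(&b);
	freeRanges.Add(&c);

	// remove the middle element without searching for it
	freeRanges.Remove(&b);

	for (block_range *r = freeRanges.First(); r != nullptr; r = r->link.next)
		printf("range %d\n", r->id);	// prints "range 3" then "range 1"
	return 0;
}

Haiku's real implementation expresses the same idea generically through the
DoublyLinkedList and DoublyLinkedListMemberGetLink templates, as seen in the
header diff below.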

View File

@@ -201,8 +201,7 @@ block_range::New(block_cache *cache, block_range **_range)
 	range->base = address;

 	// insert into free ranges list in cache
-	range->free_next = cache->free_ranges;
-	cache->free_ranges = range;
+	cache->free_ranges.Add(range);

 	*_range = range;
 	return B_OK;
@@ -236,6 +235,9 @@ block_range::Delete(block_cache *cache, block_range *range)
 		vm_page_set_state(range->pages[i], PAGE_STATE_FREE);
 	}

+	// remove from cache free list
+	cache->free_ranges.Remove(range);
+
 	free(range);
 }
@@ -376,21 +378,7 @@ block_range::Allocate(block_cache *cache, block_chunk **_chunk)
 	if (used_mask == cache->range_mask) {
 		// range is full, remove it from the free list
-
-		// usually, the first entry will be ourself, but we don't count on it
-		block_range *last = NULL, *range = cache->free_ranges;
-		while (range != NULL && range != this) {
-			last = range;
-			range = range->free_next;
-		}
-		if (range == NULL) {
-			panic("block_range %p was free but not in the free list\n", this);
-		} else {
-			if (last)
-				last->free_next = free_next;
-			else
-				cache->free_ranges = free_next;
-		}
+		cache->free_ranges.Remove(this);
 	}
 }

 TRACE(("Allocate: used masks: chunk = %x, range = %lx\n", chunks[chunk].used_mask, used_mask));
@@ -409,8 +397,7 @@ block_range::Free(block_cache *cache, void *address)
 	if (chunks[chunk].used_mask == cache->chunk_mask) {
 		if (used_mask == cache->range_mask) {
 			// range was full before, add it to the free list
-			free_next = cache->free_ranges;
-			cache->free_ranges = this;
+			cache->free_ranges.Add(this);
 		}
 		// chunk was full before, propagate usage bit to range
 		used_mask &= ~(1UL << chunk);

View File

@@ -153,8 +153,7 @@ block_cache::block_cache(int _fd, off_t numBlocks, size_t blockSize)
 	next_transaction_id(1),
 	last_transaction(NULL),
 	transaction_hash(NULL),
-	ranges_hash(NULL),
-	free_ranges(NULL)
+	ranges_hash(NULL)
 {
 	hash = hash_init(32, 0, &cached_block::Compare, &cached_block::Hash);
 	if (hash == NULL)
@@ -208,8 +207,8 @@ block_cache::InitCheck()
 block_range *
 block_cache::GetFreeRange()
 {
-	if (free_ranges != NULL)
-		return free_ranges;
+	if (!free_ranges.IsEmpty())
+		return free_ranges.First();

 	// we need to allocate a new range
 	block_range *range;

View File

@@ -25,7 +25,9 @@ struct cache_transaction;
 struct cached_block;
 struct block_chunk;
 struct block_cache;
+struct block_range;

 typedef DoublyLinkedListLink<cached_block> block_link;
+typedef DoublyLinkedListLink<block_range> range_link;

 struct cached_block {
@@ -66,7 +68,7 @@ struct block_chunk {

 struct block_range {
 	block_range		*next;	// next in hash
-	block_range		*free_next;
+	range_link		link;
 	addr_t			base;
 	uint32			used_mask;
 	vm_page			*pages[kNumBlockRangePages];
@@ -91,6 +93,10 @@ struct block_range {
 	static uint32 Hash(void *_blockRange, const void *_address, uint32 range);
 };

+typedef DoublyLinkedList<block_range,
+	DoublyLinkedListMemberGetLink<block_range,
+		&block_range::link> > range_list;
+
 struct block_cache {
 	hash_table	*hash;
 	benaphore	lock;
@@ -102,7 +108,7 @@ struct block_cache {
 	hash_table	*transaction_hash;

 	hash_table	*ranges_hash;
-	block_range	*free_ranges;
+	range_list	free_ranges;
 	uint32		chunks_per_range;
 	size_t		chunk_size;
 	uint32		range_mask;