Work in progress of the new block allocator for the block cache. It works, but
currently needs even more memory than the old one 8-) Not cleaned up at all.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@12921 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 6233964798
commit 317121e102
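In outline: the malloc()-based BlockAllocator (one reference-counted allocator per block size, shared by all caches using that size) is removed, and block_allocator.cpp plus block_cache_private.h introduce a range-based allocator in its place. A global BlockAddressPool reserves kBlockAddressSize (128 MB) of kernel address space and hands it out in kBlockRangeSize (64 kB) block_ranges; each range is divided into chunks of max_c(block_size, B_PAGE_SIZE) bytes and each chunk into blocks, tracked by used_mask bitmaps on both levels, and physical pages are only allocated and mapped when a chunk is first used. The structures shared between block_cache.cpp and block_allocator.cpp move into the new block_cache_private.h.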
src/system/kernel/cache/BlockAllocator.cpp (deleted file, 117 lines)

@@ -1,117 +0,0 @@
/*
 * Copyright 2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include "BlockAllocator.h"

#include <KernelExport.h>
#include <util/AutoLock.h>


BlockAllocator::AllocatorList BlockAllocator::sList;
mutex BlockAllocator::sMutex;


BlockAllocator::BlockAllocator(size_t size)
	:
	fSize(size)
{
}


BlockAllocator::~BlockAllocator()
{
}


void *
BlockAllocator::Get()
{
	return malloc(fSize);
}


void
BlockAllocator::Put(void *block)
{
	free(block);
}


void
BlockAllocator::Acquire()
{
	fRefCount++;
}


int32
BlockAllocator::Release()
{
	return --fRefCount;
}


// #pragma mark - static


// static
BlockAllocator *
BlockAllocator::GetAllocator(size_t size)
{
	MutexLocker locker(&sMutex);

	// search for allocator in list

	AllocatorList::Iterator iterator = sList.GetIterator();
	while (iterator.HasNext()) {
		BlockAllocator *allocator = iterator.Next();

		if (allocator->Size() == size) {
			allocator->Acquire();
			return allocator;
		}
	}

	// it's not yet there, create new one

	BlockAllocator *allocator = new BlockAllocator(size);
	if (allocator == NULL)
		return NULL;

	sList.Add(allocator);
	return allocator;
}


// static
void
BlockAllocator::PutAllocator(BlockAllocator *allocator)
{
	MutexLocker locker(&sMutex);

	if (!allocator->Release()) {
		sList.Remove(allocator);
		delete allocator;
	}
}


// #pragma mark -


extern "C" status_t
init_block_allocator(void)
{
	status_t status = mutex_init(&BlockAllocator::sMutex, "block allocator");
	if (status < B_OK)
		return status;

	new(&BlockAllocator::sList) DoublyLinkedList<BlockAllocator>;
		// static initializers do not work in the kernel,
		// so we have to do it here, manually
	return B_OK;
}
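For contrast with what follows, this removed allocator was only a reference-counted wrapper around malloc()/free(), shared between all block caches using the same block size. A hypothetical caller of the removed API (names taken from the header below) looked like this:

```cpp
// Sketch of the old, removed interface; error handling elided.
BlockAllocator *allocator = BlockAllocator::GetAllocator(2048);
	// finds or creates the allocator shared by all 2048 byte caches
void *block = allocator->Get();	// just malloc(2048) underneath
// ... use the block ...
allocator->Put(block);	// just free()
BlockAllocator::PutAllocator(allocator);
	// drops the reference; the last user deletes the allocator
```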
src/system/kernel/cache/BlockAllocator.h (deleted file, 43 lines)

@@ -1,43 +0,0 @@
/*
 * Copyright 2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */
#ifndef BLOCK_ALLOCATOR_H
#define BLOCK_ALLOCATOR_H


#include <util/DoublyLinkedList.h>
#include <lock.h>


class BlockAllocator : public DoublyLinkedListLinkImpl<BlockAllocator> {
	public:
		void *Get();
		void Put(void *block);

		static BlockAllocator *GetAllocator(size_t size);
		static void PutAllocator(BlockAllocator *allocator);

	protected:
		BlockAllocator(size_t size);
		~BlockAllocator();

		size_t Size() const { return fSize; }

		void Acquire();
		int32 Release();

	private:
		typedef DoublyLinkedList<BlockAllocator> AllocatorList;

		size_t fSize;
		int32 fRefCount;

	public:
		static AllocatorList sList;
		static mutex sMutex;
};

extern "C" status_t init_block_allocator(void);

#endif	/* BLOCK_ALLOCATOR_H */
src/system/kernel/cache/Jamfile (1 addition, 1 deletion)

@@ -1,7 +1,7 @@
 SubDir OBOS_TOP src system kernel cache ;

 KernelMergeObject kernel_cache.o :
-	BlockAllocator.cpp
+	block_allocator.cpp
 	block_cache.cpp
 	file_cache.cpp
 	vnode_store.cpp
src/system/kernel/cache/block_allocator.cpp (new file, 331 lines)

@@ -0,0 +1,331 @@
/*
 * Copyright 2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include "block_cache_private.h"

#include <KernelExport.h>
#include <fs_cache.h>

#include <lock.h>
#include <util/kernel_cpp.h>
#include <util/DoublyLinkedList.h>
#include <util/AutoLock.h>
#include <util/khash.h>
#include <vm.h>
#include <vm_page.h>

#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>


static class BlockAddressPool sBlockAddressPool;


BlockAddressPool::BlockAddressPool()
{
	benaphore_init(&fLock, "block address pool");

	fBase = 0xa0000000;
		// directly after the I/O space area
	fArea = vm_create_null_area(vm_get_kernel_aspace_id(), "block cache",
		(void **)&fBase, B_BASE_ADDRESS, kBlockAddressSize);

	fFirstFree = fBase;
	fNextFree = -1;
}


BlockAddressPool::~BlockAddressPool()
{
	delete_area(fArea);
}


addr_t
BlockAddressPool::Get()
{
	BenaphoreLocker locker(&fLock);

	// ToDo: we must make sure that a single volume will never eat
	//	all available ranges! Every mounted volume should have a
	//	guaranteed minimum available for its own use.

	addr_t address = fFirstFree;
	if (address != NULL) {
		// Blocks are first taken from the initial free chunk, and
		// when that is empty, from the free list.
		// This saves us the initialization of the free list array,
		// dunno if it's such a good idea, though.
		if ((fFirstFree += RangeSize()) >= fBase + kBlockAddressSize)
			fFirstFree = NULL;

		return address;
	}

	if (fNextFree == -1)
		return NULL;

	address = (fNextFree << RangeShift()) + fBase;
	fNextFree = fFreeList[fNextFree];

	return address;
}


void
BlockAddressPool::Put(addr_t address)
{
	BenaphoreLocker locker(&fLock);

	int32 index = (address - fBase) >> RangeShift();
	fFreeList[index] = fNextFree;
	fNextFree = index;
}
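Put() threads freed ranges into a free list that lives inside the fFreeList array itself: slot i stores the index of the next free range, fNextFree is the head of the chain, and -1 terminates it. Together with fFirstFree, which hands out the untouched tail of the pool sequentially, the array never has to be initialized up front. A minimal user-space sketch of the same technique (the names and the fixed pool size are assumptions, not kernel code):

```cpp
#include <stdint.h>

const int32_t kRangeCount = 2048;	// 128 MB / 64 kB, as in the pool above

struct IndexFreeList {
	int32_t	next[kRangeCount];	// next[i]: index of the next free range
	int32_t	head;			// first free index, -1 when empty

	IndexFreeList() : head(-1) {}

	void Put(int32_t index)
	{
		// the freed slot itself stores the old head of the chain
		next[index] = head;
		head = index;
	}

	int32_t Get()
	{
		if (head == -1)
			return -1;	// nothing free

		int32_t index = head;
		head = next[index];
		return index;
	}
};
```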


// #pragma mark -


/* static */
int
block_range::Compare(void *_blockRange, const void *_address)
{
	block_range *range = (block_range *)_blockRange;
	addr_t address = (addr_t)_address;

	return ((range->base - sBlockAddressPool.BaseAddress()) >> sBlockAddressPool.RangeShift())
		== ((address - sBlockAddressPool.BaseAddress()) >> sBlockAddressPool.RangeShift());
}


/* static */
uint32
block_range::Hash(void *_blockRange, const void *_address, uint32 range)
{
	block_range *blockRange = (block_range *)_blockRange;
	addr_t address = (addr_t)_address;

	if (blockRange != NULL)
		return ((blockRange->base - sBlockAddressPool.BaseAddress())
			>> sBlockAddressPool.RangeShift()) % range;

	return ((address - sBlockAddressPool.BaseAddress())
		>> sBlockAddressPool.RangeShift()) % range;
}


/* static */
status_t
block_range::NewBlockRange(block_cache *cache, block_range **_range)
{
	addr_t address = sBlockAddressPool.Get();
	if (address == NULL)
		return B_ENTRY_NOT_FOUND;

	block_range *range = (block_range *)malloc(sizeof(block_range)
		+ cache->chunks_per_range * sizeof(block_chunk));
	if (range == NULL)
		return B_NO_MEMORY;

	memset(range, 0, sizeof(block_range) + cache->chunks_per_range * sizeof(block_chunk));
	range->base = address;

	*_range = range;
	return B_OK;
}


uint32
block_range::BlockIndex(block_cache *cache, void *address)
{
	return (((addr_t)address - base) % cache->chunk_size) / cache->block_size;
}


uint32
block_range::ChunkIndex(block_cache *cache, void *address)
{
	return ((addr_t)address - base) / cache->chunk_size;
}


block_chunk *
block_range::Chunk(block_cache *cache, void *address)
{
	return &chunks[ChunkIndex(cache, address)];
}
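To make the index math above concrete, here is a worked example under assumed parameters (1 kB blocks on a system with 4 kB pages; the derived values follow from the formulas in block_cache's constructor further down):

```cpp
// Hypothetical numbers, not from the commit itself:
//   block_size       = 1024
//   chunk_size       = max_c(1024, B_PAGE_SIZE)     = 4096
//   chunks_per_range = kBlockRangeSize / chunk_size = 65536 / 4096 = 16
//   blocks per chunk = chunk_size / block_size      = 4
//
// For an address 9216 bytes into a range (base + 9216):
//   ChunkIndex() = 9216 / 4096          = 2
//   BlockIndex() = (9216 % 4096) / 1024 = 1
// i.e. the address is block 1 of chunk 2.
```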


status_t
block_range::Allocate(block_cache *cache, cached_block *block)
{
	block_chunk *chunk;

	void *address = Allocate(cache, &chunk);
	if (address == NULL)
		return B_NO_MEMORY;

	block->data = address;

	// add the block to the chunk
	block->chunk_next = chunk->blocks;
	chunk->blocks = block;

	return B_OK;
}


void
block_range::Free(block_cache *cache, cached_block *block)
{
	Free(cache, block->data);
	block_chunk *chunk = Chunk(cache, block->data);

	// remove block from list

	cached_block *last = NULL, *next = chunk->blocks;
	while (next != NULL && next != block) {
		last = next;
		next = next->chunk_next;
	}
	if (next == NULL) {
		panic("cached_block %p was not in chunk %p\n", block, chunk);
	} else {
		if (last)
			last->chunk_next = block->chunk_next;
		else
			chunk->blocks = block->chunk_next;
	}

	block->data = NULL;
}


void *
block_range::Allocate(block_cache *cache, block_chunk **_chunk)
{
	// get free chunk

	uint32 chunk;
	for (chunk = 0; chunk < cache->chunks_per_range; chunk++) {
		if ((used_mask & (1UL << chunk)) == 0)
			break;
	}
	if (chunk == cache->chunks_per_range)
		return NULL;

	// get free block in chunk

	uint32 numBlocks = cache->chunk_size / cache->block_size;
	uint32 i;
	for (i = 0; i < numBlocks; i++) {
		if ((chunks[chunk].used_mask & (1UL << i)) == 0)
			break;
	}
	if (i == numBlocks) {
		panic("block_chunk %lu in range %p pretended to be free but isn't\n", i, this);
		return NULL;
	}

	if (!chunks[chunk].mapped) {
		// allocate pages if needed
		uint32 numPages = cache->chunk_size / B_PAGE_SIZE;
		uint32 pageBaseIndex = numPages * chunk;
		if (pages[pageBaseIndex] == NULL) {
			// there are no pages for us yet
			for (uint32 i = 0; i < numPages; i++) {
				vm_page *page = vm_page_allocate_page(PAGE_STATE_FREE);
				if (page == NULL) {
					// ToDo: handle this gracefully
					panic("no memory for block!!\n");
					return NULL;
				}

				pages[pageBaseIndex + i] = page;
			}
		}

		// map the memory

		vm_address_space *addressSpace = vm_get_kernel_aspace();
		vm_translation_map *map = &addressSpace->translation_map;
		map->ops->lock(map);

		for (uint32 i = 0; i < numPages; i++) {
			map->ops->map(map, base + chunk * cache->chunk_size + i * B_PAGE_SIZE,
				pages[pageBaseIndex + i]->ppn * B_PAGE_SIZE,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}

		map->ops->unlock(map);
		vm_put_aspace(addressSpace);

		chunks[chunk].mapped = true;
	}

	chunks[chunk].used_mask |= 1UL << i;
	if (chunks[chunk].used_mask == (1UL << numBlocks) - 1) {
		// all blocks are used in this chunk, propagate usage bit
		used_mask |= 1UL << chunk;

		if (used_mask == cache->range_mask) {
			// range is full, remove it from the free list

			// usually, the first entry will be us, but we don't count on it
			block_range *last = NULL, *range = cache->free_ranges;
			while (range != NULL && range != this) {
				last = range;
				range = range->free_next;
			}
			if (range == NULL) {
				panic("block_range %p was free but not in the free list\n", this);
			} else {
				if (last)
					last->free_next = free_next;
				else
					cache->free_ranges = free_next;
			}
		}
	}

	if (_chunk)
		*_chunk = &chunks[chunk];
	return (void *)(base + cache->chunk_size * chunk + cache->block_size * i);
}
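Allocate() maintains a two-level occupancy bitmap: each chunk tracks its blocks in chunks[i].used_mask, and the range-level used_mask gets a chunk's bit only once that chunk is completely full; a range whose chunks are all full is additionally unlinked from cache->free_ranges so that GetFreeRange() skips it. With the assumed 1 kB blocks from the earlier example (four blocks per chunk, sixteen chunks per range):

```cpp
// Hypothetical numbers, continuing the example above:
//   numBlocks = 4, so a chunk is full when
//     chunks[chunk].used_mask == (1UL << 4) - 1 == 0xf  (== cache->chunk_mask)
//   and only then does the range-level mask change:
//     used_mask |= 1UL << chunk;
//   a range with 16 chunks is full when
//     used_mask == (1UL << 16) - 1 == 0xffff            (== cache->range_mask)
```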


void
block_range::Free(block_cache *cache, void *address)
{
	uint32 chunk = ChunkIndex(cache, address);

	if (chunks[chunk].used_mask == cache->chunk_mask) {
		if (used_mask == cache->range_mask) {
			// range was full before, add it to the free list
			free_next = cache->free_ranges;
			cache->free_ranges = this;
		}
		// chunk was full before, propagate usage bit to range
		used_mask &= ~(1UL << chunk);
	}
	chunks[chunk].used_mask |= BlockIndex(cache, address);
}


// #pragma mark -


extern "C" status_t
init_block_allocator(void)
{
	new(&sBlockAddressPool) BlockAddressPool;
		// static initializers do not work in the kernel,
		// so we have to do it here, manually

	return sBlockAddressPool.InitCheck();
}
src/system/kernel/cache/block_cache.cpp (374 additions and deletions)

@@ -4,7 +4,7 @@
 */


-#include "BlockAllocator.h"
+#include "block_cache_private.h"

 #include <KernelExport.h>
 #include <fs_cache.h>
@@ -13,7 +13,10 @@
 #include <lock.h>
 #include <util/kernel_cpp.h>
 #include <util/DoublyLinkedList.h>
+#include <util/AutoLock.h>
 #include <util/khash.h>
+#include <vm.h>
+#include <vm_page.h>

 #include <unistd.h>
 #include <stdlib.h>
@@ -37,44 +40,6 @@
 # define TRACE(x) ;
 #endif

-#define DEBUG_CHANGED
-
-struct cache_transaction;
-struct cached_block;
-typedef DoublyLinkedListLink<cached_block> block_link;
-
-
-struct cached_block {
-	cached_block *next;	// next in hash
-	cached_block *transaction_next;
-	block_link previous_transaction_link;
-	off_t block_number;
-	void *data;
-	void *original;
-#ifdef DEBUG_CHANGED
-	void *compare;
-#endif
-	int32 ref_count;
-	int32 lock;
-	bool is_dirty;
-	cache_transaction *transaction;
-	cache_transaction *previous_transaction;
-};
-
-struct block_cache {
-	hash_table *hash;
-	benaphore lock;
-	int fd;
-	off_t max_blocks;
-	size_t block_size;
-	int32 next_transaction_id;
-	hash_table *transaction_hash;
-	BlockAllocator *allocator;
-};
-
-typedef DoublyLinkedList<cached_block,
-	DoublyLinkedListMemberGetLink<cached_block,
-		&cached_block::previous_transaction_link> > block_list;
-
 struct cache_transaction {
 	cache_transaction *next;
@@ -87,52 +52,6 @@ struct cache_transaction {
 	bool open;
 };

-struct cache {
-	hash_table *hash;
-	benaphore lock;
-	off_t max_blocks;
-	size_t block_size;
-};
-
-static const int32 kNumCaches = 16;
-struct cache sCaches[kNumCaches];
-	// we can cache the first 16 fds (I said we were dumb, right?)
-
-
-class BenaphoreLocker {
-	public:
-		BenaphoreLocker(int fd)
-			: fBenaphore(NULL)
-		{
-			if (fd < 0 || fd >= kNumCaches)
-				return;
-
-			fBenaphore = &sCaches[fd].lock;
-			benaphore_lock(fBenaphore);
-		}
-
-		BenaphoreLocker(block_cache *cache)
-			: fBenaphore(&cache->lock)
-		{
-			benaphore_lock(fBenaphore);
-		}
-
-		~BenaphoreLocker()
-		{
-			if (fBenaphore != NULL)
-				benaphore_unlock(fBenaphore);
-		}
-
-		status_t InitCheck()
-		{
-			return fBenaphore != NULL ? B_OK : B_ERROR;
-		}
-
-	private:
-		benaphore *fBenaphore;
-};
-
 static status_t write_cached_block(block_cache *cache, cached_block *block, bool deleteTransaction = true);
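The removed per-fd cache table and hand-rolled BenaphoreLocker are replaced by the generic locker from the newly included <util/AutoLock.h>, which is why every `BenaphoreLocker locker(cache)` in the hunks below becomes `BenaphoreLocker locker(&cache->lock)`: the shared locker wraps a plain `benaphore *` instead of knowing about block_cache. A minimal RAII sketch of that pattern (assumed shape for illustration, not the actual util/AutoLock.h source):

```cpp
// Assumed shape of the pattern, for illustration only.
class BenaphoreLocker {
	public:
		BenaphoreLocker(benaphore *lock)
			: fLock(lock)
		{
			benaphore_lock(fLock);
		}

		~BenaphoreLocker()
		{
			// released automatically when the locker goes out of scope
			benaphore_unlock(fLock);
		}

	private:
		benaphore *fLock;
};
```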
@@ -180,8 +99,9 @@ lookup_transaction(block_cache *cache, int32 id)
 // #pragma mark - private block_cache


-static int
-cached_block_compare(void *_cacheEntry, const void *_block)
+/* static */
+int
+cached_block::Compare(void *_cacheEntry, const void *_block)
 {
 	cached_block *cacheEntry = (cached_block *)_cacheEntry;
 	const off_t *block = (const off_t *)_block;
@@ -190,8 +110,10 @@ cached_block_compare(void *_cacheEntry, const void *_block)
 }


-static uint32
-cached_block_hash(void *_cacheEntry, const void *_block, uint32 range)
+/* static */
+uint32
+cached_block::Hash(void *_cacheEntry, const void *_block, uint32 range)
 {
 	cached_block *cacheEntry = (cached_block *)_cacheEntry;
 	const off_t *block = (const off_t *)_block;
@@ -203,34 +125,142 @@ cached_block_hash(void *_cacheEntry, const void *_block, uint32 range)
 }


-static void
-free_cached_block(block_cache *cache, cached_block *block)
-{
-	cache->allocator->Put(block->data);
-	cache->allocator->Put(block->original);
-#ifdef DEBUG_CHANGED
-	cache->allocator->Put(block->compare);
-#endif
-
-	free(block);
-}
-
-
-static cached_block *
-new_cached_block(block_cache *cache, off_t blockNumber, bool cleared = false)
+// #pragma mark -
+
+
+block_cache::block_cache(int _fd, off_t numBlocks, size_t blockSize)
+	:
+	hash(NULL),
+	fd(_fd),
+	max_blocks(numBlocks),
+	block_size(blockSize),
+	next_transaction_id(1),
+	transaction_hash(NULL),
+	ranges_hash(NULL),
+	free_ranges(NULL)
+{
+	hash = hash_init(32, 0, &cached_block::Compare, &cached_block::Hash);
+	if (hash == NULL)
+		return;
+
+	transaction_hash = hash_init(16, 0, &transaction_compare, &::transaction_hash);
+	if (transaction_hash == NULL)
+		return;
+
+	ranges_hash = hash_init(16, 0, &block_range::Compare, &block_range::Hash);
+	if (ranges_hash == NULL)
+		return;
+
+	if (benaphore_init(&lock, "block cache") < B_OK)
+		return;
+
+	chunk_size = max_c(blockSize, B_PAGE_SIZE);
+	chunks_per_range = kBlockRangeSize / chunk_size;
+	range_mask = (1UL << chunks_per_range) - 1;
+	chunk_mask = (1UL << (chunk_size / blockSize)) - 1;
+}
+
+
+block_cache::~block_cache()
+{
+	benaphore_destroy(&lock);
+
+	hash_uninit(ranges_hash);
+	hash_uninit(transaction_hash);
+	hash_uninit(hash);
+}
+
+
+status_t
+block_cache::InitCheck()
+{
+	if (lock.sem < B_OK)
+		return lock.sem;
+
+	if (hash == NULL || transaction_hash == NULL || ranges_hash == NULL)
+		return B_NO_MEMORY;
+
+	return B_OK;
+}
+
+
+block_range *
+block_cache::GetFreeRange()
+{
+	if (free_ranges != NULL)
+		return free_ranges;
+
+	// we need to allocate a new range
+	block_range *range;
+	if (block_range::NewBlockRange(this, &range) != B_OK) {
+		// ToDo: free up space in existing ranges
+		// We may also need to free ranges from other caches to get a free one
+		// (if not, an active volume might have stolen all free ranges already)
+		return NULL;
+	}
+
+	hash_insert(ranges_hash, range);
+	return range;
+}
+
+
+block_range *
+block_cache::GetRange(void *address)
+{
+	return (block_range *)hash_lookup(ranges_hash, address);
+}
+
+
+void
+block_cache::Free(void *address)
+{
+	block_range *range = GetRange(address);
+	ASSERT(range != NULL);
+	range->Free(this, address);
+}
+
+
+void *
+block_cache::Allocate()
+{
+	block_range *range = GetFreeRange();
+	if (range == NULL)
+		return NULL;
+
+	return range->Allocate(this);
+}
+
+
+void
+block_cache::FreeBlock(cached_block *block)
+{
+	block_range *range = GetRange(block->data);
+	ASSERT(range != NULL);
+	range->Free(this, block);
+
+	Free(block->original);
+#ifdef DEBUG_CHANGED
+	Free(block->compare);
+#endif
+
+	free(block);
+}
+
+
+cached_block *
+block_cache::NewBlock(off_t blockNumber)
 {
 	cached_block *block = (cached_block *)malloc(sizeof(cached_block));
 	if (block == NULL)
 		return NULL;

-	if (!cleared) {
-		block->data = cache->allocator->Get();
-		if (block->data == NULL) {
-			free(block);
-			return NULL;
-		}
-	} else
-		block->data = NULL;
+	block_range *range = GetFreeRange();
+	if (range == NULL) {
+		free(block);
+		return NULL;
+	}
+
+	range->Allocate(this, block);

 	block->block_number = blockNumber;
 	block->lock = 0;
@@ -242,7 +272,7 @@ new_cached_block(block_cache *cache, off_t blockNumber, bool cleared = false)
 	block->compare = NULL;
 #endif

-	hash_insert(cache->hash, block);
+	hash_insert(hash, block);

 	return block;
 }
@@ -301,12 +331,14 @@ put_cached_block(block_cache *cache, cached_block *block)
 			write_cached_block(cache, block);
 			panic("block_cache: supposed to be clean block was changed!\n");

-			cache->allocator->Put(block->compare);
+			cache->Free(block->compare);
 			block->compare = NULL;
 		}
 #endif

-	block->lock--;
+	if (--block->lock == 0)
+		;
+		// block->data = cache->allocator->Release(block->data);
 }
@@ -320,34 +352,41 @@ put_cached_block(block_cache *cache, off_t blockNumber)


 static cached_block *
-get_cached_block(block_cache *cache, off_t blockNumber, bool cleared = false)
+get_cached_block(block_cache *cache, off_t blockNumber, bool &allocated, bool readBlock = true)
 {
 	cached_block *block = (cached_block *)hash_lookup(cache->hash, &blockNumber);
-	bool allocated = false;
+	allocated = false;

 	if (block == NULL) {
 		// read block into cache
-		block = new_cached_block(cache, blockNumber, cleared);
+		block = cache->NewBlock(blockNumber);
 		if (block == NULL)
 			return NULL;

 		allocated = true;
 	}

-	if (!allocated && block->data == NULL && !cleared) {
-		// there is no block yet, but we need one
-		block->data = cache->allocator->Get();
-		if (block->data == NULL)
-			return NULL;
-	} else {
+/*
 	if (block->lock == 0 && block->data != NULL) {
 		// see if the old block can be resurrected
 		block->data = cache->allocator->Acquire(block->data);
 	}
-	}
+
+	allocated = true;
+	if (block->data == NULL) {
+		// there is no block yet, but we need one
+		block->data = cache->allocator->Get();
+		if (block->data == NULL)
+			return NULL;
+
+		allocated = true;
+	}
+*/

-	if (allocated && !cleared) {
+	if (allocated && readBlock) {
 		int32 blockSize = cache->block_size;

 		if (read_pos(cache->fd, blockNumber * blockSize, block->data, blockSize) < blockSize) {
-			free_cached_block(cache, block);
+			cache->FreeBlock(block);
 			return NULL;
 		}
 	}
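The reworked get_cached_block() splits the old `cleared` flag into two parts: the `allocated` reference reports to the caller whether the block was just created (so the writable path knows whether there is old content to save), and `readBlock` decides whether the block must be filled from disk. A sketch of the new calling convention, mirroring get_writable_cached_block() below:

```cpp
// `cleared` means the caller will overwrite the whole block anyway.
bool allocated;
cached_block *block = get_cached_block(cache, blockNumber, allocated,
	!cleared);	// don't read from disk what is about to be cleared
if (block == NULL)
	return NULL;

if (cleared)
	memset(block->data, 0, cache->block_size);
```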
@@ -361,23 +400,17 @@ static void *
 get_writable_cached_block(block_cache *cache, off_t blockNumber, off_t base, off_t length,
 	int32 transactionID, bool cleared)
 {
-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);

 	TRACE(("get_writable_cached_block(blockNumber = %Ld, transaction = %ld)\n", blockNumber, transactionID));

-	cached_block *block = get_cached_block(cache, blockNumber, cleared);
+	bool allocated;
+	cached_block *block = get_cached_block(cache, blockNumber, allocated, !cleared);
 	if (block == NULL)
 		return NULL;

 	// if there is no transaction support, we just return the current block
 	if (transactionID == -1) {
-		if (cleared && block->data == NULL) {
-			block->data = cache->allocator->Get();
-			if (block->data == NULL) {
-				put_cached_block(cache, block);
-				return NULL;
-			}
-		}
 		if (cleared)
 			memset(block->data, 0, cache->block_size);

@@ -416,9 +449,9 @@ get_writable_cached_block(block_cache *cache, off_t blockNumber, off_t base, off
 		transaction->num_blocks++;
 	}

-	if (block->data != NULL && block->original == NULL) {
+	if (!(allocated && cleared) && block->original == NULL) {
 		// we already have data, so we need to save it
-		block->original = cache->allocator->Get();
+		block->original = cache->Allocate();
 		if (block->original == NULL) {
 			put_cached_block(cache, block);
 			return NULL;
@@ -427,15 +460,9 @@ get_writable_cached_block(block_cache *cache, off_t blockNumber, off_t base, off
 		memcpy(block->original, block->data, cache->block_size);
 	}

-	if (block->data == NULL && cleared) {
-		// there is no data yet, we need a clean new block
-		block->data = cache->allocator->Get();
-		if (block->data == NULL) {
-			put_cached_block(cache, block);
-			return NULL;
-		}
-	}
+	if (cleared)
+		memset(block->data, 0, cache->block_size);

 	block->is_dirty = true;

 	return block->data;
@@ -511,7 +538,7 @@ cache_start_transaction(void *_cache)

 	TRACE(("cache_transaction_start(): id %ld started\n", transaction->id));

-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);
 	hash_insert(cache->transaction_hash, transaction);

 	return transaction->id;
@@ -522,7 +549,7 @@ extern "C" status_t
 cache_sync_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);
 	status_t status = B_ENTRY_NOT_FOUND;

 	hash_iterator iterator;
@@ -552,7 +579,7 @@ extern "C" status_t
 cache_end_transaction(void *_cache, int32 id, transaction_notification_hook hook, void *data)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);

 	TRACE(("cache_end_transaction(id = %ld)\n", id));

@@ -577,7 +604,7 @@ cache_end_transaction(void *_cache, int32 id, transaction_notification_hook hook
 		}

 		if (block->original != NULL) {
-			cache->allocator->Put(block->original);
+			cache->Free(block->original);
 			block->original = NULL;
 		}

@@ -599,7 +626,7 @@ extern "C" status_t
 cache_abort_transaction(void *_cache, int32 id)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);

 	TRACE(("cache_abort_transaction(id = %ld)\n", id));

@@ -619,7 +646,7 @@ cache_abort_transaction(void *_cache, int32 id)
 			TRACE(("cache_abort_transaction(id = %ld): restored contents of block %Ld\n",
 				transaction->id, block->block_number));
 			memcpy(block->data, block->original, cache->block_size);
-			cache->allocator->Put(block->original);
+			cache->Free(block->original);
 			block->original = NULL;
 		}

@@ -660,7 +687,7 @@ cache_next_block_in_transaction(void *_cache, int32 id, uint32 *_cookie, off_t *
 	cached_block *block = (cached_block *)*_cookie;
 	block_cache *cache = (block_cache *)_cache;

-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);

 	cache_transaction *transaction = lookup_transaction(cache, id);
 	if (transaction == NULL)
@@ -703,9 +730,9 @@ block_cache_delete(void *_cache, bool allowWrites)
 	uint32 cookie = 0;
 	cached_block *block;
 	while ((block = (cached_block *)hash_remove_first(cache->hash, &cookie)) != NULL) {
-		free_cached_block(cache, block);
+		cache->FreeBlock(block);
 	}

 	// free all transactions (they will all be aborted)

 	cookie = 0;
@@ -714,11 +741,6 @@ block_cache_delete(void *_cache, bool allowWrites)
 		delete transaction;
 	}

-	hash_uninit(cache->hash);
-	hash_uninit(cache->transaction_hash);
-	BlockAllocator::PutAllocator(cache->allocator);
-	benaphore_destroy(&cache->lock);
-
 	delete cache;
 }

@@ -726,41 +748,16 @@ block_cache_delete(void *_cache, bool allowWrites)
 extern "C" void *
 block_cache_create(int fd, off_t numBlocks, size_t blockSize)
 {
-	block_cache *cache = new block_cache;
+	block_cache *cache = new block_cache(fd, numBlocks, blockSize);
 	if (cache == NULL)
 		return NULL;

-	cache->hash = hash_init(32, 0, &cached_block_compare, &cached_block_hash);
-	if (cache->hash == NULL)
-		goto err1;
-
-	cache->transaction_hash = hash_init(16, 0, &transaction_compare, &transaction_hash);
-	if (cache->transaction_hash == NULL)
-		goto err2;
-
-	cache->allocator = BlockAllocator::GetAllocator(blockSize);
-	if (cache->allocator == NULL)
-		goto err3;
-
-	if (benaphore_init(&cache->lock, "block cache") < B_OK)
-		goto err4;
-
-	cache->fd = fd;
-	cache->max_blocks = numBlocks;
-	cache->block_size = blockSize;
-	cache->next_transaction_id = 1;
+	if (cache->InitCheck() != B_OK) {
+		delete cache;
+		return NULL;
+	}

 	return cache;
-
-err4:
-	BlockAllocator::PutAllocator(cache->allocator);
-err3:
-	hash_uninit(cache->transaction_hash);
-err2:
-	hash_uninit(cache->hash);
-err1:
-	delete cache;
-	return NULL;
 }
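block_cache_create() trades the goto-based error ladder for the constructor/InitCheck()/destructor trio that block_cache now provides: everything is acquired in the constructor, the caller checks InitCheck() right after `new`, and the destructor is responsible for tearing down whatever was set up. A hypothetical miniature of that pattern (not from the commit):

```cpp
// Hypothetical class, mirroring the shape of the pattern only.
struct Example {
	hash_table *table;

	Example() { table = hash_init(32, 0, NULL, NULL); }
	~Example() { if (table != NULL) hash_uninit(table); }

	status_t InitCheck() const { return table != NULL ? B_OK : B_NO_MEMORY; }
};

void *
example_create()
{
	Example *example = new Example;
	if (example == NULL)
		return NULL;

	if (example->InitCheck() != B_OK) {
		// safe: the destructor copes with partially initialized state
		delete example;
		return NULL;
	}
	return example;
}
```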
@@ -772,7 +769,7 @@ block_cache_sync(void *_cache)
 	// we will sync all dirty blocks to disk that have a completed
 	// transaction or no transaction only

-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);
 	hash_iterator iterator;
 	hash_open(cache->hash, &iterator);

@@ -837,15 +834,16 @@ extern "C" const void *
 block_cache_get_etc(void *_cache, off_t blockNumber, off_t base, off_t length)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);
+	bool allocated;

-	cached_block *block = get_cached_block(cache, blockNumber);
+	cached_block *block = get_cached_block(cache, blockNumber, allocated);
 	if (block == NULL)
 		return NULL;

 #ifdef DEBUG_CHANGED
 	if (block->compare == NULL)
-		block->compare = cache->allocator->Get();
+		block->compare = cache->Allocate();
 	if (block->compare != NULL)
 		memcpy(block->compare, block->data, cache->block_size);
 #endif
@@ -876,7 +874,7 @@ extern "C" void
 block_cache_put(void *_cache, off_t blockNumber)
 {
 	block_cache *cache = (block_cache *)_cache;
-	BenaphoreLocker locker(cache);
+	BenaphoreLocker locker(&cache->lock);

 	put_cached_block(cache, blockNumber);
 }
src/system/kernel/cache/block_cache_private.h (new file, 163 lines)

@@ -0,0 +1,163 @@
/*
 * Copyright 2004-2005, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */
#ifndef BLOCK_CACHE_PRIVATE_H
#define BLOCK_CACHE_PRIVATE_H


#include <KernelExport.h>
#include <fs_cache.h>

#include <block_cache.h>
#include <lock.h>
#include <util/kernel_cpp.h>
#include <util/DoublyLinkedList.h>
#include <util/AutoLock.h>
#include <util/khash.h>
#include <vm.h>
#include <vm_page.h>


#define DEBUG_CHANGED


static const size_t kBlockAddressSize = 128 * 1024 * 1024;	// 128 MB
static const size_t kBlockRangeSize = 64 * 1024;	// 64 kB
static const size_t kBlockRangeShift = 16;
static const size_t kNumBlockRangePages = kBlockRangeSize / B_PAGE_SIZE;

struct cache_transaction;
struct cached_block;
struct block_chunk;
struct block_cache;
typedef DoublyLinkedListLink<cached_block> block_link;


struct cached_block {
	cached_block *next;	// next in hash
	cached_block *transaction_next;
	block_link previous_transaction_link;
	cached_block *chunk_next;
	block_chunk *chunk;
	off_t block_number;
	void *data;
	void *original;
#ifdef DEBUG_CHANGED
	void *compare;
#endif
	int32 ref_count;
	int32 lock;
	bool busy : 1;
	bool is_writing : 1;
	bool is_dirty : 1;
	bool unmapped : 1;
	cache_transaction *transaction;
	cache_transaction *previous_transaction;

	static int Compare(void *_cacheEntry, const void *_block);
	static uint32 Hash(void *_cacheEntry, const void *_block, uint32 range);
};

typedef DoublyLinkedList<cached_block,
	DoublyLinkedListMemberGetLink<cached_block,
		&cached_block::previous_transaction_link> > block_list;

struct block_chunk {
	cached_block *blocks;
	bool mapped;
	uint16 used_mask;	// min chunk size is 4096 bytes; min block size is 256 bytes
};

struct block_range {
	block_range *next;	// next in hash
	block_range *free_next;
	addr_t base;
	uint32 used_mask;
	vm_page *pages[kNumBlockRangePages];
	block_chunk chunks[0];

	static status_t NewBlockRange(block_cache *cache, block_range **_range);

	status_t Allocate(block_cache *cache, cached_block *block);
	void Free(block_cache *cache, cached_block *block);

	void *Allocate(block_cache *cache, block_chunk **_chunk = NULL);
	void Free(block_cache *cache, void *address);

	uint32 BlockIndex(block_cache *cache, void *address);
	uint32 ChunkIndex(block_cache *cache, void *address);
	block_chunk *Chunk(block_cache *cache, void *address);

	static int Compare(void *_blockRange, const void *_address);
	static uint32 Hash(void *_blockRange, const void *_address, uint32 range);

//	bool HasFreeBlocks() const { return (1L << num_chunks) - 1 != used_mask; }
};

struct block_cache {
	hash_table *hash;
	benaphore lock;
	int fd;
	off_t max_blocks;
	size_t block_size;
	int32 next_transaction_id;
	hash_table *transaction_hash;

	hash_table *ranges_hash;
	block_range *free_ranges;
	uint32 chunks_per_range;
	size_t chunk_size;
	uint32 range_mask;
	uint32 chunk_mask;
	block_list unmapped_blocks;

	block_cache(int fd, off_t numBlocks, size_t blockSize);
	~block_cache();

	status_t InitCheck();

	block_range *GetFreeRange();
	block_range *GetRange(void *address);
	void FreeBlock(cached_block *block);
	cached_block *NewBlock(off_t blockNumber);
	void Free(void *address);
	void *Allocate();
};

class BlockAddressPool {
	public:
		BlockAddressPool();
		~BlockAddressPool();

		status_t InitCheck() const { return fArea >= B_OK ? B_OK : fArea; }

		size_t RangeSize() const { return kBlockRangeSize; }
		size_t RangeShift() const { return kBlockRangeShift; }
		addr_t BaseAddress() const { return fBase; }

		addr_t Get();
		void Put(addr_t address);

	private:
		benaphore fLock;
		area_id fArea;
		addr_t fBase;
		addr_t fFirstFree;
		int32 fNextFree;
		int32 fFreeList[kBlockAddressSize / kBlockRangeSize];
};


#ifdef __cplusplus
extern "C" {
#endif

status_t init_block_allocator();

#ifdef __cplusplus
}
#endif

#endif	/* BLOCK_CACHE_PRIVATE_H */
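block_range ends in `block_chunk chunks[0]`, a GNU-style zero-length trailing array: the chunk array is not a separate allocation but is sized at malloc() time, exactly as NewBlockRange() does with `sizeof(block_range) + cache->chunks_per_range * sizeof(block_chunk)`. A generic, self-contained illustration of the trick (hypothetical types, not from the commit):

```cpp
#include <stdlib.h>
#include <string.h>

struct item { unsigned used_mask; };

struct container {
	int count;
	item items[0];	// storage follows the struct itself (GNU extension)
};

container *
create_container(int count)
{
	// one malloc covers the header and all trailing items
	size_t size = sizeof(container) + count * sizeof(item);
	container *c = (container *)malloc(size);
	if (c == NULL)
		return NULL;

	memset(c, 0, size);	// like NewBlockRange(): all masks start out clear
	c->count = count;
	return c;
}
```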