* Replaced my block allocator code with Hugo's slab allocator (so we can no
  longer allocate more physical pages than we have virtual address space, but
  so what?).
* Used the new CACHE_LARGE_SLAB flag as a temporary work-around; otherwise the
  slab would easily create several thousand areas, which our area code (and
  kernel heap) cannot handle well yet (it gets awfully slow).
* Block caches with the same block size could share a single slab, but we
  don't do that yet.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23567 a95241bf-73f2-0310-859d-f6bbb57e9c96
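In practice, the change boils down to each block_cache creating its own object cache ("slab") for block-sized buffers and routing its Allocate()/Free() calls through it, as the block_cache.cpp diff below shows. The following is a minimal kernel-side sketch of that path; the BufferPool wrapper and its method names are purely illustrative, while the object-cache calls and their arguments are taken verbatim from the diff.

// Hypothetical condensation of the new allocation path (not the actual
// block_cache class). Buffers now come straight from a per-cache object
// cache instead of the old block_range/BlockAddressPool machinery.
#include <slab/Slab.h>

struct BufferPool {
	object_cache *buffer_cache;

	status_t Init(size_t blockSize)
	{
		// CACHE_LARGE_SLAB is the temporary work-around mentioned above:
		// it keeps the slab from creating thousands of small areas.
		buffer_cache = create_object_cache_etc("block cache buffers",
			blockSize, 8, 0, CACHE_LARGE_SLAB, NULL, NULL, NULL, NULL);
		return buffer_cache != NULL ? B_OK : B_NO_MEMORY;
	}

	void *Allocate()
	{
		// replaces the old block_range::Allocate() path
		return object_cache_alloc(buffer_cache, 0);
	}

	void Free(void *buffer)
	{
		// replaces the old block_range::Free() path
		if (buffer != NULL)
			object_cache_free(buffer_cache, buffer);
	}

	void Uninit()
	{
		delete_object_cache(buffer_cache);
	}
};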
parent 855ab2b312
commit a96e7cce98
src/system/kernel/cache/Jamfile (vendored): 1 changed line
@@ -1,7 +1,6 @@
SubDir HAIKU_TOP src system kernel cache ;

KernelMergeObject kernel_cache.o :
	block_allocator.cpp
	block_cache.cpp
	file_cache.cpp
	file_map.cpp
src/system/kernel/cache/block_allocator.cpp (vendored): 447 changed lines
@@ -1,447 +0,0 @@
/*
 * Copyright 2005-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */


#include "block_cache_private.h"

#include <KernelExport.h>

#include <vm_address_space.h>
#include <vm_page.h>
#include <vm_types.h>
#include <util/AutoLock.h>
#include <util/khash.h>

#include <new>
#include <stdlib.h>
#include <string.h>


//#define TRACE_BLOCK_ALLOCATOR
#ifdef TRACE_BLOCK_ALLOCATOR
#	define TRACE(x) dprintf x
#else
#	define TRACE(x) ;
#endif


/** The BlockAddressPool class manages a block of virtual memory address
 * ranges.
 * The actual block of kBlockAddressSize bytes, currently 128 MB (as defined
 * in block_cache_private.h) is divided into kBlockRangeSize or 64 kB
 * ranges.
 * You can ask for a free range, and when you're done, you return it into
 * the pool. You usually only do that when the block cache owning the
 * ranges is deleted, or if memory becomes tight, a new volume is mounted
 * or whatever.
 *
 * The block_range class on the other side manages each range. Only allocated
 * ranges have a block_range object.
 * In the block cache, the ranges are put into a hash table - they are
 * accessed within that hash by address. That way, a cached_block allocated
 * in one range doesn't need to know its range.
 *
 * If you actually need physical memory, you must allocate it using the
 * block_range Allocate() methods.
 *
 * The block_range is further divided into block_chunks. A block_chunk is
 * a multiple of the page size - for block sizes below the page size, it's
 * just one page. Pages for the block range are actually allocated and
 * mapped per chunk, and not for the whole range.
 *
 * The block_range only exists to avoid fragmentation of the virtual memory
 * region reserved for the block cache, since all allocations have the same
 * size there can't be any fragmentation at all.
 *
 * NOTE: This code is a bit complicated, and maybe a slab allocator would have been
 * the better choice. What this allows for (and what would not be easily possible
 * with a slab allocator) is to keep blocks in physical memory, but don't
 * have them mapped. This, of course, would only be important if you have
 * more memory than address space available - which sounds like a good idea
 * now, but with 64-bit address spaces it looks a bit different :-)
 */

class BlockAddressPool {
public:
	BlockAddressPool();
	~BlockAddressPool();

	status_t InitCheck() const { return fArea >= B_OK ? B_OK : fArea; }

	size_t RangeSize() const { return kBlockRangeSize; }
	size_t RangeShift() const { return kBlockRangeShift; }
	addr_t BaseAddress() const { return fBase; }

	addr_t Get();
	void Put(addr_t address);

private:
	benaphore fLock;
	area_id fArea;
	addr_t fBase;
	addr_t fFirstFree;
	int32 fNextFree;
	int32 fFreeList[kBlockAddressSize / kBlockRangeSize];
};


static class BlockAddressPool sBlockAddressPool;


BlockAddressPool::BlockAddressPool()
{
	benaphore_init(&fLock, "block address pool");

	fBase = 0xa0000000;
		// directly after the I/O space area
	fArea = vm_create_null_area(vm_kernel_address_space_id(), "block cache",
		(void **)&fBase, B_BASE_ADDRESS, kBlockAddressSize);

	fFirstFree = fBase;
	fNextFree = -1;
}


BlockAddressPool::~BlockAddressPool()
{
	delete_area(fArea);
}


addr_t
BlockAddressPool::Get()
{
	BenaphoreLocker locker(&fLock);

	// ToDo: we must make sure that a single volume will never eat
	// all available ranges! Every mounted volume should have a
	// guaranteed minimum available for its own use.

	addr_t address = fFirstFree;
	if (address != NULL) {
		// Blocks are first taken from the initial free chunk, and
		// when that is empty, from the free list.
		// This saves us the initialization of the free list array,
		// dunno if it's such a good idea, though.
		if ((fFirstFree += RangeSize()) >= fBase + kBlockAddressSize)
			fFirstFree = NULL;

		return address;
	}

	if (fNextFree == -1)
		return NULL;

	address = (fNextFree << RangeShift()) + fBase;
	fNextFree = fFreeList[fNextFree];

	return address;
}


void
BlockAddressPool::Put(addr_t address)
{
	BenaphoreLocker locker(&fLock);

	int32 index = (address - fBase) >> RangeShift();
	fFreeList[index] = fNextFree;
	fNextFree = index;
}


// #pragma mark -


/* static */
int
block_range::Compare(void *_blockRange, const void *_address)
{
	block_range *range = (block_range *)_blockRange;
	addr_t address = (addr_t)_address;

	return ((range->base - sBlockAddressPool.BaseAddress()) >> sBlockAddressPool.RangeShift())
		- ((address - sBlockAddressPool.BaseAddress()) >> sBlockAddressPool.RangeShift());
}


/* static */
uint32
block_range::Hash(void *_blockRange, const void *_address, uint32 range)
{
	block_range *blockRange = (block_range *)_blockRange;
	addr_t address = (addr_t)_address;

	if (blockRange != NULL) {
		return ((blockRange->base - sBlockAddressPool.BaseAddress())
			>> sBlockAddressPool.RangeShift()) % range;
	}

	return ((address - sBlockAddressPool.BaseAddress())
		>> sBlockAddressPool.RangeShift()) % range;
}


/* static */
status_t
block_range::New(block_cache *cache, block_range **_range)
{
	addr_t address = sBlockAddressPool.Get();
	if (address == NULL)
		return B_ENTRY_NOT_FOUND;

	block_range *range = (block_range *)malloc(sizeof(block_range)
		+ cache->chunks_per_range * sizeof(block_chunk));
	if (range == NULL) {
		sBlockAddressPool.Put(address);
		return B_NO_MEMORY;
	}

	TRACE(("new block range %p, base = %p!\n", range, (void *)address));
	memset(range, 0, sizeof(block_range) + cache->chunks_per_range * sizeof(block_chunk));
	range->base = address;

	// insert into free ranges list and hash in cache
	cache->free_ranges.Add(range);
	hash_insert(cache->ranges_hash, range);

	*_range = range;
	return B_OK;
}


/*static*/
void
block_range::Delete(block_cache *cache, block_range *range)
{
	TRACE(("delete block range %p, base = %p!\n", range, (void *)range->base));

	// unmap the memory

	vm_address_space *addressSpace = vm_kernel_address_space();
	vm_translation_map *map = &addressSpace->translation_map;

	map->ops->lock(map);
	map->ops->unmap(map, range->base, range->base + kBlockRangeSize - 1);
	map->ops->unlock(map);

	sBlockAddressPool.Put(range->base);

	// free pages

	uint32 numPages = kBlockRangeSize / B_PAGE_SIZE;
	for (uint32 i = 0; i < numPages; i++) {
		if (range->pages[i] == NULL)
			continue;

		vm_page_set_state(range->pages[i], PAGE_STATE_FREE);
	}

	// remove from cache free list and hash
	cache->free_ranges.Remove(range);
	hash_remove(cache->ranges_hash, range);

	free(range);
}


uint32
block_range::BlockIndex(block_cache *cache, void *address)
{
	return (((addr_t)address - base) % cache->chunk_size) / cache->block_size;
}


uint32
block_range::ChunkIndex(block_cache *cache, void *address)
{
	return ((addr_t)address - base) / cache->chunk_size;
}


block_chunk *
block_range::Chunk(block_cache *cache, void *address)
{
	return &chunks[ChunkIndex(cache, address)];
}


status_t
block_range::Allocate(block_cache *cache, cached_block *block)
{
	block_chunk *chunk;

	void *address = Allocate(cache, &chunk);
	if (address == NULL)
		return B_NO_MEMORY;

	block->current_data = address;

	// add the block to the chunk
	block->chunk_next = chunk->blocks;
	chunk->blocks = block;

	return B_OK;
}


void
block_range::Free(block_cache *cache, cached_block *block)
{
	Free(cache, block->current_data);
	block_chunk *chunk = Chunk(cache, block->current_data);

	// remove block from list

	cached_block *last = NULL, *next = chunk->blocks;
	while (next != NULL && next != block) {
		last = next;
		next = next->chunk_next;
	}
	if (next == NULL) {
		panic("cached_block %p was not in chunk %p\n", block, chunk);
	} else {
		if (last)
			last->chunk_next = block->chunk_next;
		else
			chunk->blocks = block->chunk_next;
	}

	block->current_data = NULL;
}


void *
block_range::Allocate(block_cache *cache, block_chunk **_chunk)
{
	// get free chunk in range

	uint32 chunk;
	for (chunk = 0; chunk < cache->chunks_per_range; chunk++) {
		if ((used_mask & (1UL << chunk)) == 0)
			break;
	}
	if (chunk == cache->chunks_per_range) {
		panic("block_range %p pretended to be free but isn't\n", this);
		return NULL;
	}

	// get free block in chunk

	uint32 numBlocks = cache->chunk_size / cache->block_size;
	uint32 block;
	for (block = 0; block < numBlocks; block++) {
		if ((chunks[chunk].used_mask & (1UL << block)) == 0)
			break;
	}
	if (block == numBlocks) {
		panic("block_chunk %lu in range %p pretended to be free but isn't\n", block, this);
		return NULL;
	}

	if (!chunks[chunk].mapped) {
		// allocate pages if needed
		uint32 numPages = cache->chunk_size / B_PAGE_SIZE;
		uint32 pageBaseIndex = numPages * chunk;
		if (pages[pageBaseIndex] == NULL) {
			// there are no pages for us yet
			for (uint32 i = 0; i < numPages; i++) {
				vm_page *page = vm_page_allocate_page(PAGE_STATE_FREE, false);
				if (page == NULL) {
					// ToDo: handle this gracefully
					panic("no memory for block!!\n");
					return NULL;
				}

				pages[pageBaseIndex + i] = page;
			}
		}

		// map the memory

		vm_address_space *addressSpace = vm_kernel_address_space();
		vm_translation_map *map = &addressSpace->translation_map;
		size_t reservePages = map->ops->map_max_pages_need(map,
			0, numPages * B_PAGE_SIZE);

		vm_page_reserve_pages(reservePages);
		map->ops->lock(map);

		for (uint32 i = 0; i < numPages; i++) {
			map->ops->map(map, base + chunk * cache->chunk_size + i * B_PAGE_SIZE,
				pages[pageBaseIndex + i]->physical_page_number * B_PAGE_SIZE,
				B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		}

		map->ops->unlock(map);
		vm_page_unreserve_pages(reservePages);

		chunks[chunk].mapped = true;
	}

	chunks[chunk].used_mask |= 1UL << block;
	if (chunks[chunk].used_mask == cache->chunk_mask) {
		// all blocks are used in this chunk, propagate usage bit
		used_mask |= 1UL << chunk;

		if (used_mask == cache->range_mask) {
			// range is full, remove it from the free list
			cache->free_ranges.Remove(this);
		}
	}
	TRACE(("Allocate: used masks: chunk = %x, range = %lx\n", chunks[chunk].used_mask, used_mask));

	if (_chunk)
		*_chunk = &chunks[chunk];
	return (void *)(base + cache->chunk_size * chunk + cache->block_size * block);
}


void
block_range::Free(block_cache *cache, void *address)
{
	uint32 chunk = ChunkIndex(cache, address);

	if (chunks[chunk].used_mask == cache->chunk_mask) {
		if (used_mask == cache->range_mask) {
			// range was full before, add it to the free list
			cache->free_ranges.Add(this);
		}
		// chunk was full before, propagate usage bit to range
		used_mask &= ~(1UL << chunk);
	}
	chunks[chunk].used_mask &= ~(1UL << BlockIndex(cache, address));

	TRACE(("Free: used masks: chunk = %x, range = %lx\n", chunks[chunk].used_mask, used_mask));
}


bool
block_range::Unused(const block_cache *cache) const
{
	if (used_mask != 0)
		return false;

	for (int32 chunk = cache->chunks_per_range; chunk-- > 0;) {
		if (chunks[chunk].used_mask != 0)
			return false;
	}

	return true;
}


// #pragma mark -


extern "C" status_t
init_block_allocator(void)
{
	new(&sBlockAddressPool) BlockAddressPool;
		// static initializers do not work in the kernel,
		// so we have to do it here, manually

	return sBlockAddressPool.InitCheck();
}
src/system/kernel/cache/block_cache.cpp (vendored): 145 changed lines
@@ -195,6 +195,7 @@ static status_t write_cached_block(block_cache *cache, cached_block *block,
static DoublyLinkedList<block_cache> sCaches;
static mutex sCachesLock;
#endif
static object_cache *sBlockCache;


// #pragma mark - private transaction
@@ -311,7 +312,6 @@ block_cache::block_cache(int _fd, off_t numBlocks, size_t blockSize,
	next_transaction_id(1),
	last_transaction(NULL),
	transaction_hash(NULL),
	ranges_hash(NULL),
	read_only(readOnly)
{
#ifdef DEBUG_BLOCK_CACHE
@@ -320,6 +320,11 @@ block_cache::block_cache(int _fd, off_t numBlocks, size_t blockSize,
	mutex_unlock(&sCachesLock);
#endif

	buffer_cache = create_object_cache_etc("block cache buffers", blockSize,
		8, 0, CACHE_LARGE_SLAB, NULL, NULL, NULL, NULL);
	if (buffer_cache == NULL)
		return;

	hash = hash_init(32, 0, &cached_block::Compare, &cached_block::Hash);
	if (hash == NULL)
		return;
@@ -329,18 +334,9 @@ block_cache::block_cache(int _fd, off_t numBlocks, size_t blockSize,
	if (transaction_hash == NULL)
		return;

	ranges_hash = hash_init(16, 0, &block_range::Compare, &block_range::Hash);
	if (ranges_hash == NULL)
		return;

	if (benaphore_init(&lock, "block cache") < B_OK)
		return;

	chunk_size = max_c(blockSize, B_PAGE_SIZE);
	chunks_per_range = kBlockRangeSize / chunk_size;
	range_mask = (1UL << chunks_per_range) - 1;
	chunk_mask = (1UL << (chunk_size / blockSize)) - 1;

	register_low_memory_handler(&block_cache::LowMemoryHandler, this, 0);
}
@@ -357,9 +353,10 @@ block_cache::~block_cache()

	benaphore_destroy(&lock);

	hash_uninit(ranges_hash);
	hash_uninit(transaction_hash);
	hash_uninit(hash);

	delete_object_cache(buffer_cache);
}
@@ -369,83 +366,31 @@ block_cache::InitCheck()
	if (lock.sem < B_OK)
		return lock.sem;

	if (hash == NULL || transaction_hash == NULL || ranges_hash == NULL)
	if (buffer_cache == NULL || hash == NULL || transaction_hash == NULL)
		return B_NO_MEMORY;

	return B_OK;
}


block_range *
block_cache::GetFreeRange()
{
	if (!free_ranges.IsEmpty())
		return free_ranges.First();

	// we need to allocate a new range
	block_range *range;
	if (block_range::New(this, &range) != B_OK) {
		RemoveUnusedBlocks(2, 50);

		if (!free_ranges.IsEmpty())
			return free_ranges.First();

		RemoveUnusedBlocks(LONG_MAX, 50);

		if (!free_ranges.IsEmpty())
			return free_ranges.First();

		// TODO: We also need to free ranges from other caches to get a free one
		// (if not, an active volume might have stolen all free ranges already)
		return NULL;
	}

	return range;
}


block_range *
block_cache::GetRange(void *address)
{
	return (block_range *)hash_lookup(ranges_hash, address);
}


void
block_cache::Free(void *address)
block_cache::Free(void *buffer)
{
	if (address == NULL)
		return;

	block_range *range = GetRange(address);
	if (range == NULL)
		panic("no range for address %p\n", address);

	ASSERT(range != NULL);
	range->Free(this, address);

	if (range->Unused(this))
		block_range::Delete(this, range);
	object_cache_free(buffer_cache, buffer);
}


void *
block_cache::Allocate()
{
	block_range *range = GetFreeRange();
	if (range == NULL)
		return NULL;

	return range->Allocate(this);
	return object_cache_alloc(buffer_cache, 0);
}


void
block_cache::FreeBlock(cached_block *block)
{
	block_range *range = GetRange(block->current_data);
	ASSERT(range != NULL);
	range->Free(this, block);
	Free(block->current_data);

	if (block->original_data != NULL || block->parent_data != NULL) {
		panic("block_cache::FreeBlock(): %p, %p\n", block->original_data,
@@ -456,10 +401,7 @@ block_cache::FreeBlock(cached_block *block)
	Free(block->compare);
#endif

	if (range->Unused(this))
		block_range::Delete(this, range);

	delete block;
	object_cache_free(sBlockCache, block);
}
@@ -477,6 +419,14 @@ block_cache::_GetUnusedBlock()
		// remove block from lists
		iterator.Remove();
		hash_remove(hash, block);

		// TODO: see if parent/compare data is handled correctly here!
		if (block->parent_data != NULL
			&& block->parent_data != block->original_data)
			Free(block->parent_data);
		if (block->original_data != NULL)
			Free(block->original_data);

		return block;
	}
@@ -488,15 +438,7 @@ block_cache::_GetUnusedBlock()
cached_block *
block_cache::NewBlock(off_t blockNumber)
{
	cached_block *block = new(nothrow) cached_block;
	if (block != NULL) {
		block_range *range = GetFreeRange();
		if (range == NULL) {
			delete block;
			block = NULL;
		} else
			range->Allocate(this, block);
	}
	cached_block *block = (cached_block *)object_cache_alloc(sBlockCache, 0);
	if (block == NULL) {
		dprintf("block allocation failed, unused list is %sempty.\n",
			unused_blocks.IsEmpty() ? "" : "not ");
@@ -509,6 +451,12 @@ block_cache::NewBlock(off_t blockNumber)
		}
	}

	block->current_data = Allocate();
	if (block->current_data == NULL) {
		object_cache_free(sBlockCache, block);
		return NULL;
	}

	block->block_number = blockNumber;
	block->ref_count = 0;
	block->accessed = 0;
@@ -701,23 +649,6 @@ get_cached_block(block_cache *cache, off_t blockNumber, bool *_allocated,

		hash_insert(cache->hash, block);
		*_allocated = true;
	} else {
		// TODO: currently, the data is always mapped in
/*
		if (block->ref_count == 0 && block->current_data != NULL) {
			// see if the old block can be resurrected
			block->current_data = cache->allocator->Acquire(block->current_data);
		}

		if (block->current_data == NULL) {
			// there is no block yet, but we need one
			block->current_data = cache->allocator->Get();
			if (block->current_data == NULL)
				return NULL;

			*_allocated = true;
		}
*/
	}

	if (*_allocated && readBlock) {
@@ -938,8 +869,6 @@ dump_cache(int argc, char **argv)
			kprintf(" is-dirty");
		if (block->unused)
			kprintf(" unused");
		if (block->unmapped)
			kprintf(" unmapped");
		kprintf("\n");
		if (block->transaction != NULL) {
			kprintf(" transaction: %p (%ld)\n", block->transaction,
@@ -965,10 +894,6 @@ dump_cache(int argc, char **argv)
	kprintf(" max_blocks: %Ld\n", cache->max_blocks);
	kprintf(" block_size: %lu\n", cache->block_size);
	kprintf(" next_transaction_id: %ld\n", cache->next_transaction_id);
	kprintf(" chunks_per_range: %lu\n", cache->chunks_per_range);
	kprintf(" chunks_size: %lu\n", cache->chunk_size);
	kprintf(" range_mask: %lu\n", cache->range_mask);
	kprintf(" chunks_mask: %lu\n", cache->chunk_mask);

	if (showBlocks) {
		kprintf(" blocks:\n");
@@ -984,13 +909,13 @@ dump_cache(int argc, char **argv)
	cached_block *block;
	while ((block = (cached_block *)hash_next(cache->hash, &iterator)) != NULL) {
		if (showBlocks) {
			kprintf("%08lx %9Ld %08lx %08lx %08lx %5ld %6ld %c%c%c%c%c %08lx "
			kprintf("%08lx %9Ld %08lx %08lx %08lx %5ld %6ld %c%c%c%c %08lx "
				"%08lx\n", (addr_t)block, block->block_number,
				(addr_t)block->current_data, (addr_t)block->original_data,
				(addr_t)block->parent_data, block->ref_count, block->accessed,
				block->busy ? 'B' : '-', block->is_writing ? 'W' : '-',
				block->is_dirty ? 'B' : '-', block->unused ? 'U' : '-',
				block->unmapped ? 'M' : '-', (addr_t)block->transaction,
				(addr_t)block->transaction,
				(addr_t)block->previous_transaction);
		}

@@ -1026,6 +951,11 @@ dump_caches(int argc, char **argv)
extern "C" status_t
block_cache_init(void)
{
	sBlockCache = create_object_cache_etc("cached blocks", sizeof(cached_block),
		8, 0, CACHE_LARGE_SLAB, NULL, NULL, NULL, NULL);
	if (sBlockCache == NULL)
		return B_ERROR;

#ifdef DEBUG_BLOCK_CACHE
	mutex_init(&sCachesLock, "block caches");
	new (&sCaches) DoublyLinkedList<block_cache>;
@@ -1035,7 +965,7 @@ block_cache_init(void)
	add_debugger_command("block_cache", &dump_cache, "dumps a specific block cache");
#endif

	return init_block_allocator();
	return B_OK;
}
@@ -1498,7 +1428,6 @@ cache_blocks_in_sub_transaction(void *_cache, int32 id)


// #pragma mark - public block cache API
// public interface


extern "C" void
src/system/kernel/cache/block_cache_private.h (vendored): 71 changed lines
@@ -7,6 +7,7 @@


#include <lock.h>
#include <slab/Slab.h>
#include <util/DoublyLinkedList.h>

struct hash_table;
@@ -16,25 +17,16 @@ struct vm_page;
#define DEBUG_CHANGED


static const size_t kBlockAddressSize = 128 * 1024 * 1024;	// 128 MB
static const size_t kBlockRangeSize = 64 * 1024;	// 64 kB
static const size_t kBlockRangeShift = 16;
static const size_t kNumBlockRangePages = kBlockRangeSize / B_PAGE_SIZE;

struct cache_transaction;
struct cached_block;
struct block_chunk;
struct block_cache;
struct block_range;
typedef DoublyLinkedListLink<cached_block> block_link;
typedef DoublyLinkedListLink<block_range> range_link;


struct cached_block {
	cached_block *next;	// next in hash
	cached_block *transaction_next;
	block_link link;
	cached_block *chunk_next;
	off_t block_number;
	void *current_data;
	void *original_data;
@@ -48,7 +40,6 @@ struct cached_block {
	bool is_writing : 1;
	bool is_dirty : 1;
	bool unused : 1;
	bool unmapped : 1;
	cache_transaction *transaction;
	cache_transaction *previous_transaction;
@@ -60,43 +51,6 @@ typedef DoublyLinkedList<cached_block,
	DoublyLinkedListMemberGetLink<cached_block,
		&cached_block::link> > block_list;

struct block_chunk {
	cached_block *blocks;
	bool mapped;
	uint16 used_mask;	// min chunk size is 4096 bytes; min block size is 256 bytes
};

struct block_range {
	block_range *next;	// next in hash
	range_link link;
	addr_t base;
	uint32 used_mask;
	vm_page *pages[kNumBlockRangePages];
	block_chunk chunks[0];

	static status_t New(block_cache *cache, block_range **_range);
	static void Delete(block_cache *cache, block_range *range);

	status_t Allocate(block_cache *cache, cached_block *block);
	void Free(block_cache *cache, cached_block *block);

	void *Allocate(block_cache *cache, block_chunk **_chunk = NULL);
	void Free(block_cache *cache, void *address);

	uint32 BlockIndex(block_cache *cache, void *address);
	uint32 ChunkIndex(block_cache *cache, void *address);
	block_chunk *Chunk(block_cache *cache, void *address);

	bool Unused(const block_cache *cache) const;

	static int Compare(void *_blockRange, const void *_address);
	static uint32 Hash(void *_blockRange, const void *_address, uint32 range);
};

typedef DoublyLinkedList<block_range,
	DoublyLinkedListMemberGetLink<block_range,
		&block_range::link> > range_list;

struct block_cache : DoublyLinkedListLinkImpl<block_cache> {
	hash_table *hash;
	benaphore lock;
@@ -107,13 +61,7 @@ struct block_cache : DoublyLinkedListLinkImpl<block_cache> {
	cache_transaction *last_transaction;
	hash_table *transaction_hash;

	hash_table *ranges_hash;
	range_list free_ranges;
	uint32 chunks_per_range;
	size_t chunk_size;
	uint32 range_mask;
	uint32 chunk_mask;
	block_list unmapped_blocks;
	object_cache *buffer_cache;
	block_list unused_blocks;

	bool read_only;
@@ -123,13 +71,11 @@ struct block_cache : DoublyLinkedListLinkImpl<block_cache> {

	status_t InitCheck();

	block_range *GetFreeRange();
	block_range *GetRange(void *address);
	void RemoveUnusedBlocks(int32 maxAccessed = LONG_MAX,
		int32 count = LONG_MAX);
	void FreeBlock(cached_block *block);
	cached_block *NewBlock(off_t blockNumber);
	void Free(void *address);
	void Free(void *buffer);
	void *Allocate();

	static void LowMemoryHandler(void *data, int32 level);
@@ -138,15 +84,4 @@ private:
	cached_block *_GetUnusedBlock();
};


#ifdef __cplusplus
extern "C" {
#endif

status_t init_block_allocator();

#ifdef __cplusplus
}
#endif

#endif	/* BLOCK_CACHE_PRIVATE_H */