From 824e907a2c8692631a9dd79405e227cadd83e6b4 Mon Sep 17 00:00:00 2001 From: Ingo Weinhold Date: Sun, 11 Mar 2007 15:04:53 +0000 Subject: [PATCH] Added Haiku block cache interface to the UserlandFS server. Basically copied and adjusted the respective kernel code. git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20367 a95241bf-73f2-0310-859d-f6bbb57e9c96 --- .../file_systems/userlandfs/server/Jamfile | 3 + .../userlandfs/server/UserlandFSServer.cpp | 6 + .../userlandfs/server/haiku_block_cache.cpp | 1123 +++++++++++++++++ .../server/haiku_block_cache_priv.h | 93 ++ .../userlandfs/server/haiku_fs_cache.h | 74 ++ .../userlandfs/server/haiku_hash.cpp | 274 ++++ .../userlandfs/server/haiku_hash.h | 53 + .../userlandfs/server/haiku_kernel_emu.cpp | 173 +++ .../userlandfs/server/haiku_lock.cpp | 247 ++++ .../userlandfs/server/haiku_lock.h | 159 +++ .../userlandfs/server/kernel_emu.cpp | 43 +- .../userlandfs/server/kernel_emu.h | 11 +- 12 files changed, 2253 insertions(+), 6 deletions(-) create mode 100644 src/add-ons/kernel/file_systems/userlandfs/server/haiku_block_cache.cpp create mode 100644 src/add-ons/kernel/file_systems/userlandfs/server/haiku_block_cache_priv.h create mode 100644 src/add-ons/kernel/file_systems/userlandfs/server/haiku_fs_cache.h create mode 100644 src/add-ons/kernel/file_systems/userlandfs/server/haiku_hash.cpp create mode 100644 src/add-ons/kernel/file_systems/userlandfs/server/haiku_hash.h create mode 100644 src/add-ons/kernel/file_systems/userlandfs/server/haiku_lock.cpp create mode 100644 src/add-ons/kernel/file_systems/userlandfs/server/haiku_lock.h diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/Jamfile b/src/add-ons/kernel/file_systems/userlandfs/server/Jamfile index b6e426ebcd..903bbc3cf2 100644 --- a/src/add-ons/kernel/file_systems/userlandfs/server/Jamfile +++ b/src/add-ons/kernel/file_systems/userlandfs/server/Jamfile @@ -40,6 +40,9 @@ Application UserlandFSServer BeOSKernelVolume.cpp DispatcherFileSystem.cpp 
FileSystem.cpp + haiku_block_cache.cpp + haiku_hash.cpp + haiku_lock.cpp HaikuKernelFileSystem.cpp HaikuKernelVolume.cpp kernel_emu.cpp diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/UserlandFSServer.cpp b/src/add-ons/kernel/file_systems/userlandfs/server/UserlandFSServer.cpp index 5c1532da6a..3df5f75b41 100644 --- a/src/add-ons/kernel/file_systems/userlandfs/server/UserlandFSServer.cpp +++ b/src/add-ons/kernel/file_systems/userlandfs/server/UserlandFSServer.cpp @@ -23,6 +23,7 @@ #include "DispatcherDefs.h" #include "FileSystem.h" #include "FSInfo.h" +#include "haiku_block_cache_priv.h" #include "HaikuKernelFileSystem.h" #include "RequestThread.h" #include "ServerDefs.h" @@ -275,6 +276,11 @@ UserlandFSServer::_CreateHaikuKernelInterface(const char* fsName, RETURN_ERROR(B_NO_MEMORY); ObjectDeleter fsDeleter(fileSystem); + // init block cache + error = UserlandFS::HaikuKernelEmu::block_cache_init(); + if (error != B_OK) + RETURN_ERROR(error); + // init the FS error = fileSystem->Init(); if (error != B_OK) diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_block_cache.cpp b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_block_cache.cpp new file mode 100644 index 0000000000..147833fd5f --- /dev/null +++ b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_block_cache.cpp @@ -0,0 +1,1123 @@ +/* + * Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved. + * Distributed under the terms of the MIT License. + */ + +#include "haiku_block_cache_priv.h" + +#include + +#include +#include +#include +#include + +#include "haiku_fs_cache.h" +#include "haiku_hash.h" +#include "kernel_emu.h" + +// ToDo: this is a naive but growing implementation to test the API: +// 1) block reading/writing is not at all optimized for speed, it will +// just read and write single blocks. 
+// 2) the locking could be improved; getting a block should not need to +// wait for blocks to be written +// 3) dirty blocks are only written back if asked for +// TODO: the retrieval/copy of the original data could be delayed until the +// new data must be written, ie. in low memory situations. + +//#define TRACE_BLOCK_CACHE +#ifdef TRACE_BLOCK_CACHE +# define TRACE(x) dprintf x +#else +# define TRACE(x) ; +#endif + +// This macro is used for fatal situations that are acceptable in a running +// system, like out of memory situations - should only panic for debugging. +#define FATAL(x) panic x + +using UserlandFS::KernelEmu::dprintf; +using UserlandFS::KernelEmu::dump_block; +using UserlandFS::KernelEmu::panic; + +namespace UserlandFS { +namespace HaikuKernelEmu { + +static const int32 kMaxBlockCount = 1024; + + +struct cache_transaction { + cache_transaction(); + + cache_transaction *next; + int32 id; + int32 num_blocks; + int32 sub_num_blocks; + cached_block *first_block; + block_list blocks; + transaction_notification_hook notification_hook; + void *notification_data; + bool open; + bool has_sub_transaction; +}; + +static status_t write_cached_block(block_cache *cache, cached_block *block, + bool deleteTransaction = true); + + +// #pragma mark - private transaction + + +cache_transaction::cache_transaction() +{ + num_blocks = 0; + sub_num_blocks = 0; + first_block = NULL; + notification_hook = NULL; + notification_data = NULL; + open = true; +} + + +static int +transaction_compare(void *_transaction, const void *_id) +{ + cache_transaction *transaction = (cache_transaction *)_transaction; + const int32 *id = (const int32 *)_id; + + return transaction->id - *id; +} + + +static uint32 +transaction_hash(void *_transaction, const void *_id, uint32 range) +{ + cache_transaction *transaction = (cache_transaction *)_transaction; + const int32 *id = (const int32 *)_id; + + if (transaction != NULL) + return transaction->id % range; + + return (uint32)*id % range; +} + 
+ +static void +delete_transaction(block_cache *cache, cache_transaction *transaction) +{ + hash_remove(cache->transaction_hash, transaction); + if (cache->last_transaction == transaction) + cache->last_transaction = NULL; + + delete transaction; +} + + +static cache_transaction * +lookup_transaction(block_cache *cache, int32 id) +{ + return (cache_transaction *)hash_lookup(cache->transaction_hash, &id); +} + + +// #pragma mark - cached_block + + +/* static */ +int +cached_block::Compare(void *_cacheEntry, const void *_block) +{ + cached_block *cacheEntry = (cached_block *)_cacheEntry; + const off_t *block = (const off_t *)_block; + + return cacheEntry->block_number - *block; +} + + + +/* static */ +uint32 +cached_block::Hash(void *_cacheEntry, const void *_block, uint32 range) +{ + cached_block *cacheEntry = (cached_block *)_cacheEntry; + const off_t *block = (const off_t *)_block; + + if (cacheEntry != NULL) + return cacheEntry->block_number % range; + + return (uint64)*block % range; +} + + +// #pragma mark - block_cache + + +block_cache::block_cache(int _fd, off_t numBlocks, size_t blockSize, bool readOnly) + : + hash(NULL), + fd(_fd), + max_blocks(numBlocks), + block_size(blockSize), + next_transaction_id(1), + last_transaction(NULL), + transaction_hash(NULL), + read_only(readOnly) +{ + hash = hash_init(32, 0, &cached_block::Compare, &cached_block::Hash); + if (hash == NULL) + return; + + transaction_hash = hash_init(16, 0, &transaction_compare, + &UserlandFS::HaikuKernelEmu::transaction_hash); + if (transaction_hash == NULL) + return; + + if (benaphore_init(&lock, "block cache") < B_OK) + return; +} + + +block_cache::~block_cache() +{ + benaphore_destroy(&lock); + + hash_uninit(transaction_hash); + hash_uninit(hash); +} + + +status_t +block_cache::InitCheck() +{ + if (lock.sem < B_OK) + return lock.sem; + + if (hash == NULL || transaction_hash == NULL) + return B_NO_MEMORY; + + return B_OK; +} + + +void +block_cache::Free(void *address) +{ + if (address == 
NULL) + return; + + free(address); +} + + +void * +block_cache::Allocate() +{ + return malloc(block_size); +} + + +void +block_cache::FreeBlock(cached_block *block) +{ + Free(block->current_data); + block->current_data = NULL; + + if (block->original_data != NULL || block->parent_data != NULL) + panic("block_cache::FreeBlock(): %p, %p\n", block->original_data, block->parent_data); + +#ifdef DEBUG_CHANGED + Free(block->compare); +#endif + + delete block; +} + + +cached_block * +block_cache::NewBlock(off_t blockNumber) +{ + cached_block *block = new(nothrow) cached_block; + if (block == NULL) { + FATAL(("could not allocate block!\n")); + return NULL; + } + + // if we hit the limit of blocks to cache¸ try to free one or more + if (allocated_block_count >= kMaxBlockCount) { + RemoveUnusedBlocks(LONG_MAX, + allocated_block_count - kMaxBlockCount + 1); + } + + block->current_data = Allocate(); + if (!block->current_data) { + FATAL(("could not allocate block data!\n")); + delete block; + return NULL; + } + + block->block_number = blockNumber; + block->ref_count = 0; + block->accessed = 0; + block->transaction_next = NULL; + block->transaction = block->previous_transaction = NULL; + block->original_data = NULL; + block->parent_data = NULL; + block->is_dirty = false; + block->unused = false; +#ifdef DEBUG_CHANGED + block->compare = NULL; +#endif + + hash_insert(hash, block); + allocated_block_count++; + + return block; +} + + +void +block_cache::RemoveUnusedBlocks(int32 maxAccessed, int32 count) +{ + TRACE(("block_cache: remove up to %ld unused blocks\n", count)); + + cached_block *next = NULL; + for (cached_block *block = unused_blocks.First(); block != NULL; block = next) { + next = block->next; + + if (maxAccessed < block->accessed) + continue; + + TRACE((" remove block %Ld, accessed %ld times\n", + block->block_number, block->accessed)); + + // this can only happen if no transactions are used + if (block->is_dirty) + write_cached_block(this, block, false); + + // remove 
block from lists + unused_blocks.Remove(block); + hash_remove(hash, block); + + FreeBlock(block); + + if (--count <= 0) + break; + } +} + + +// #pragma mark - + + +static void +put_cached_block(block_cache *cache, cached_block *block) +{ +#ifdef DEBUG_CHANGED + if (!block->is_dirty && block->compare != NULL && memcmp(block->current_data, block->compare, cache->block_size)) { + dprintf("new block:\n"); + dump_block((const char *)block->current_data, 256, " "); + dprintf("unchanged block:\n"); + dump_block((const char *)block->compare, 256, " "); + write_cached_block(cache, block); + panic("block_cache: supposed to be clean block was changed!\n"); + + cache->Free(block->compare); + block->compare = NULL; + } +#endif + + if (--block->ref_count == 0 + && block->transaction == NULL + && block->previous_transaction == NULL) { + // put this block in the list of unused blocks + block->unused = true; + cache->unused_blocks.Add(block); + } + + if (cache->allocated_block_count > kMaxBlockCount) { + cache->RemoveUnusedBlocks(LONG_MAX, + cache->allocated_block_count - kMaxBlockCount); + } +} + + +static void +put_cached_block(block_cache *cache, off_t blockNumber) +{ + if (blockNumber < 0 || blockNumber >= cache->max_blocks) + panic("put_cached_block: invalid block number %lld (max %lld)", blockNumber, cache->max_blocks - 1); + + cached_block *block = (cached_block *)hash_lookup(cache->hash, &blockNumber); + if (block != NULL) + put_cached_block(cache, block); +} + + +static cached_block * +get_cached_block(block_cache *cache, off_t blockNumber, bool *allocated, bool readBlock = true) +{ + if (blockNumber < 0 || blockNumber >= cache->max_blocks) + panic("get_cached_block: invalid block number %lld (max %lld)", blockNumber, cache->max_blocks - 1); + + cached_block *block = (cached_block *)hash_lookup(cache->hash, &blockNumber); + *allocated = false; + + if (block == NULL) { + // read block into cache + block = cache->NewBlock(blockNumber); + if (block == NULL) + return NULL; + + 
*allocated = true; + } else { +/* + if (block->ref_count == 0 && block->current_data != NULL) { + // see if the old block can be resurrected + block->current_data = cache->allocator->Acquire(block->current_data); + } + + if (block->current_data == NULL) { + // there is no block yet, but we need one + block->current_data = cache->allocator->Get(); + if (block->current_data == NULL) + return NULL; + + *allocated = true; + } +*/ + } + + if (*allocated && readBlock) { + int32 blockSize = cache->block_size; + + if (read_pos(cache->fd, blockNumber * blockSize, block->current_data, blockSize) < blockSize) { + hash_remove(cache->hash, block); + cache->FreeBlock(block); + FATAL(("could not read block %Ld\n", blockNumber)); + return NULL; + } + } + + if (block->unused) { + //TRACE(("remove block %Ld from unused\n", blockNumber)); + block->unused = false; + cache->unused_blocks.Remove(block); + } + + block->ref_count++; + block->accessed++; + + return block; +} + + +/** Returns the writable block data for the requested blockNumber. + * If \a cleared is true, the block is not read from disk; an empty block + * is returned. + * This is the only method to insert a block into a transaction. It makes + * sure that the previous block contents are preserved in that case. 
+ */ + +static void * +get_writable_cached_block(block_cache *cache, off_t blockNumber, off_t base, off_t length, + int32 transactionID, bool cleared) +{ + TRACE(("get_writable_cached_block(blockNumber = %Ld, transaction = %ld)\n", blockNumber, transactionID)); + + if (blockNumber < 0 || blockNumber >= cache->max_blocks) + panic("get_writable_cached_block: invalid block number %lld (max %lld)", blockNumber, cache->max_blocks - 1); + + bool allocated; + cached_block *block = get_cached_block(cache, blockNumber, &allocated, !cleared); + if (block == NULL) + return NULL; + + // if there is no transaction support, we just return the current block + if (transactionID == -1) { + if (cleared) + memset(block->current_data, 0, cache->block_size); + + block->is_dirty = true; + // mark the block as dirty + + return block->current_data; + } + + if (block->transaction != NULL && block->transaction->id != transactionID) { + // ToDo: we have to wait here until the other transaction is done. + // Maybe we should even panic, since we can't prevent any deadlocks. 
+ panic("get_writable_cached_block(): asked to get busy writable block (transaction %ld)\n", block->transaction->id); + put_cached_block(cache, block); + return NULL; + } + if (block->transaction == NULL && transactionID != -1) { + // get new transaction + cache_transaction *transaction = lookup_transaction(cache, transactionID); + if (transaction == NULL) { + panic("get_writable_cached_block(): invalid transaction %ld!\n", transactionID); + put_cached_block(cache, block); + return NULL; + } + if (!transaction->open) { + panic("get_writable_cached_block(): transaction already done!\n"); + put_cached_block(cache, block); + return NULL; + } + + block->transaction = transaction; + + // attach the block to the transaction block list + block->transaction_next = transaction->first_block; + transaction->first_block = block; + transaction->num_blocks++; + } + + if (!(allocated && cleared) && block->original_data == NULL) { + // we already have data, so we need to preserve it + block->original_data = cache->Allocate(); + if (block->original_data == NULL) { + FATAL(("could not allocate original_data\n")); + put_cached_block(cache, block); + return NULL; + } + + memcpy(block->original_data, block->current_data, cache->block_size); + } + if (block->parent_data == block->current_data) { + // remember any previous contents for the parent transaction + block->parent_data = cache->Allocate(); + if (block->parent_data == NULL) { + // ToDo: maybe we should just continue the current transaction in this case... 
+ FATAL(("could not allocate parent\n")); + put_cached_block(cache, block); + return NULL; + } + + memcpy(block->parent_data, block->current_data, cache->block_size); + block->transaction->sub_num_blocks++; + } + + if (cleared) + memset(block->current_data, 0, cache->block_size); + + block->is_dirty = true; + + return block->current_data; +} + + +static status_t +write_cached_block(block_cache *cache, cached_block *block, bool deleteTransaction) +{ + cache_transaction *previous = block->previous_transaction; + int32 blockSize = cache->block_size; + + void *data = previous && block->original_data ? block->original_data : block->current_data; + // we first need to write back changes from previous transactions + + TRACE(("write_cached_block(block %Ld)\n", block->block_number)); + + ssize_t written = write_pos(cache->fd, block->block_number * blockSize, data, blockSize); + + if (written < blockSize) { + FATAL(("could not write back block %Ld (%s)\n", block->block_number, strerror(errno))); + return B_IO_ERROR; + } + + if (data == block->current_data) + block->is_dirty = false; + + if (previous != NULL) { + previous->blocks.Remove(block); + block->previous_transaction = NULL; + + // Has the previous transation been finished with that write? 
+ if (--previous->num_blocks == 0) { + TRACE(("cache transaction %ld finished!\n", previous->id)); + + if (previous->notification_hook != NULL) + previous->notification_hook(previous->id, previous->notification_data); + + if (deleteTransaction) + delete_transaction(cache, previous); + } + } + + return B_OK; +} + + +status_t +block_cache_init() +{ + return B_OK; +} + + +// #pragma mark - public transaction API + + +int32 +cache_start_transaction(void *_cache) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + if (cache->last_transaction && cache->last_transaction->open) + panic("last transaction (%ld) still open!\n", cache->last_transaction->id); + + cache_transaction *transaction = new(nothrow) cache_transaction; + if (transaction == NULL) + return B_NO_MEMORY; + + transaction->id = atomic_add(&cache->next_transaction_id, 1); + cache->last_transaction = transaction; + + TRACE(("cache_start_transaction(): id %ld started\n", transaction->id)); + + hash_insert(cache->transaction_hash, transaction); + + return transaction->id; +} + + +status_t +cache_sync_transaction(void *_cache, int32 id) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + status_t status = B_ENTRY_NOT_FOUND; + + hash_iterator iterator; + hash_open(cache->transaction_hash, &iterator); + + cache_transaction *transaction; + while ((transaction = (cache_transaction *)hash_next(cache->transaction_hash, &iterator)) != NULL) { + // ToDo: fix hash interface to make this easier + + if (transaction->id <= id && !transaction->open) { + while (transaction->num_blocks > 0) { + status = write_cached_block(cache, transaction->blocks.Head(), false); + if (status != B_OK) + return status; + } + delete_transaction(cache, transaction); + hash_rewind(cache->transaction_hash, &iterator); + } + } + + hash_close(cache->transaction_hash, &iterator, false); + return B_OK; +} + + +status_t +cache_end_transaction(void *_cache, int32 id, 
transaction_notification_hook hook, void *data) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + TRACE(("cache_end_transaction(id = %ld)\n", id)); + + cache_transaction *transaction = lookup_transaction(cache, id); + if (transaction == NULL) { + panic("cache_end_transaction(): invalid transaction ID\n"); + return B_BAD_VALUE; + } + + transaction->notification_hook = hook; + transaction->notification_data = data; + + // iterate through all blocks and free the unchanged original contents + + cached_block *block = transaction->first_block, *next; + for (; block != NULL; block = next) { + next = block->transaction_next; + + if (block->previous_transaction != NULL) { + // need to write back pending changes + write_cached_block(cache, block); + } + + if (block->original_data != NULL) { + cache->Free(block->original_data); + block->original_data = NULL; + } + if (transaction->has_sub_transaction) { + if (block->parent_data != block->current_data) + cache->Free(block->parent_data); + block->parent_data = NULL; + } + + // move the block to the previous transaction list + transaction->blocks.Add(block); + + block->previous_transaction = transaction; + block->transaction_next = NULL; + block->transaction = NULL; + } + + transaction->open = false; + + return B_OK; +} + + +status_t +cache_abort_transaction(void *_cache, int32 id) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + TRACE(("cache_abort_transaction(id = %ld)\n", id)); + + cache_transaction *transaction = lookup_transaction(cache, id); + if (transaction == NULL) { + panic("cache_abort_transaction(): invalid transaction ID\n"); + return B_BAD_VALUE; + } + + // iterate through all blocks and restore their original contents + + cached_block *block = transaction->first_block, *next; + for (; block != NULL; block = next) { + next = block->transaction_next; + + if (block->original_data != NULL) { + TRACE(("cache_abort_transaction(id = 
%ld): restored contents of block %Ld\n", + transaction->id, block->block_number)); + memcpy(block->current_data, block->original_data, cache->block_size); + cache->Free(block->original_data); + block->original_data = NULL; + } + if (transaction->has_sub_transaction) { + if (block->parent_data != block->current_data) + cache->Free(block->parent_data); + block->parent_data = NULL; + } + + block->transaction_next = NULL; + block->transaction = NULL; + } + + delete_transaction(cache, transaction); + return B_OK; +} + + +/** Acknowledges the current parent transaction, and starts a new transaction + * from its sub transaction. + * The new transaction also gets a new transaction ID. + */ + +int32 +cache_detach_sub_transaction(void *_cache, int32 id, + transaction_notification_hook hook, void *data) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + TRACE(("cache_detach_sub_transaction(id = %ld)\n", id)); + + cache_transaction *transaction = lookup_transaction(cache, id); + if (transaction == NULL) { + panic("cache_detach_sub_transaction(): invalid transaction ID\n"); + return B_BAD_VALUE; + } + if (!transaction->has_sub_transaction) + return B_BAD_VALUE; + + // create a new transaction for the sub transaction + cache_transaction *newTransaction = new(nothrow) cache_transaction; + if (transaction == NULL) + return B_NO_MEMORY; + + newTransaction->id = atomic_add(&cache->next_transaction_id, 1); + + transaction->notification_hook = hook; + transaction->notification_data = data; + + // iterate through all blocks and free the unchanged original contents + + cached_block *block = transaction->first_block, *next, *last = NULL; + for (; block != NULL; block = next) { + next = block->transaction_next; + + if (block->previous_transaction != NULL) { + // need to write back pending changes + write_cached_block(cache, block); + } + + if (block->original_data != NULL && block->parent_data != NULL + && block->parent_data != 
block->current_data) { + // free the original data if the parent data of the transaction + // will be made current - but keep them otherwise + cache->Free(block->original_data); + block->original_data = NULL; + } + if (block->parent_data != NULL && block->parent_data != block->current_data) { + // we need to move this block over to the new transaction + block->original_data = block->parent_data; + if (last == NULL) + newTransaction->first_block = block; + else + last->transaction_next = block; + + last = block; + } + block->parent_data = NULL; + + // move the block to the previous transaction list + transaction->blocks.Add(block); + + block->previous_transaction = transaction; + block->transaction_next = NULL; + block->transaction = newTransaction; + } + + transaction->open = false; + + hash_insert(cache->transaction_hash, newTransaction); + cache->last_transaction = newTransaction; + + return B_OK; +} + + +status_t +cache_abort_sub_transaction(void *_cache, int32 id) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + TRACE(("cache_abort_sub_transaction(id = %ld)\n", id)); + + cache_transaction *transaction = lookup_transaction(cache, id); + if (transaction == NULL) { + panic("cache_abort_sub_transaction(): invalid transaction ID\n"); + return B_BAD_VALUE; + } + if (!transaction->has_sub_transaction) + return B_BAD_VALUE; + + // revert all changes back to the version of the parent + + cached_block *block = transaction->first_block, *next; + for (; block != NULL; block = next) { + next = block->transaction_next; + + if (block->parent_data == NULL) { + if (block->original_data != NULL) { + // the parent transaction didn't change the block, but the sub + // transaction did - we need to revert from the original data + memcpy(block->current_data, block->original_data, cache->block_size); + } + } else if (block->parent_data != block->current_data) { + // the block has been changed and must be restored + 
TRACE(("cache_abort_sub_transaction(id = %ld): restored contents of block %Ld\n", + transaction->id, block->block_number)); + memcpy(block->current_data, block->parent_data, cache->block_size); + cache->Free(block->parent_data); + } + + block->parent_data = NULL; + } + + // all subsequent changes will go into the main transaction + transaction->has_sub_transaction = false; + return B_OK; +} + + +status_t +cache_start_sub_transaction(void *_cache, int32 id) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + TRACE(("cache_start_sub_transaction(id = %ld)\n", id)); + + cache_transaction *transaction = lookup_transaction(cache, id); + if (transaction == NULL) { + panic("cache_start_sub_transaction(): invalid transaction ID %ld\n", id); + return B_BAD_VALUE; + } + + // move all changed blocks up to the parent + + cached_block *block = transaction->first_block, *next; + for (; block != NULL; block = next) { + next = block->transaction_next; + + if (transaction->has_sub_transaction + && block->parent_data != NULL + && block->parent_data != block->current_data) { + // there already is an older sub transaction - we acknowledge + // its changes and move its blocks up to the parent + cache->Free(block->parent_data); + } + + // we "allocate" the parent data lazily, that means, we don't copy + // the data (and allocate memory for it) until we need to + block->parent_data = block->current_data; + } + + // all subsequent changes will go into the sub transaction + transaction->has_sub_transaction = true; + transaction->sub_num_blocks = 0; + + return B_OK; +} + + +status_t +cache_next_block_in_transaction(void *_cache, int32 id, uint32 *_cookie, off_t *_blockNumber, + void **_data, void **_unchangedData) +{ + cached_block *block = (cached_block *)*_cookie; + block_cache *cache = (block_cache *)_cache; + + BenaphoreLocker locker(&cache->lock); + + cache_transaction *transaction = lookup_transaction(cache, id); + if (transaction == NULL) + 
return B_BAD_VALUE; + + if (block == NULL) + block = transaction->first_block; + else + block = block->transaction_next; + + if (block == NULL) + return B_ENTRY_NOT_FOUND; + + if (_blockNumber) + *_blockNumber = block->block_number; + if (_data) + *_data = block->current_data; + if (_unchangedData) + *_unchangedData = block->original_data; + + *_cookie = (uint32)block; + return B_OK; +} + + +int32 +cache_blocks_in_transaction(void *_cache, int32 id) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + cache_transaction *transaction = lookup_transaction(cache, id); + if (transaction == NULL) + return B_BAD_VALUE; + + return transaction->num_blocks; +} + + +int32 +cache_blocks_in_sub_transaction(void *_cache, int32 id) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + cache_transaction *transaction = lookup_transaction(cache, id); + if (transaction == NULL) + return B_BAD_VALUE; + + return transaction->sub_num_blocks; +} + + +// #pragma mark - public block cache API +// public interface + + +void +block_cache_delete(void *_cache, bool allowWrites) +{ + block_cache *cache = (block_cache *)_cache; + + if (allowWrites) + block_cache_sync(cache); + + BenaphoreLocker locker(&cache->lock); + + // free all blocks + + uint32 cookie = 0; + cached_block *block; + while ((block = (cached_block *)hash_remove_first(cache->hash, &cookie)) != NULL) { + cache->FreeBlock(block); + } + + // free all transactions (they will all be aborted) + + cookie = 0; + cache_transaction *transaction; + while ((transaction = (cache_transaction *)hash_remove_first(cache->transaction_hash, &cookie)) != NULL) { + delete transaction; + } + + delete cache; +} + + +void * +block_cache_create(int fd, off_t numBlocks, size_t blockSize, bool readOnly) +{ + block_cache *cache = new(nothrow) block_cache(fd, numBlocks, blockSize, readOnly); + if (cache == NULL) + return NULL; + + if (cache->InitCheck() != B_OK) { + delete cache; 
+ return NULL; + } + + return cache; +} + + +status_t +block_cache_sync(void *_cache) +{ + block_cache *cache = (block_cache *)_cache; + + // we will sync all dirty blocks to disk that have a completed + // transaction or no transaction only + + BenaphoreLocker locker(&cache->lock); + hash_iterator iterator; + hash_open(cache->hash, &iterator); + + cached_block *block; + while ((block = (cached_block *)hash_next(cache->hash, &iterator)) != NULL) { + if (block->previous_transaction != NULL + || (block->transaction == NULL && block->is_dirty)) { + status_t status = write_cached_block(cache, block); + if (status != B_OK) + return status; + } + } + + hash_close(cache->hash, &iterator, false); + return B_OK; +} + + +status_t +block_cache_make_writable(void *_cache, off_t blockNumber, int32 transaction) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + if (cache->read_only) + panic("tried to make block writable on a read-only cache!"); + + // ToDo: this can be done better! 
+ void *block = get_writable_cached_block(cache, blockNumber, + blockNumber, 1, transaction, false); + if (block != NULL) { + put_cached_block((block_cache *)_cache, blockNumber); + return B_OK; + } + + return B_ERROR; +} + + +void * +block_cache_get_writable_etc(void *_cache, off_t blockNumber, off_t base, + off_t length, int32 transaction) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + TRACE(("block_cache_get_writable_etc(block = %Ld, transaction = %ld)\n", + blockNumber, transaction)); + if (cache->read_only) + panic("tried to get writable block on a read-only cache!"); + + return get_writable_cached_block(cache, blockNumber, base, length, + transaction, false); +} + + +void * +block_cache_get_writable(void *_cache, off_t blockNumber, int32 transaction) +{ + return block_cache_get_writable_etc(_cache, blockNumber, + blockNumber, 1, transaction); +} + + +void * +block_cache_get_empty(void *_cache, off_t blockNumber, int32 transaction) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + TRACE(("block_cache_get_empty(block = %Ld, transaction = %ld)\n", + blockNumber, transaction)); + if (cache->read_only) + panic("tried to get empty writable block on a read-only cache!"); + + return get_writable_cached_block((block_cache *)_cache, blockNumber, + blockNumber, 1, transaction, true); +} + + +const void * +block_cache_get_etc(void *_cache, off_t blockNumber, off_t base, off_t length) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + bool allocated; + + cached_block *block = get_cached_block(cache, blockNumber, &allocated); + if (block == NULL) + return NULL; + +#ifdef DEBUG_CHANGED + if (block->compare == NULL) + block->compare = cache->Allocate(); + if (block->compare != NULL) + memcpy(block->compare, block->current_data, cache->block_size); +#endif + return block->current_data; +} + + +const void * +block_cache_get(void *_cache, off_t 
blockNumber) +{ + return block_cache_get_etc(_cache, blockNumber, blockNumber, 1); +} + + +status_t +block_cache_set_dirty(void *_cache, off_t blockNumber, bool isDirty, int32 transaction) +{ + // not yet implemented + // Note, you must only use this function on blocks that were acquired writable! + if (isDirty) + panic("block_cache_set_dirty(): not yet implemented that way!\n"); + + return B_OK; +} + + +void +block_cache_put(void *_cache, off_t blockNumber) +{ + block_cache *cache = (block_cache *)_cache; + BenaphoreLocker locker(&cache->lock); + + put_cached_block(cache, blockNumber); +} + +} // namespace HaikuKernelEmu +} // namespace UserlandFS diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_block_cache_priv.h b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_block_cache_priv.h new file mode 100644 index 0000000000..ee25813230 --- /dev/null +++ b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_block_cache_priv.h @@ -0,0 +1,93 @@ +/* + * Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved. + * Distributed under the terms of the MIT License. 
+ */ +#ifndef USERLAND_FS_HAIKU_BLOCK_CACHE_PRIVATE_H +#define USERLAND_FS_HAIKU_BLOCK_CACHE_PRIVATE_H + +#include "haiku_lock.h" + +#include + + +namespace UserlandFS { +namespace HaikuKernelEmu { + +struct hash_table; +struct vm_page; + + +#define DEBUG_CHANGED + + +struct cache_transaction; +struct cached_block; +struct block_cache; +typedef DoublyLinkedListLink block_link; + + +struct cached_block { + cached_block *next; // next in hash + cached_block *transaction_next; + block_link link; + off_t block_number; + void *current_data; + void *original_data; + void *parent_data; +#ifdef DEBUG_CHANGED + void *compare; +#endif + int32 ref_count; + int32 accessed; + bool busy : 1; + bool is_writing : 1; + bool is_dirty : 1; + bool unused : 1; + bool unmapped : 1; + cache_transaction *transaction; + cache_transaction *previous_transaction; + + static int Compare(void *_cacheEntry, const void *_block); + static uint32 Hash(void *_cacheEntry, const void *_block, uint32 range); +}; + +typedef DoublyLinkedList > block_list; + +struct block_cache { + hash_table *hash; + benaphore lock; + int fd; + off_t max_blocks; + size_t block_size; + int32 allocated_block_count; + int32 next_transaction_id; + cache_transaction *last_transaction; + hash_table *transaction_hash; + + block_list unmapped_blocks; + block_list unused_blocks; + + bool read_only; + + block_cache(int fd, off_t numBlocks, size_t blockSize, bool readOnly); + ~block_cache(); + + status_t InitCheck(); + + void RemoveUnusedBlocks(int32 maxAccessed = LONG_MAX, int32 count = LONG_MAX); + void FreeBlock(cached_block *block); + cached_block *NewBlock(off_t blockNumber); + void Free(void *address); + void *Allocate(); + + static void LowMemoryHandler(void *data, int32 level); +}; + +status_t block_cache_init(); + +} // namespace HaikuKernelEmu +} // namespace UserlandFS + +#endif /* USERLAND_FS_HAIKU_BLOCK_CACHE_PRIVATE_H */ diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_fs_cache.h 
b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_fs_cache.h new file mode 100644 index 0000000000..5b7268ac24 --- /dev/null +++ b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_fs_cache.h @@ -0,0 +1,74 @@ +/* File System File and Block Caches + * + * Copyright 2004-2005, Haiku Inc. All Rights Reserved. + * Distributed under the terms of the MIT License. + */ +#ifndef USERLAND_FS_HAIKU_FS_CACHE_H +#define USERLAND_FS_HAIKU_FS_CACHE_H + + +#include + + +namespace UserlandFS { +namespace HaikuKernelEmu { + +/* transactions */ +extern int32 cache_start_transaction(void *_cache); +extern status_t cache_sync_transaction(void *_cache, int32 id); +extern status_t cache_end_transaction(void *_cache, int32 id, + transaction_notification_hook hook, void *data); +extern status_t cache_abort_transaction(void *_cache, int32 id); +extern int32 cache_detach_sub_transaction(void *_cache, int32 id, + transaction_notification_hook hook, void *data); +extern status_t cache_abort_sub_transaction(void *_cache, int32 id); +extern status_t cache_start_sub_transaction(void *_cache, int32 id); +extern status_t cache_next_block_in_transaction(void *_cache, int32 id, + uint32 *_cookie, off_t *_blockNumber, void **_data, + void **_unchangedData); +extern int32 cache_blocks_in_transaction(void *_cache, int32 id); +extern int32 cache_blocks_in_sub_transaction(void *_cache, int32 id); + +/* block cache */ +extern void block_cache_delete(void *_cache, bool allowWrites); +extern void *block_cache_create(int fd, off_t numBlocks, size_t blockSize, + bool readOnly); +extern status_t block_cache_sync(void *_cache); + +extern status_t block_cache_make_writable(void *_cache, off_t blockNumber, + int32 transaction); +extern void *block_cache_get_writable_etc(void *_cache, off_t blockNumber, + off_t base, off_t length, int32 transaction); +extern void *block_cache_get_writable(void *_cache, off_t blockNumber, + int32 transaction); +extern void *block_cache_get_empty(void *_cache, off_t 
blockNumber, + int32 transaction); +extern const void *block_cache_get_etc(void *_cache, off_t blockNumber, + off_t base, off_t length); +extern const void *block_cache_get(void *_cache, off_t blockNumber); +extern status_t block_cache_set_dirty(void *_cache, off_t blockNumber, + bool isDirty, int32 transaction); +extern void block_cache_put(void *_cache, off_t blockNumber); + +/* file cache */ +extern void *file_cache_create(mount_id mountID, vnode_id vnodeID, off_t size, + int fd); +extern void file_cache_delete(void *_cacheRef); +extern status_t file_cache_set_size(void *_cacheRef, off_t size); +extern status_t file_cache_sync(void *_cache); +extern status_t file_cache_invalidate_file_map(void *_cacheRef, off_t offset, + off_t size); + +extern status_t file_cache_read_pages(void *_cacheRef, off_t offset, + const iovec *vecs, size_t count, size_t *_numBytes); +extern status_t file_cache_write_pages(void *_cacheRef, off_t offset, + const iovec *vecs, size_t count, size_t *_numBytes); +extern status_t file_cache_read(void *_cacheRef, off_t offset, void *bufferBase, + size_t *_size); +extern status_t file_cache_write(void *_cacheRef, off_t offset, + const void *buffer, size_t *_size); + +} // namespace HaikuKernelEmu +} // namespace UserlandFS + +#endif /* USERLAND_FS_HAIKU_FS_CACHE_H */ diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_hash.cpp b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_hash.cpp new file mode 100644 index 0000000000..4c70ef36f0 --- /dev/null +++ b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_hash.cpp @@ -0,0 +1,274 @@ +/* Generic hash table +** +** Copyright 2001, Travis Geiselbrecht. All rights reserved. +** Distributed under the terms of the NewOS License. 
+*/ + +#include "haiku_hash.h" + +#include +#include + +#include +#include +#include + +#undef TRACE +#define TRACE_HASH 0 +#if TRACE_HASH +# define TRACE(x) dprintf x +#else +# define TRACE(x) ; +#endif + +namespace UserlandFS { +namespace HaikuKernelEmu { + + +// ToDo: the hashtable is not expanded when necessary (no load factor, no nothing) +// Could try to use pools instead of malloc() for the elements - might be +// faster than the current approach. + +struct hash_table { + struct hash_element **table; + int next_ptr_offset; + uint32 table_size; + int num_elements; + int flags; + int (*compare_func)(void *e, const void *key); + uint32 (*hash_func)(void *e, const void *key, uint32 range); +}; + +// XXX gross hack +#define NEXT_ADDR(t, e) ((void *)(((unsigned long)(e)) + (t)->next_ptr_offset)) +#define NEXT(t, e) ((void *)(*(unsigned long *)NEXT_ADDR(t, e))) +#define PUT_IN_NEXT(t, e, val) (*(unsigned long *)NEXT_ADDR(t, e) = (long)(val)) + + +static inline void * +next_element(hash_table *table, void *element) +{ + // ToDo: should we use this instead of the NEXT() macro? 
+ return (void *)(*(unsigned long *)NEXT_ADDR(table, element)); +} + + +struct hash_table * +hash_init(uint32 table_size, int next_ptr_offset, + int compare_func(void *e, const void *key), + uint32 hash_func(void *e, const void *key, uint32 range)) +{ + struct hash_table *t; + unsigned int i; + + if (compare_func == NULL || hash_func == NULL) { + dprintf("hash_init() called with NULL function pointer\n"); + return NULL; + } + + t = (struct hash_table *)malloc(sizeof(struct hash_table)); + if (t == NULL) + return NULL; + + t->table = (struct hash_element **)malloc(sizeof(void *) * table_size); + if (t->table == NULL) { + free(t); + return NULL; + } + + for (i = 0; i < table_size; i++) + t->table[i] = NULL; + + t->table_size = table_size; + t->next_ptr_offset = next_ptr_offset; + t->flags = 0; + t->num_elements = 0; + t->compare_func = compare_func; + t->hash_func = hash_func; + + TRACE(("hash_init: created table %p, next_ptr_offset %d, compare_func %p, hash_func %p\n", + t, next_ptr_offset, compare_func, hash_func)); + + return t; +} + + +int +hash_uninit(struct hash_table *table) +{ + ASSERT(table->num_elements == 0); + + free(table->table); + free(table); + + return 0; +} + + +status_t +hash_insert(struct hash_table *table, void *element) +{ + uint32 hash; + + ASSERT(table != NULL && element != NULL); + TRACE(("hash_insert: table 0x%x, element 0x%x\n", table, element)); + + hash = table->hash_func(element, NULL, table->table_size); + PUT_IN_NEXT(table, element, table->table[hash]); + table->table[hash] = (struct hash_element *)element; + table->num_elements++; + + // ToDo: resize hash table if it's grown too much! 
+ + return 0; +} + + +status_t +hash_remove(struct hash_table *table, void *_element) +{ + uint32 hash = table->hash_func(_element, NULL, table->table_size); + void *element, *lastElement = NULL; + + for (element = table->table[hash]; element != NULL; lastElement = element, element = NEXT(table, element)) { + if (element == _element) { + if (lastElement != NULL) { + // connect the previous entry with the next one + PUT_IN_NEXT(table, lastElement, NEXT(table, element)); + } else + table->table[hash] = (struct hash_element *)NEXT(table, element); + table->num_elements--; + + return B_OK; + } + } + + return B_ERROR; +} + + +void * +hash_remove_first(struct hash_table *table, uint32 *_cookie) +{ + uint32 index; + + for (index = _cookie ? *_cookie : 0; index < table->table_size; index++) { + void *element = table->table[index]; + if (element != NULL) { + // remove the first element we find + table->table[index] = (struct hash_element *)NEXT(table, element); + table->num_elements--; + if (_cookie) + *_cookie = index; + return element; + } + } + + return NULL; +} + + +void * +hash_find(struct hash_table *table, void *searchedElement) +{ + uint32 hash = table->hash_func(searchedElement, NULL, table->table_size); + void *element; + + for (element = table->table[hash]; element != NULL; element = NEXT(table, element)) { + if (element == searchedElement) + return element; + } + + return NULL; +} + + +void * +hash_lookup(struct hash_table *table, const void *key) +{ + uint32 hash = table->hash_func(NULL, key, table->table_size); + void *element; + + for (element = table->table[hash]; element != NULL; element = NEXT(table, element)) { + if (table->compare_func(element, key) == 0) + return element; + } + + return NULL; +} + + +struct hash_iterator * +hash_open(struct hash_table *table, struct hash_iterator *iterator) +{ + if (iterator == NULL) { + iterator = (struct hash_iterator *)malloc(sizeof(struct hash_iterator)); + if (iterator == NULL) + return NULL; + } + + 
hash_rewind(table, iterator); + + return iterator; +} + + +void +hash_close(struct hash_table *table, struct hash_iterator *iterator, bool freeIterator) +{ + if (freeIterator) + free(iterator); +} + + +void +hash_rewind(struct hash_table *table, struct hash_iterator *iterator) +{ + iterator->current = NULL; + iterator->bucket = -1; +} + + +void * +hash_next(struct hash_table *table, struct hash_iterator *iterator) +{ + uint32 index; + +restart: + if (iterator->current == NULL) { + // get next bucket + for (index = (uint32)(iterator->bucket + 1); index < table->table_size; index++) { + if (table->table[index]) { + iterator->bucket = index; + iterator->current = table->table[index]; + break; + } + } + } else { + iterator->current = NEXT(table, iterator->current); + if (!iterator->current) + goto restart; + } + + return iterator->current; +} + + +uint32 +hash_hash_string(const char *string) +{ + uint32 hash = 0; + char c; + + // we assume hash to be at least 32 bits + while ((c = *string++) != 0) { + hash ^= hash >> 28; + hash <<= 4; + hash ^= c; + } + + return hash; +} + +} // namespace HaikuKernelEmu +} // namespace UserlandFS diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_hash.h b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_hash.h new file mode 100644 index 0000000000..2bbcb925db --- /dev/null +++ b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_hash.h @@ -0,0 +1,53 @@ +/* +** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved. +** Distributed under the terms of the NewOS License. 
+*/ +#ifndef USERLAND_FS_HAIKU_HASH_H +#define USERLAND_FS_HAIKU_HASH_H + +#include + +namespace UserlandFS { +namespace HaikuKernelEmu { + +// can be allocated on the stack +typedef struct hash_iterator { + void *current; + int bucket; +} hash_iterator; + +typedef struct hash_table hash_table; + +struct hash_table *hash_init(uint32 table_size, int next_ptr_offset, + int compare_func(void *element, const void *key), + uint32 hash_func(void *element, const void *key, uint32 range)); +int hash_uninit(struct hash_table *table); +status_t hash_insert(struct hash_table *table, void *_element); +status_t hash_remove(struct hash_table *table, void *_element); +void *hash_remove_first(struct hash_table *table, uint32 *_cookie); +void *hash_find(struct hash_table *table, void *e); +void *hash_lookup(struct hash_table *table, const void *key); +struct hash_iterator *hash_open(struct hash_table *table, struct hash_iterator *i); +void hash_close(struct hash_table *table, struct hash_iterator *i, bool free_iterator); +void *hash_next(struct hash_table *table, struct hash_iterator *i); +void hash_rewind(struct hash_table *table, struct hash_iterator *i); + +/* function ptrs must look like this: + * + * uint32 hash_func(void *e, const void *key, uint32 range); + * hash function should calculate hash on either e or key, + * depending on which one is not NULL + * int compare_func(void *e, const void *key); + * compare function should compare the element with + * the key, returning 0 if equal, other if not + * NOTE: compare func can be null, in which case the hash + * code will compare the key pointer with the target + * ToDo: check this! 
+ */ + +uint32 hash_hash_string(const char *str); + +} // namespace HaikuKernelEmu +} // namespace UserlandFS + +#endif /* USERLAND_FS_HAIKU_HASH_H */ diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_kernel_emu.cpp b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_kernel_emu.cpp index 98db7f004a..6f027001d0 100644 --- a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_kernel_emu.cpp +++ b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_kernel_emu.cpp @@ -4,11 +4,13 @@ #include #include +#include #include #include #include #include "Debug.h" +#include "haiku_fs_cache.h" #include "kernel_emu.h" @@ -157,6 +159,177 @@ get_vnode_removed(mount_id mountID, vnode_id vnodeID, bool* removed) } +// #pragma mark - Transaction + + +// cache_start_transaction +int32 +cache_start_transaction(void *_cache) +{ + return UserlandFS::HaikuKernelEmu::cache_start_transaction(_cache); +} + +// cache_sync_transaction +status_t +cache_sync_transaction(void *_cache, int32 id) +{ + return UserlandFS::HaikuKernelEmu::cache_sync_transaction(_cache, id); +} + +// cache_end_transaction +status_t +cache_end_transaction(void *_cache, int32 id, + transaction_notification_hook hook, void *data) +{ + return UserlandFS::HaikuKernelEmu::cache_end_transaction(_cache, id, hook, + data); +} + +// cache_abort_transaction +status_t +cache_abort_transaction(void *_cache, int32 id) +{ + return UserlandFS::HaikuKernelEmu::cache_abort_transaction(_cache, id); +} + +// cache_detach_sub_transaction +int32 +cache_detach_sub_transaction(void *_cache, int32 id, + transaction_notification_hook hook, void *data) +{ + return UserlandFS::HaikuKernelEmu::cache_detach_sub_transaction(_cache, id, + hook, data); +} + +// cache_abort_sub_transaction +status_t +cache_abort_sub_transaction(void *_cache, int32 id) +{ + return UserlandFS::HaikuKernelEmu::cache_abort_sub_transaction(_cache, id); +} + +// cache_start_sub_transaction +status_t +cache_start_sub_transaction(void *_cache, 
int32 id) +{ + return UserlandFS::HaikuKernelEmu::cache_start_sub_transaction(_cache, id); +} + +// cache_next_block_in_transaction +status_t +cache_next_block_in_transaction(void *_cache, int32 id, uint32 *_cookie, + off_t *_blockNumber, void **_data, void **_unchangedData) +{ + return UserlandFS::HaikuKernelEmu::cache_next_block_in_transaction(_cache, + id, _cookie, _blockNumber, _data, _unchangedData); +} + +// cache_blocks_in_transaction +int32 +cache_blocks_in_transaction(void *_cache, int32 id) +{ + return UserlandFS::HaikuKernelEmu::cache_blocks_in_transaction(_cache, id); +} + +// cache_blocks_in_sub_transaction +int32 +cache_blocks_in_sub_transaction(void *_cache, int32 id) +{ + return UserlandFS::HaikuKernelEmu::cache_blocks_in_sub_transaction(_cache, + id); +} + + +// #pragma mark - Block Cache + + +// block_cache_delete +void +block_cache_delete(void *_cache, bool allowWrites) +{ + UserlandFS::HaikuKernelEmu::block_cache_delete(_cache, allowWrites); +} + +// block_cache_create +void * +block_cache_create(int fd, off_t numBlocks, size_t blockSize, bool readOnly) +{ + return UserlandFS::HaikuKernelEmu::block_cache_create(fd, numBlocks, + blockSize, readOnly); +} + +// block_cache_sync +status_t +block_cache_sync(void *_cache) +{ + return UserlandFS::HaikuKernelEmu::block_cache_sync(_cache); +} + +// block_cache_make_writable +status_t +block_cache_make_writable(void *_cache, off_t blockNumber, int32 transaction) +{ + return UserlandFS::HaikuKernelEmu::block_cache_make_writable(_cache, + blockNumber, transaction); +} + +// block_cache_get_writable_etc +void * +block_cache_get_writable_etc(void *_cache, off_t blockNumber, off_t base, + off_t length, int32 transaction) +{ + return UserlandFS::HaikuKernelEmu::block_cache_get_writable_etc(_cache, + blockNumber, base, length, transaction); +} + +// block_cache_get_writable +void * +block_cache_get_writable(void *_cache, off_t blockNumber, int32 transaction) +{ + return 
UserlandFS::HaikuKernelEmu::block_cache_get_writable(_cache, + blockNumber, transaction); +} + +// block_cache_get_empty +void * +block_cache_get_empty(void *_cache, off_t blockNumber, int32 transaction) +{ + return UserlandFS::HaikuKernelEmu::block_cache_get_empty(_cache, + blockNumber, transaction); +} + +// block_cache_get_etc +const void * +block_cache_get_etc(void *_cache, off_t blockNumber, off_t base, off_t length) +{ + return UserlandFS::HaikuKernelEmu::block_cache_get_etc(_cache, blockNumber, + base, length); +} + +// block_cache_get +const void * +block_cache_get(void *_cache, off_t blockNumber) +{ + return UserlandFS::HaikuKernelEmu::block_cache_get(_cache, blockNumber); +} + +// block_cache_set_dirty +status_t +block_cache_set_dirty(void *_cache, off_t blockNumber, bool isDirty, + int32 transaction) +{ + return UserlandFS::HaikuKernelEmu::block_cache_set_dirty(_cache, + blockNumber, isDirty, transaction); +} + +// block_cache_put +void +block_cache_put(void *_cache, off_t blockNumber) +{ + UserlandFS::HaikuKernelEmu::block_cache_put(_cache, blockNumber); +} + + // #pragma mark - Misc diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_lock.cpp b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_lock.cpp new file mode 100644 index 0000000000..472c0cc52c --- /dev/null +++ b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_lock.cpp @@ -0,0 +1,247 @@ +/* + * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved. + * Distributed under the terms of the MIT License. + * + * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved. + * Distributed under the terms of the NewOS License. 
+ */ + +/* Mutex and recursive_lock code */ + +#include "haiku_lock.h" + +#include + +#include "kernel_emu.h" + +using UserlandFS::KernelEmu::panic; + +namespace UserlandFS { +namespace HaikuKernelEmu { + + +int32 +recursive_lock_get_recursion(recursive_lock *lock) +{ + if (lock->holder == find_thread(NULL)) + return lock->recursion; + + return -1; +} + + +status_t +recursive_lock_init(recursive_lock *lock, const char *name) +{ + if (lock == NULL) + return B_BAD_VALUE; + + if (name == NULL) + name = "recursive lock"; + + lock->holder = -1; + lock->recursion = 0; + lock->sem = create_sem(1, name); + + if (lock->sem >= B_OK) + return B_OK; + + return lock->sem; +} + + +void +recursive_lock_destroy(recursive_lock *lock) +{ + if (lock == NULL) + return; + + delete_sem(lock->sem); + lock->sem = -1; +} + + +status_t +recursive_lock_lock(recursive_lock *lock) +{ + thread_id thread = find_thread(NULL); + + if (thread != lock->holder) { + status_t status = acquire_sem(lock->sem); + if (status < B_OK) + return status; + + lock->holder = thread; + } + lock->recursion++; + return B_OK; +} + + +void +recursive_lock_unlock(recursive_lock *lock) +{ + if (find_thread(NULL) != lock->holder) + panic("recursive_lock %p unlocked by non-holder thread!\n", lock); + + if (--lock->recursion == 0) { + lock->holder = -1; + release_sem(lock->sem); + } +} + + +// #pragma mark - + + +status_t +mutex_init(mutex *m, const char *name) +{ + if (m == NULL) + return EINVAL; + + if (name == NULL) + name = "mutex_sem"; + + m->holder = -1; + + m->sem = create_sem(1, name); + if (m->sem >= B_OK) + return B_OK; + + return m->sem; +} + + +void +mutex_destroy(mutex *mutex) +{ + if (mutex == NULL) + return; + + if (mutex->sem >= 0) { + delete_sem(mutex->sem); + mutex->sem = -1; + } + mutex->holder = -1; +} + + +status_t +mutex_lock(mutex *mutex) +{ + thread_id me = find_thread(NULL); + status_t status; + + status = acquire_sem(mutex->sem); + if (status < B_OK) + return status; + + if (me == mutex->holder) + 
panic("mutex_lock failure: mutex %p (sem = 0x%lx) acquired twice by thread 0x%lx\n", mutex, mutex->sem, me); + + mutex->holder = me; + return B_OK; +} + + +void +mutex_unlock(mutex *mutex) +{ + thread_id me = find_thread(NULL); + + if (me != mutex->holder) { + panic("mutex_unlock failure: thread 0x%lx is trying to release mutex %p (current holder 0x%lx)\n", + me, mutex, mutex->holder); + } + + mutex->holder = -1; + release_sem(mutex->sem); +} + + +// #pragma mark - + + +status_t +benaphore_init(benaphore *ben, const char *name) +{ + if (ben == NULL || name == NULL) + return B_BAD_VALUE; + + ben->count = 1; + ben->sem = create_sem(0, name); + if (ben->sem >= B_OK) + return B_OK; + + return ben->sem; +} + + +void +benaphore_destroy(benaphore *ben) +{ + delete_sem(ben->sem); + ben->sem = -1; +} + + +// #pragma mark - + + +status_t +rw_lock_init(rw_lock *lock, const char *name) +{ + if (lock == NULL) + return B_BAD_VALUE; + + if (name == NULL) + name = "r/w lock"; + + lock->sem = create_sem(RW_MAX_READERS, name); + if (lock->sem >= B_OK) + return B_OK; + + return lock->sem; +} + + +void +rw_lock_destroy(rw_lock *lock) +{ + if (lock == NULL) + return; + + delete_sem(lock->sem); +} + + +status_t +rw_lock_read_lock(rw_lock *lock) +{ + return acquire_sem(lock->sem); +} + + +status_t +rw_lock_read_unlock(rw_lock *lock) +{ + return release_sem(lock->sem); +} + + +status_t +rw_lock_write_lock(rw_lock *lock) +{ + return acquire_sem_etc(lock->sem, RW_MAX_READERS, 0, 0); +} + + +status_t +rw_lock_write_unlock(rw_lock *lock) +{ + return release_sem_etc(lock->sem, RW_MAX_READERS, 0); +} + +} // namespace HaikuKernelEmu +} // namespace UserlandFS diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/haiku_lock.h b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_lock.h new file mode 100644 index 0000000000..ccd86f392f --- /dev/null +++ b/src/add-ons/kernel/file_systems/userlandfs/server/haiku_lock.h @@ -0,0 +1,159 @@ +/* + * Copyright 2002-2007, Axel Dörfler, 
axeld@pinc-software.de. + * Distributed under the terms of the MIT License. + * + * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved. + * Distributed under the terms of the NewOS License. + */ +#ifndef USERLAND_FS_HAIKU_LOCK_H +#define USERLAND_FS_HAIKU_LOCK_H + +#include + + +namespace UserlandFS { +namespace HaikuKernelEmu { + +typedef struct recursive_lock { + sem_id sem; + thread_id holder; + int recursion; +} recursive_lock; + +typedef struct mutex { + sem_id sem; + thread_id holder; +} mutex; + +typedef struct benaphore { + sem_id sem; + int32 count; +} benaphore; + +// Note: this is currently a trivial r/w lock implementation +// it will be replaced with something better later - this +// or a similar API will be made publically available at this point. +typedef struct rw_lock { + sem_id sem; + int32 count; + benaphore writeLock; +} rw_lock; + +#define RW_MAX_READERS 1000000 + + +extern status_t recursive_lock_init(recursive_lock *lock, const char *name); +extern void recursive_lock_destroy(recursive_lock *lock); +extern status_t recursive_lock_lock(recursive_lock *lock); +extern void recursive_lock_unlock(recursive_lock *lock); +extern int32 recursive_lock_get_recursion(recursive_lock *lock); + +extern status_t mutex_init(mutex *m, const char *name); +extern void mutex_destroy(mutex *m); +extern status_t mutex_lock(mutex *m); +extern void mutex_unlock(mutex *m); + +extern status_t benaphore_init(benaphore *ben, const char *name); +extern void benaphore_destroy(benaphore *ben); + +static inline status_t +benaphore_lock_etc(benaphore *ben, uint32 flags, bigtime_t timeout) +{ + if (atomic_add(&ben->count, -1) <= 0) + return acquire_sem_etc(ben->sem, 1, flags, timeout); + + return B_OK; +} + + +static inline status_t +benaphore_lock(benaphore *ben) +{ + if (atomic_add(&ben->count, -1) <= 0) + return acquire_sem(ben->sem); + + return B_OK; +} + + +static inline status_t +benaphore_unlock(benaphore *ben) +{ + if (atomic_add(&ben->count, 1) < 0) + 
return release_sem(ben->sem); + + return B_OK; +} + +extern status_t rw_lock_init(rw_lock *lock, const char *name); +extern void rw_lock_destroy(rw_lock *lock); +extern status_t rw_lock_read_lock(rw_lock *lock); +extern status_t rw_lock_read_unlock(rw_lock *lock); +extern status_t rw_lock_write_lock(rw_lock *lock); +extern status_t rw_lock_write_unlock(rw_lock *lock); + + +/* C++ Auto Locking */ + +#include "AutoLocker.h" + + +// MutexLocking +class MutexLocking { +public: + inline bool Lock(mutex *lockable) + { + return mutex_lock(lockable) == B_OK; + } + + inline void Unlock(mutex *lockable) + { + mutex_unlock(lockable); + } +}; + +// MutexLocker +typedef AutoLocker MutexLocker; + +// RecursiveLockLocking +class RecursiveLockLocking { +public: + inline bool Lock(recursive_lock *lockable) + { + return recursive_lock_lock(lockable) == B_OK; + } + + inline void Unlock(recursive_lock *lockable) + { + recursive_lock_unlock(lockable); + } +}; + +// RecursiveLocker +typedef AutoLocker RecursiveLocker; + +// BenaphoreLocking +class BenaphoreLocking { +public: + inline bool Lock(benaphore *lockable) + { + return benaphore_lock(lockable) == B_OK; + } + + inline void Unlock(benaphore *lockable) + { + benaphore_unlock(lockable); + } +}; + +// BenaphoreLocker +typedef AutoLocker BenaphoreLocker; + +} // namespace HaikuKernelEmu +} // namespace UserlandFS + +using UserlandFS::HaikuKernelEmu::MutexLocker; +using UserlandFS::HaikuKernelEmu::RecursiveLocker; +using UserlandFS::HaikuKernelEmu::BenaphoreLocker; + +#endif /* USERLAND_FS_HAIKU_LOCK_H */ diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/kernel_emu.cpp b/src/add-ons/kernel/file_systems/userlandfs/server/kernel_emu.cpp index ebb631708a..0cef8f6a87 100644 --- a/src/add-ons/kernel/file_systems/userlandfs/server/kernel_emu.cpp +++ b/src/add-ons/kernel/file_systems/userlandfs/server/kernel_emu.cpp @@ -512,27 +512,60 @@ UserlandFS::KernelEmu::kernel_debugger(const char *message) debugger(message); } -// panic 
+// vpanic
 void
-UserlandFS::KernelEmu::panic(const char *format, ...)
+UserlandFS::KernelEmu::vpanic(const char *format, va_list args)
 {
 	char buffer[1024];
 	strcpy(buffer, "PANIC: ");
 	int32 prefixLen = strlen(buffer);
 	int bufferSize = sizeof(buffer) - prefixLen;
-	va_list args;
-	va_start(args, format);
+
 	// no vsnprintf() on PPC
 #if defined(__INTEL__)
 	vsnprintf(buffer + prefixLen, bufferSize - 1, format, args);
 #else
 	vsprintf(buffer + prefixLen, format, args);
 #endif
-	va_end(args);
+
 	buffer[sizeof(buffer) - 1] = '\0';
 	debugger(buffer);
 }
 
+// panic
+void
+UserlandFS::KernelEmu::panic(const char *format, ...)
+{
+	va_list args;
+	va_start(args, format);
+	vpanic(format, args);
+	va_end(args);
+}
+
+// dprintf: printf-style logging; must forward to vdprintf(), not itself
+void
+UserlandFS::KernelEmu::vdprintf(const char *format, va_list args)
+{
+	vprintf(format, args);
+}
+
+// dprintf
+void
+UserlandFS::KernelEmu::dprintf(const char *format, ...)
+{
+	va_list args;
+	va_start(args, format);
+	vdprintf(format, args);
+	va_end(args);
+}
+
+void
+UserlandFS::KernelEmu::dump_block(const char *buffer, int size,
+	const char *prefix)
+{
+	// TODO: Implement!
+}
+
 // parse_expression
 //ulong
 //parse_expression(char *str)
diff --git a/src/add-ons/kernel/file_systems/userlandfs/server/kernel_emu.h b/src/add-ons/kernel/file_systems/userlandfs/server/kernel_emu.h
index 3d3b7d093f..6db1612c49 100644
--- a/src/add-ons/kernel/file_systems/userlandfs/server/kernel_emu.h
+++ b/src/add-ons/kernel/file_systems/userlandfs/server/kernel_emu.h
@@ -1,5 +1,7 @@
 // kernel_emu.h
 
+#include <stdarg.h>
+
 #include <fs_interface.h>
 
 struct selectsync;
@@ -31,7 +33,14 @@ status_t unremove_vnode(mount_id nsid, vnode_id vnid);
 status_t get_vnode_removed(mount_id nsid, vnode_id vnid, bool* removed);
 
 void kernel_debugger(const char *message);
-void panic(const char *format, ...);
+void vpanic(const char *format, va_list args);
+void panic(const char *format, ...)
__attribute__ ((format (__printf__, 1, 2))); + +void vdprintf(const char *format, va_list args); +void dprintf(const char *format, ...) + __attribute__ ((format (__printf__, 1, 2))); + +void dump_block(const char *buffer, int size, const char *prefix); int add_debugger_command(char *name, int (*func)(int argc, char **argv), char *help);