* Added condition variables.

* Added a dummy slab implementation.
* Updated the lock, hash, and block cache implementations.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@29336 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2009-02-27 16:17:58 +00:00
parent 42ef52132a
commit 203c622934
13 changed files with 2509 additions and 567 deletions

View File

@ -8,6 +8,9 @@ SubDirSysHdrs [ FDirName $(userlandFSIncludes) ] ;
SubDirHdrs [ FDirName $(userlandFSIncludes) private ] ;
SubDirHdrs [ FDirName $(userlandFSIncludes) shared ] ;
UsePrivateSystemHeaders ;
UsePrivateHeaders libroot ;
SEARCH_SOURCE += [ FDirName $(userlandFSTop) private ] ;
SEARCH_SOURCE += [ FDirName $(userlandFSTop) shared ] ;
@ -40,10 +43,12 @@ Application UserlandFSServer
# BeOSKernelVolume.cpp
DispatcherFileSystem.cpp
FileSystem.cpp
# haiku_block_cache.cpp
haiku_block_cache.cpp
haiku_condition_variable.cpp
# haiku_file_cache.cpp
haiku_hash.cpp
haiku_lock.cpp
haiku_slab.cpp
HaikuKernelFileSystem.cpp
HaikuKernelVolume.cpp
kernel_emu.cpp

View File

@ -23,7 +23,8 @@
#include "DispatcherDefs.h"
#include "FileSystem.h"
#include "FSInfo.h"
#include "haiku_block_cache_priv.h"
#include "haiku_block_cache.h"
#include "haiku_condition_variable.h"
#include "haiku_fs_cache.h"
#include "HaikuKernelFileSystem.h"
#include "RequestThread.h"
@ -277,6 +278,11 @@ UserlandFSServer::_CreateHaikuKernelInterface(const char* fsName,
RETURN_ERROR(B_NO_MEMORY);
ObjectDeleter<HaikuKernelFileSystem> fsDeleter(fileSystem);
// init condition variables
error = UserlandFS::HaikuKernelEmu::condition_variable_init();
if (error != B_OK)
RETURN_ERROR(error);
// init block cache
error = UserlandFS::HaikuKernelEmu::block_cache_init();
if (error != B_OK)

View File

@ -0,0 +1,22 @@
/*
 * Copyright 2005, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef USERLAND_FS_HAIKU_BLOCK_CACHE_H
#define USERLAND_FS_HAIKU_BLOCK_CACHE_H

// Public initialization/statistics interface of the block cache emulation.
// The transaction and block accessor functions live in haiku_fs_cache.h.

#include <SupportDefs.h>

namespace UserlandFS {
namespace HaikuKernelEmu {

// One-time initialization of the block cache emulation; called from
// UserlandFSServer when the Haiku kernel interface is set up.
status_t block_cache_init(void);

// Memory currently consumed by the block cache(s).
// NOTE(review): semantics inferred from the name -- the definition is not
// in this header; confirm against the implementation.
size_t block_cache_used_memory();

} // namespace HaikuKernelEmu
} // namespace UserlandFS

#endif // USERLAND_FS_HAIKU_BLOCK_CACHE_H

View File

@ -1,93 +0,0 @@
/*
 * Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 */
#ifndef USERLAND_FS_HAIKU_BLOCK_CACHE_PRIVATE_H
#define USERLAND_FS_HAIKU_BLOCK_CACHE_PRIVATE_H

// Private data structures of the block cache emulation.

#include "haiku_lock.h"

#include <kernel/util/DoublyLinkedList.h>

namespace UserlandFS {
namespace HaikuKernelEmu {

struct hash_table;
struct vm_page;

// When defined, each cached_block keeps an additional "compare" copy of its
// data for change-detection debugging.
#define DEBUG_CHANGED

struct cache_transaction;
struct cached_block;
struct block_cache;

// Link type chaining cached_blocks into a block_list.
typedef DoublyLinkedListLink<cached_block> block_link;

// One cached disk block together with its transaction state.
struct cached_block {
	cached_block *next; // next in hash
	cached_block *transaction_next;
	block_link link;
	off_t block_number;
	void *current_data;
	void *original_data;
		// NOTE(review): presumably the pre-transaction contents -- confirm
		// against the implementation
	void *parent_data;
#ifdef DEBUG_CHANGED
	void *compare;
#endif
	int32 ref_count;
	int32 accessed;
	bool busy : 1;
	bool is_writing : 1;
	bool is_dirty : 1;
	bool unused : 1;
	bool unmapped : 1;
	cache_transaction *transaction;
	cache_transaction *previous_transaction;

	// hash_table callbacks (cf. haiku_hash.h)
	static int Compare(void *_cacheEntry, const void *_block);
	static uint32 Hash(void *_cacheEntry, const void *_block, uint32 range);
};

// List of cached_blocks linked through cached_block::link.
typedef DoublyLinkedList<cached_block,
	DoublyLinkedListMemberGetLink<cached_block,
		&cached_block::link> > block_list;

// Cache state for one file descriptor: blocks live in a hash table, plus a
// second hash table for the open transactions. Guarded by "lock".
struct block_cache {
	hash_table *hash;
	benaphore lock;
	int fd;
	off_t max_blocks;
	size_t block_size;
	int32 allocated_block_count;
	int32 next_transaction_id;
	cache_transaction *last_transaction;
	hash_table *transaction_hash;

	block_list unmapped_blocks;
	block_list unused_blocks;

	bool read_only;

	block_cache(int fd, off_t numBlocks, size_t blockSize, bool readOnly);
	~block_cache();

	status_t InitCheck();

	void RemoveUnusedBlocks(int32 maxAccessed = LONG_MAX, int32 count = LONG_MAX);
	void FreeBlock(cached_block *block);
	cached_block *NewBlock(off_t blockNumber);
	void Free(void *address);
	void *Allocate();

	static void LowMemoryHandler(void *data, int32 level);
};

status_t block_cache_init();

} // namespace HaikuKernelEmu
} // namespace UserlandFS

#endif /* USERLAND_FS_HAIKU_BLOCK_CACHE_PRIVATE_H */

View File

@ -0,0 +1,263 @@
/*
* Copyright 2007-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "haiku_condition_variable.h"
#include <new>
#include <stdlib.h>
#include <string.h>
#include <Debug.h>
// libroot
#include <user_thread.h>
// system
#include <syscalls.h>
#include <user_thread_defs.h>
#include "kernel_emu.h"
#include "haiku_lock.h"
namespace UserlandFS {
namespace HaikuKernelEmu {
#define STATUS_ADDED 1
#define STATUS_WAITING 2
using UserlandFS::KernelEmu::dprintf;
using UserlandFS::KernelEmu::panic;
static const int kConditionVariableHashSize = 512;
struct ConditionVariableHashDefinition {
typedef const void* KeyType;
typedef ConditionVariable ValueType;
size_t HashKey(const void* key) const
{ return (size_t)key; }
size_t Hash(ConditionVariable* variable) const
{ return (size_t)variable->fObject; }
bool Compare(const void* key, ConditionVariable* variable) const
{ return key == variable->fObject; }
HashTableLink<ConditionVariable>* GetLink(ConditionVariable* variable) const
{ return variable; }
};
typedef OpenHashTable<ConditionVariableHashDefinition> ConditionVariableHash;
static ConditionVariableHash sConditionVariableHash;
static mutex sConditionVariablesLock;
static mutex sThreadsLock;
// #pragma mark - ConditionVariableEntry
/*!	Attaches this (typically stack-allocated) entry to the condition variable
	published for \a object. Returns true on success; false if no variable is
	published for the object, in which case fWaitStatus is set to
	B_ENTRY_NOT_FOUND so that a subsequent Wait() returns that error.
*/
bool
ConditionVariableEntry::Add(const void* object)
{
	ASSERT(object != NULL);

	fThread = find_thread(NULL);

	MutexLocker _(sConditionVariablesLock);

	fVariable = sConditionVariableHash.Lookup(object);
	if (fVariable == NULL) {
		fWaitStatus = B_ENTRY_NOT_FOUND;
		return false;
	}

	fWaitStatus = STATUS_ADDED;
	fVariable->fEntries.Add(this);

	return true;
}
/*!	Blocks until the variable this entry was added to is notified.
	\a flags may contain B_RELATIVE_TIMEOUT or B_ABSOLUTE_TIMEOUT, in which
	case \a timeout is passed to the kernel. Returns the status the notifier
	supplied (B_OK from Notify*(), B_ENTRY_NOT_FOUND from Unpublish()) or the
	blocking error (e.g. on timeout); if the entry was already detached the
	stored fWaitStatus is returned immediately.
*/
status_t
ConditionVariableEntry::Wait(uint32 flags, bigtime_t timeout)
{
	MutexLocker conditionLocker(sConditionVariablesLock);

	// already detached (notified before we got here, or never added)?
	if (fVariable == NULL)
		return fWaitStatus;

	// Arm the kernel-level block: wait_status = 1 means "about to block";
	// _kern_unblock_thread() overwrites it with the wake-up status, making
	// a subsequent _kern_block_thread() return immediately.
	user_thread* userThread = get_user_thread();
	userThread->wait_status = 1;
	fWaitStatus = STATUS_WAITING;

	conditionLocker.Unlock();

	// NOTE(review): sThreadsLock is held across _kern_block_thread() while
	// _NotifyChecked() callers acquire it before unblocking -- verify this
	// cannot deadlock with a blocked waiter (confirm against the
	// block/unblock protocol).
	MutexLocker threadLocker(sThreadsLock);

	status_t error;
	if ((flags & (B_RELATIVE_TIMEOUT | B_ABSOLUTE_TIMEOUT)) != 0)
		error = _kern_block_thread(flags, timeout);
	else
		error = _kern_block_thread(0, 0);

	threadLocker.Unlock();
	conditionLocker.Lock();

	// remove entry from variable, if not done yet
	if (fVariable != NULL) {
		fVariable->fEntries.Remove(this);
		fVariable = NULL;
	}

	return error;
}
/*!	Convenience overload: attaches this entry to the variable published for
	\a object and blocks on it in one step. Returns B_ENTRY_NOT_FOUND when
	no variable is published for the object.
*/
status_t
ConditionVariableEntry::Wait(const void* object, uint32 flags,
	bigtime_t timeout)
{
	if (!Add(object))
		return B_ENTRY_NOT_FOUND;

	return Wait(flags, timeout);
}
/*!	Directly attaches this entry to \a variable (used by
	ConditionVariable::Add()); no hash lookup is needed since the variable
	is already known.
*/
inline void
ConditionVariableEntry::AddToVariable(ConditionVariable* variable)
{
	fThread = find_thread(NULL);

	MutexLocker _(sConditionVariablesLock);

	fVariable = variable;
	fWaitStatus = STATUS_ADDED;
	fVariable->fEntries.Add(this);
}
// #pragma mark - ConditionVariable
/*!	Initialization method for anonymous (unpublished) condition variables.
	The variable is not entered into the global hash table, so waiters must
	reach it directly (via Add()/Wait() on this object).
*/
void
ConditionVariable::Init(const void* object, const char* objectType)
{
	fObject = object;
	fObjectType = objectType;
	new(&fEntries) EntryList;
		// placement-construct the entry list -- Init() may be invoked on
		// otherwise uninitialized memory
}
/*!	Publishes the variable for \a object: it is inserted into the global
	hash table so ConditionVariableEntry::Add()/Wait() can find it by object
	pointer. Only one variable may be published per object at a time (see
	the ASSERT below).
*/
void
ConditionVariable::Publish(const void* object, const char* objectType)
{
	ASSERT(object != NULL);

	fObject = object;
	fObjectType = objectType;
	new(&fEntries) EntryList;
		// placement-construct; the variable may live in raw memory

	MutexLocker locker(sConditionVariablesLock);

	ASSERT(sConditionVariableHash.Lookup(object) == NULL);

	sConditionVariableHash.InsertUnchecked(this);
}
/*!	Removes the variable from the global hash table and wakes all still
	attached waiters with B_ENTRY_NOT_FOUND. \a threadsLocked indicates that
	the caller already holds sThreadsLock.
*/
void
ConditionVariable::Unpublish(bool threadsLocked)
{
	ASSERT(fObject != NULL);

	// a NULL lockable leaves the MutexLocker inactive -- the caller holds
	// the threads lock already in that case
	MutexLocker threadLocker(threadsLocked ? NULL : &sThreadsLock);
	MutexLocker locker(sConditionVariablesLock);

	sConditionVariableHash.RemoveUnchecked(this);
	fObject = NULL;
	fObjectType = NULL;

	if (!fEntries.IsEmpty())
		_NotifyChecked(true, B_ENTRY_NOT_FOUND);
}
/*!	Attaches \a entry to this variable (cf.
	ConditionVariableEntry::AddToVariable()).
*/
void
ConditionVariable::Add(ConditionVariableEntry* entry)
{
	entry->AddToVariable(this);
}


/*!	All-in-one wait: creates a temporary entry on the stack, attaches it to
	this variable and blocks (cf. ConditionVariableEntry::Wait()).
*/
status_t
ConditionVariable::Wait(uint32 flags, bigtime_t timeout)
{
	ConditionVariableEntry entry;
	Add(&entry);
	return entry.Wait(flags, timeout);
}
/*!	Common implementation of NotifyOne()/NotifyAll(): wakes one
	(\a all == false) or all waiters with status B_OK. \a threadsLocked
	indicates that the caller already holds sThreadsLock.
*/
void
ConditionVariable::_Notify(bool all, bool threadsLocked)
{
	// a NULL lockable leaves the MutexLocker inactive (lock already held)
	MutexLocker threadLocker(threadsLocked ? NULL : &sThreadsLock);
	MutexLocker locker(sConditionVariablesLock);

	if (!fEntries.IsEmpty())
		_NotifyChecked(all, B_OK);
}
/*!	Dequeues waiters and hands them \a result. Called with
	sConditionVariablesLock held and with sThreadsLock held by the caller
	(cf. _Notify() and Unpublish()). (The former comment about disabled
	interrupts and a spinlock described the in-kernel implementation this
	code was derived from, not this userland emulation.)
*/
void
ConditionVariable::_NotifyChecked(bool all, status_t result)
{
	// dequeue and wake up the blocked threads
	while (ConditionVariableEntry* entry = fEntries.RemoveHead()) {
		entry->fVariable = NULL;

		// a status <= 0 means the entry already carries a final result
		if (entry->fWaitStatus <= 0)
			continue;

		// only an entry that actually blocked needs the kernel unblock
		if (entry->fWaitStatus == STATUS_WAITING)
			_kern_unblock_thread(entry->fThread, result);

		entry->fWaitStatus = result;

		if (!all)
			break;
			// NotifyOne(): stop after the first woken waiter
	}
}
// #pragma mark -
/*!	One-time initialization of the condition variable emulation: creates the
	two global locks and the object -> variable hash table. Returns B_OK, or
	the hash table initialization error (after panicking).
*/
status_t
condition_variable_init()
{
	mutex_init(&sConditionVariablesLock, "condition variables");
	mutex_init(&sThreadsLock, "threads");

	new(&sConditionVariableHash) ConditionVariableHash;
		// placement-construct the global hash table
		// NOTE(review): presumably done to control construction order of
		// the static -- confirm
	status_t error = sConditionVariableHash.Init(kConditionVariableHashSize);
	if (error != B_OK) {
		panic("condition_variable_init(): Failed to init hash table: %s",
			strerror(error));
	}

	return error;
}
} // namespace HaikuKernelEmu
} // namespace UserlandFS

View File

@ -0,0 +1,101 @@
/*
* Copyright 2007-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef USERLAND_FS_HAIKU_CONDITION_VARIABLE_H
#define USERLAND_FS_HAIKU_CONDITION_VARIABLE_H
#include <OS.h>
#include <kernel/util/DoublyLinkedList.h>
#include <kernel/util/OpenHashTable.h>
namespace UserlandFS {
namespace HaikuKernelEmu {
class ConditionVariable;
// A single waiter on a ConditionVariable, typically allocated on the waiting
// thread's stack. Entries are chained into the variable's entry list.
struct ConditionVariableEntry
	: DoublyLinkedListLinkImpl<ConditionVariableEntry> {
public:
	// Attaches to the variable published for object; false if none exists.
	bool Add(const void* object);
	// Blocks until notified; flags may carry B_RELATIVE_TIMEOUT or
	// B_ABSOLUTE_TIMEOUT together with timeout.
	status_t Wait(uint32 flags = 0, bigtime_t timeout = 0);
	// Add() and Wait() in a single call.
	status_t Wait(const void* object, uint32 flags = 0,
		bigtime_t timeout = 0);

	// Variable this entry is currently attached to (NULL when detached).
	inline ConditionVariable* Variable() const { return fVariable; }

private:
	inline void AddToVariable(ConditionVariable* variable);

private:
	ConditionVariable* fVariable;	// variable waited on, NULL when done
	thread_id fThread;				// the waiting thread (for unblocking)
	status_t fWaitStatus;			// wait state, later the final result

	friend class ConditionVariable;
};
// A condition variable keyed by an arbitrary object pointer. Publish() makes
// it findable in a global hash table; threads wait on it through
// ConditionVariableEntry. The protected HashTableLink base keeps the hash
// linkage out of the public interface.
class ConditionVariable : protected HashTableLink<ConditionVariable> {
public:
	// Sets up an anonymous variable -- not entered into the hash table.
	void Init(const void* object,
		const char* objectType);
		// for anonymous (unpublished) cvars
	// Makes the variable findable by object pointer.
	void Publish(const void* object,
		const char* objectType);
	// Removes it again; remaining waiters are woken with B_ENTRY_NOT_FOUND.
	void Unpublish(bool threadsLocked = false);

	inline void NotifyOne(bool threadsLocked = false);
	inline void NotifyAll(bool threadsLocked = false);

	void Add(ConditionVariableEntry* entry);

	status_t Wait(uint32 flags = 0, bigtime_t timeout = 0);
		// all-in one, i.e. doesn't need a
		// ConditionVariableEntry

	const void* Object() const { return fObject; }
	const char* ObjectType() const { return fObjectType; }

private:
	void _Notify(bool all, bool threadsLocked);
	void _NotifyChecked(bool all, status_t result);

protected:
	typedef DoublyLinkedList<ConditionVariableEntry> EntryList;

	const void* fObject;		// key the variable is published under
	const char* fObjectType;	// descriptive type string (only stored)
	EntryList fEntries;			// currently attached waiters

	friend class ConditionVariableEntry;
	friend class ConditionVariableHashDefinition;
};
// Wakes a single waiter with B_OK.
inline void
ConditionVariable::NotifyOne(bool threadsLocked)
{
	_Notify(false, threadsLocked);
}


// Wakes all waiters with B_OK.
inline void
ConditionVariable::NotifyAll(bool threadsLocked)
{
	_Notify(true, threadsLocked);
}


// One-time module initialization (cf. haiku_condition_variable.cpp).
status_t condition_variable_init();
} // namespace HaikuKernelEmu
} // namespace UserlandFS
#endif // USERLAND_FS_HAIKU_CONDITION_VARIABLE_H

View File

@ -18,43 +18,50 @@ class HaikuKernelVolume;
namespace HaikuKernelEmu {
/* transactions */
extern int32 cache_start_transaction(void *_cache);
extern status_t cache_sync_transaction(void *_cache, int32 id);
extern status_t cache_end_transaction(void *_cache, int32 id,
extern int32 cache_start_transaction(void *cache);
extern status_t cache_sync_transaction(void *cache, int32 id);
extern status_t cache_end_transaction(void *cache, int32 id,
transaction_notification_hook hook, void *data);
extern status_t cache_abort_transaction(void *_cache, int32 id);
extern int32 cache_detach_sub_transaction(void *_cache, int32 id,
extern status_t cache_abort_transaction(void *cache, int32 id);
extern int32 cache_detach_sub_transaction(void *cache, int32 id,
transaction_notification_hook hook, void *data);
extern status_t cache_abort_sub_transaction(void *_cache, int32 id);
extern status_t cache_start_sub_transaction(void *_cache, int32 id);
extern status_t cache_next_block_in_transaction(void *_cache, int32 id,
uint32 *_cookie, off_t *_blockNumber, void **_data,
void **_unchangedData);
extern int32 cache_blocks_in_transaction(void *_cache, int32 id);
extern int32 cache_blocks_in_sub_transaction(void *_cache, int32 id);
extern status_t cache_abort_sub_transaction(void *cache, int32 id);
extern status_t cache_start_sub_transaction(void *cache, int32 id);
extern status_t cache_add_transaction_listener(void *cache, int32 id,
int32 events, transaction_notification_hook hook,
void *data);
extern status_t cache_remove_transaction_listener(void *cache, int32 id,
transaction_notification_hook hook, void *data);
extern status_t cache_next_block_in_transaction(void *cache, int32 id,
bool mainOnly, long *_cookie, off_t *_blockNumber,
void **_data, void **_unchangedData);
extern int32 cache_blocks_in_transaction(void *cache, int32 id);
extern int32 cache_blocks_in_main_transaction(void *cache, int32 id);
extern int32 cache_blocks_in_sub_transaction(void *cache, int32 id);
/* block cache */
extern void block_cache_delete(void *_cache, bool allowWrites);
extern void block_cache_delete(void *cache, bool allowWrites);
extern void *block_cache_create(int fd, off_t numBlocks, size_t blockSize,
bool readOnly);
extern status_t block_cache_sync(void *_cache);
extern status_t block_cache_sync_etc(void *_cache, off_t blockNumber,
extern status_t block_cache_sync(void *cache);
extern status_t block_cache_sync_etc(void *cache, off_t blockNumber,
size_t numBlocks);
extern status_t block_cache_make_writable(void *_cache, off_t blockNumber,
extern void block_cache_discard(void *cache, off_t blockNumber,
size_t numBlocks);
extern status_t block_cache_make_writable(void *cache, off_t blockNumber,
int32 transaction);
extern void *block_cache_get_writable_etc(void *_cache, off_t blockNumber,
extern void *block_cache_get_writable_etc(void *cache, off_t blockNumber,
off_t base, off_t length, int32 transaction);
extern void *block_cache_get_writable(void *_cache, off_t blockNumber,
extern void *block_cache_get_writable(void *cache, off_t blockNumber,
int32 transaction);
extern void *block_cache_get_empty(void *_cache, off_t blockNumber,
extern void *block_cache_get_empty(void *cache, off_t blockNumber,
int32 transaction);
extern const void *block_cache_get_etc(void *_cache, off_t blockNumber,
extern const void *block_cache_get_etc(void *cache, off_t blockNumber,
off_t base, off_t length);
extern const void *block_cache_get(void *_cache, off_t blockNumber);
extern status_t block_cache_set_dirty(void *_cache, off_t blockNumber,
extern const void *block_cache_get(void *cache, off_t blockNumber);
extern status_t block_cache_set_dirty(void *cache, off_t blockNumber,
bool isDirty, int32 transaction);
extern void block_cache_put(void *_cache, off_t blockNumber);
extern void block_cache_put(void *cache, off_t blockNumber);
/* file cache */
extern void *file_cache_create(dev_t mountID, ino_t vnodeID, off_t size,

View File

@ -49,6 +49,27 @@ struct hash_table {
#define PUT_IN_NEXT(t, e, val) (*(unsigned long *)NEXT_ADDR(t, e) = (long)(val))
// Bucket counts used for the hash tables: primes, each roughly doubling the
// previous one, terminated by 0.
const uint32 kPrimes [] = {
	13, 31, 61, 127, 251,
	509, 1021, 2039, 4093, 8191, 16381, 32749, 65521, 131071, 262139,
	524287, 1048573, 2097143, 4194301, 8388593, 16777213, 33554393, 67108859,
	134217689, 268435399, 536870909, 1073741789, 2147483647, 0
};


// Returns the smallest prime from kPrimes greater than \a size, or the
// largest available prime if \a size exceeds them all.
static uint32
get_prime_table_size(uint32 size)
{
	uint32 index = 0;
	while (kPrimes[index] != 0 && kPrimes[index] <= size)
		index++;

	if (kPrimes[index] != 0)
		return kPrimes[index];

	return kPrimes[index - 1];
}
static inline void *
next_element(hash_table *table, void *element)
{
@ -57,15 +78,59 @@ next_element(hash_table *table, void *element)
}
/*!	Resizes \a table to the next prime larger than its element count and
	rehashes all entries. Returns B_OK without doing anything if the table
	is already at least that large; returns B_NO_MEMORY if the new bucket
	array cannot be allocated (the old table remains valid in that case).
*/
static status_t
hash_grow(struct hash_table *table)
{
	uint32 newSize = get_prime_table_size(table->num_elements);
	struct hash_element **newTable;
	uint32 index;

	if (table->table_size >= newSize)
		return B_OK;

	newTable = (struct hash_element **)malloc(sizeof(void *) * newSize);
	if (newTable == NULL)
		return B_NO_MEMORY;

	memset(newTable, 0, sizeof(void *) * newSize);

	// rehash all the entries and add them to the new table
	for (index = 0; index < table->table_size; index++) {
		void *element;
		void *next;
		for (element = table->table[index]; element != NULL; element = next) {
			// hash with the *new* size; the old next pointer is read before
			// it is overwritten when the element is relinked below
			uint32 hash = table->hash_func(element, NULL, newSize);
			next = NEXT(table, element);
			PUT_IN_NEXT(table, element, newTable[hash]);
			newTable[hash] = (struct hash_element *)element;
		}
	}

	free(table->table);
	table->table = newTable;
	table->table_size = newSize;

	TRACE(("hash_grow: grown table %p, new size %lu\n", table, newSize));
	return B_OK;
}
// #pragma mark - kernel private API
struct hash_table *
hash_init(uint32 table_size, int next_ptr_offset,
int compare_func(void *e, const void *key),
uint32 hash_func(void *e, const void *key, uint32 range))
hash_init(uint32 tableSize, int nextPointerOffset,
int compareFunc(void *e, const void *key),
uint32 hashFunc(void *e, const void *key, uint32 range))
{
struct hash_table *t;
unsigned int i;
uint32 i;
if (compare_func == NULL || hash_func == NULL) {
tableSize = get_prime_table_size(tableSize);
if (compareFunc == NULL || hashFunc == NULL) {
dprintf("hash_init() called with NULL function pointer\n");
return NULL;
}
@ -74,24 +139,24 @@ hash_init(uint32 table_size, int next_ptr_offset,
if (t == NULL)
return NULL;
t->table = (struct hash_element **)malloc(sizeof(void *) * table_size);
t->table = (struct hash_element **)malloc(sizeof(void *) * tableSize);
if (t->table == NULL) {
free(t);
return NULL;
}
for (i = 0; i < table_size; i++)
for (i = 0; i < tableSize; i++)
t->table[i] = NULL;
t->table_size = table_size;
t->next_ptr_offset = next_ptr_offset;
t->table_size = tableSize;
t->next_ptr_offset = nextPointerOffset;
t->flags = 0;
t->num_elements = 0;
t->compare_func = compare_func;
t->hash_func = hash_func;
t->compare_func = compareFunc;
t->hash_func = hashFunc;
TRACE(("hash_init: created table %p, next_ptr_offset %d, compare_func %p, hash_func %p\n",
t, next_ptr_offset, compare_func, hash_func));
t, nextPointerOffset, compareFunc, hashFunc));
return t;
}
@ -115,16 +180,36 @@ hash_insert(struct hash_table *table, void *element)
uint32 hash;
ASSERT(table != NULL && element != NULL);
TRACE(("hash_insert: table 0x%x, element 0x%x\n", table, element));
TRACE(("hash_insert: table %p, element %p\n", table, element));
hash = table->hash_func(element, NULL, table->table_size);
PUT_IN_NEXT(table, element, table->table[hash]);
table->table[hash] = (struct hash_element *)element;
table->num_elements++;
// ToDo: resize hash table if it's grown too much!
return B_OK;
}
return 0;
/*!	Like hash_insert(), but additionally grows the table (via hash_grow())
	once the element count exceeds the number of buckets. A grow failure is
	ignored -- the insertion itself always succeeds.
*/
status_t
hash_insert_grow(struct hash_table *table, void *element)
{
	uint32 hash;

	ASSERT(table != NULL && element != NULL);
	TRACE(("hash_insert_grow: table %p, element %p\n", table, element));

	// prepend the element to its bucket's chain
	hash = table->hash_func(element, NULL, table->table_size);
	PUT_IN_NEXT(table, element, table->table[hash]);
	table->table[hash] = (struct hash_element *)element;
	table->num_elements++;

	if ((uint32)table->num_elements > table->table_size) {
		//dprintf("hash_insert: table has grown too much: %d in %d\n", table->num_elements, (int)table->table_size);
		hash_grow(table);
			// return value ignored: growing is best-effort here
	}

	return B_OK;
}
@ -157,32 +242,34 @@ hash_remove_current(struct hash_table *table, struct hash_iterator *iterator)
{
uint32 index = iterator->bucket;
void *element;
void *lastElement = NULL;
if (iterator->current == NULL)
panic("hash_remove_current() called too early.");
if (iterator->current == NULL || (element = table->table[index]) == NULL) {
panic("hash_remove_current(): invalid iteration state");
return;
}
for (element = table->table[index]; index < table->table_size; index++) {
void *lastElement = NULL;
while (element != NULL) {
if (element == iterator->current) {
iterator->current = lastElement;
while (element != NULL) {
if (element == iterator->current) {
iterator->current = lastElement;
if (lastElement != NULL) {
// connect the previous entry with the next one
PUT_IN_NEXT(table, lastElement, NEXT(table, element));
} else {
table->table[index] = (struct hash_element *)NEXT(table,
element);
}
table->num_elements--;
return;
if (lastElement != NULL) {
// connect the previous entry with the next one
PUT_IN_NEXT(table, lastElement, NEXT(table, element));
} else {
table->table[index] = (struct hash_element *)NEXT(table,
element);
}
element = NEXT(table, element);
table->num_elements--;
return;
}
lastElement = element;
element = NEXT(table, element);
}
panic("hash_remove_current(): current element not found!");
}
@ -309,5 +396,48 @@ hash_hash_string(const char *string)
return hash;
}
// Returns the number of elements currently stored in \a table.
uint32
hash_count_elements(struct hash_table *table)
{
	return table->num_elements;
}


// Returns how many buckets of \a table hold at least one element.
uint32
hash_count_used_slots(struct hash_table *table)
{
	uint32 count = 0;

	for (uint32 slot = 0; slot < table->table_size; slot++) {
		if (table->table[slot] != NULL)
			count++;
	}

	return count;
}
/*!	Debug helper: prints the table's size and element count, followed by
	every non-empty bucket with the addresses of its chained elements.
*/
void
hash_dump_table(struct hash_table* table)
{
	uint32 i;

	dprintf("hash table %p, table size: %lu, elements: %u\n", table,
		table->table_size, table->num_elements);

	for (i = 0; i < table->table_size; i++) {
		struct hash_element* element = table->table[i];
		if (element != NULL) {
			dprintf("%6lu:", i);
			// walk the bucket's singly-linked chain
			while (element != NULL) {
				dprintf(" %p", element);
				element = (hash_element*)NEXT(table, element);
			}
			dprintf("\n");
		}
	}
}
} // namespace HaikuKernelEmu
} // namespace UserlandFS

View File

@ -1,7 +1,10 @@
/*
** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
** Distributed under the terms of the NewOS License.
*/
/*
* Copyright 2002-2008, Haiku Inc. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef USERLAND_FS_HAIKU_HASH_H
#define USERLAND_FS_HAIKU_HASH_H
@ -10,6 +13,13 @@
namespace UserlandFS {
namespace HaikuKernelEmu {
// The use of offsetof() on non-PODs is invalid. Since many structs use
// templated members (i.e. DoublyLinkedList) which makes them non-PODs we
// can't use offsetof() anymore. This macro does the same, but requires an
// instance of the object in question.
#define offset_of_member(OBJECT, MEMBER) \
((size_t)((char*)&OBJECT.MEMBER - (char*)&OBJECT))
// can be allocated on the stack
typedef struct hash_iterator {
void *current;
@ -23,6 +33,7 @@ struct hash_table *hash_init(uint32 table_size, int next_ptr_offset,
uint32 hash_func(void *element, const void *key, uint32 range));
int hash_uninit(struct hash_table *table);
status_t hash_insert(struct hash_table *table, void *_element);
status_t hash_insert_grow(struct hash_table *table, void *_element);
status_t hash_remove(struct hash_table *table, void *_element);
void hash_remove_current(struct hash_table *table, struct hash_iterator *iterator);
void *hash_remove_first(struct hash_table *table, uint32 *_cookie);
@ -32,6 +43,9 @@ struct hash_iterator *hash_open(struct hash_table *table, struct hash_iterator *
void hash_close(struct hash_table *table, struct hash_iterator *i, bool free_iterator);
void *hash_next(struct hash_table *table, struct hash_iterator *i);
void hash_rewind(struct hash_table *table, struct hash_iterator *i);
uint32 hash_count_elements(struct hash_table *table);
uint32 hash_count_used_slots(struct hash_table *table);
void hash_dump_table(struct hash_table* table);
/* function pointers must look like this:
*

View File

@ -20,33 +20,35 @@ namespace UserlandFS {
namespace HaikuKernelEmu {
int32
recursive_lock_get_recursion(recursive_lock *lock)
sem_id
_init_semaphore(int32 count, const char* name)
{
if (lock->holder == find_thread(NULL))
return lock->recursion;
return -1;
sem_id sem = create_sem(count, name);
if (sem < 0)
panic("_init_semaphore(): Failed to create semaphore!\n");
return sem;
}
status_t
void
recursive_lock_init(recursive_lock *lock, const char *name)
{
recursive_lock_init_etc(lock, name, 0);
}
void
recursive_lock_init_etc(recursive_lock *lock, const char *name, uint32 flags)
{
if (lock == NULL)
return B_BAD_VALUE;
panic("recursive_lock_init_etc(): NULL lock\n");
if (name == NULL)
name = "recursive lock";
lock->holder = -1;
lock->recursion = 0;
lock->sem = create_sem(1, name);
if (lock->sem >= B_OK)
return B_OK;
return lock->sem;
lock->sem = _init_semaphore(1, name);
}
@ -78,6 +80,23 @@ recursive_lock_lock(recursive_lock *lock)
}
/*!	Non-blocking variant of recursive_lock_lock(): tries to acquire the
	underlying semaphore with a zero relative timeout. Returns the semaphore
	error (e.g. B_WOULD_BLOCK) if another thread holds the lock; otherwise
	the caller becomes (or remains) the holder and the recursion count is
	incremented.
*/
status_t
recursive_lock_trylock(recursive_lock *lock)
{
	thread_id thread = find_thread(NULL);

	// if we are the holder already, just recurse
	if (thread != lock->holder) {
		status_t status = acquire_sem_etc(lock->sem, 1, B_RELATIVE_TIMEOUT, 0);
		if (status < B_OK)
			return status;

		lock->holder = thread;
	}
	lock->recursion++;
	return B_OK;
}
void
recursive_lock_unlock(recursive_lock *lock)
{
@ -91,25 +110,38 @@ recursive_lock_unlock(recursive_lock *lock)
}
int32
recursive_lock_get_recursion(recursive_lock *lock)
{
if (lock->holder == find_thread(NULL))
return lock->recursion;
return -1;
}
// #pragma mark -
status_t
mutex_init(mutex *m, const char *name)
void
mutex_init(mutex *lock, const char *name)
{
if (m == NULL)
return EINVAL;
mutex_init_etc(lock, name, 0);
}
void
mutex_init_etc(mutex* lock, const char* name, uint32 flags)
{
if (lock == NULL)
panic("mutex_init_etc(): NULL lock\n");
if (name == NULL)
name = "mutex_sem";
m->holder = -1;
lock->holder = -1;
m->sem = create_sem(1, name);
if (m->sem >= B_OK)
return B_OK;
return m->sem;
lock->sem = _init_semaphore(1, name);
}
@ -145,6 +177,24 @@ mutex_lock(mutex *mutex)
}
/*!	Tries to acquire \a mutex without blocking. Returns the semaphore error
	(e.g. B_WOULD_BLOCK) when the mutex is held; B_OK and ownership on
	success. Panics on a recursive acquisition attempt -- this mutex is not
	recursive.
*/
status_t
mutex_trylock(mutex *mutex)
{
	thread_id me = find_thread(NULL);
	status_t status;

	// a zero relative timeout makes the acquisition non-blocking
	status = acquire_sem_etc(mutex->sem, 1, B_RELATIVE_TIMEOUT, 0);
	if (status < B_OK)
		return status;

	if (me == mutex->holder) {
		// fixed: the panic message previously said "mutex_lock failure"
		// (copy-paste from mutex_lock()), misattributing the error site
		panic("mutex_trylock failure: mutex %p (sem = 0x%lx) acquired twice "
			"by thread 0x%lx\n", mutex, mutex->sem, me);
	}

	mutex->holder = me;
	return B_OK;
}
void
mutex_unlock(mutex *mutex)
{
@ -163,46 +213,23 @@ mutex_unlock(mutex *mutex)
// #pragma mark -
status_t
benaphore_init(benaphore *ben, const char *name)
void
rw_lock_init(rw_lock *lock, const char *name)
{
if (ben == NULL || name == NULL)
return B_BAD_VALUE;
ben->count = 1;
ben->sem = create_sem(0, name);
if (ben->sem >= B_OK)
return B_OK;
return ben->sem;
rw_lock_init_etc(lock, name, 0);
}
void
benaphore_destroy(benaphore *ben)
{
delete_sem(ben->sem);
ben->sem = -1;
}
// #pragma mark -
status_t
rw_lock_init(rw_lock *lock, const char *name)
rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags)
{
if (lock == NULL)
return B_BAD_VALUE;
panic("rw_lock_init_etc(): NULL lock\n");
if (name == NULL)
name = "r/w lock";
lock->sem = create_sem(RW_MAX_READERS, name);
if (lock->sem >= B_OK)
return B_OK;
return lock->sem;
lock->sem = _init_semaphore(RW_MAX_READERS, name);
}

View File

@ -20,77 +20,77 @@ typedef struct recursive_lock {
int recursion;
} recursive_lock;
typedef struct mutex {
sem_id sem;
thread_id holder;
} mutex;
typedef struct benaphore {
sem_id sem;
int32 count;
} benaphore;
#define MUTEX_FLAG_CLONE_NAME 0x1
// Note: this is currently a trivial r/w lock implementation
// it will be replaced with something better later - this
// or a similar API will be made publically available at this point.
typedef struct rw_lock {
sem_id sem;
int32 count;
benaphore writeLock;
} rw_lock;
#define RW_MAX_READERS 1000000
#define RW_LOCK_FLAG_CLONE_NAME 0x1
extern status_t recursive_lock_init(recursive_lock *lock, const char *name);
#define ASSERT_LOCKED_RECURSIVE(r) do {} while (false)
#define ASSERT_LOCKED_MUTEX(m) do {} while (false)
#define ASSERT_WRITE_LOCKED_RW_LOCK(m) do {} while (false)
#define ASSERT_READ_LOCKED_RW_LOCK(l) do {} while (false)
// static initializers
#define MUTEX_INITIALIZER(name) { _init_semaphore(1, name), -1 }
#define RECURSIVE_LOCK_INITIALIZER(name) { _init_semaphore(1, name), -1, 0 }
#define RW_LOCK_INITIALIZER(name) \
{ _init_semaphore(RW_MAX_READERS, name) }
sem_id _init_semaphore(int32 count, const char* name);
// implementation private
extern void recursive_lock_init(recursive_lock *lock, const char *name);
// name is *not* cloned nor freed in recursive_lock_destroy()
extern void recursive_lock_init_etc(recursive_lock *lock, const char *name,
uint32 flags);
extern void recursive_lock_destroy(recursive_lock *lock);
extern status_t recursive_lock_lock(recursive_lock *lock);
extern status_t recursive_lock_trylock(recursive_lock *lock);
extern void recursive_lock_unlock(recursive_lock *lock);
extern int32 recursive_lock_get_recursion(recursive_lock *lock);
extern status_t mutex_init(mutex *m, const char *name);
extern void mutex_destroy(mutex *m);
extern status_t mutex_lock(mutex *m);
extern void mutex_unlock(mutex *m);
extern status_t benaphore_init(benaphore *ben, const char *name);
extern void benaphore_destroy(benaphore *ben);
extern void mutex_init(mutex* lock, const char* name);
// name is *not* cloned nor freed in mutex_destroy()
extern void mutex_init_etc(mutex* lock, const char* name, uint32 flags);
extern void mutex_destroy(mutex* lock);
//extern status_t mutex_switch_lock(mutex* from, mutex* to);
// Unlocks "from" and locks "to" such that unlocking and starting to wait
// for the lock is atomically. I.e. if "from" guards the object "to" belongs
// to, the operation is safe as long as "from" is held while destroying
// "to".
static inline status_t
benaphore_lock_etc(benaphore *ben, uint32 flags, bigtime_t timeout)
{
if (atomic_add(&ben->count, -1) <= 0)
return acquire_sem_etc(ben->sem, 1, flags, timeout);
return B_OK;
}
status_t mutex_lock(mutex* lock);
//status_t mutex_lock_threads_locked(mutex* lock);
status_t mutex_trylock(mutex* lock);
void mutex_unlock(mutex* lock);
//void mutex_transfer_lock(mutex* lock, thread_id thread);
static inline status_t
benaphore_lock(benaphore *ben)
{
if (atomic_add(&ben->count, -1) <= 0)
return acquire_sem(ben->sem);
return B_OK;
}
static inline status_t
benaphore_unlock(benaphore *ben)
{
if (atomic_add(&ben->count, 1) < 0)
return release_sem(ben->sem);
return B_OK;
}
extern status_t rw_lock_init(rw_lock *lock, const char *name);
extern void rw_lock_destroy(rw_lock *lock);
extern status_t rw_lock_read_lock(rw_lock *lock);
extern status_t rw_lock_read_unlock(rw_lock *lock);
extern status_t rw_lock_write_lock(rw_lock *lock);
extern status_t rw_lock_write_unlock(rw_lock *lock);
extern void rw_lock_init(rw_lock* lock, const char* name);
// name is *not* cloned nor freed in rw_lock_destroy()
extern void rw_lock_init_etc(rw_lock* lock, const char* name, uint32 flags);
extern void rw_lock_destroy(rw_lock* lock);
extern status_t rw_lock_read_lock(rw_lock* lock);
extern status_t rw_lock_read_unlock(rw_lock* lock);
extern status_t rw_lock_write_lock(rw_lock* lock);
extern status_t rw_lock_write_unlock(rw_lock* lock);
/* C++ Auto Locking */
@ -132,28 +132,10 @@ public:
// RecursiveLocker
typedef AutoLocker<recursive_lock, RecursiveLockLocking> RecursiveLocker;
// BenaphoreLocking
class BenaphoreLocking {
public:
inline bool Lock(benaphore *lockable)
{
return benaphore_lock(lockable) == B_OK;
}
inline void Unlock(benaphore *lockable)
{
benaphore_unlock(lockable);
}
};
// BenaphoreLocker
typedef AutoLocker<benaphore, BenaphoreLocking> BenaphoreLocker;
} // namespace HaikuKernelEmu
} // namespace UserlandFS
using UserlandFS::HaikuKernelEmu::MutexLocker;
using UserlandFS::HaikuKernelEmu::RecursiveLocker;
using UserlandFS::HaikuKernelEmu::BenaphoreLocker;
#endif /* USERLAND_FS_HAIKU_LOCK_H */

View File

@ -0,0 +1,62 @@
/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */
#ifndef USERLAND_FS_HAIKU_SLAB_SLAB_H
#define USERLAND_FS_HAIKU_SLAB_SLAB_H

// Interface of the (dummy) slab allocator emulation: object caches hand out
// fixed-size objects and may invoke per-object constructor/destructor hooks.

#include <OS.h>

namespace UserlandFS {
namespace HaikuKernelEmu {

// Flags accepted by the functions below.
enum {
	/* create_object_cache_etc flags */
	CACHE_NO_DEPOT = 1 << 0,
	CACHE_UNLOCKED_PAGES = 1 << 1,
	CACHE_LARGE_SLAB = 1 << 2,

	/* object_cache_alloc flags */
	CACHE_DONT_SLEEP = 1 << 8,

	/* internal */
	CACHE_DURING_BOOT = 1 << 31
		// NOTE(review): shifts into the sign bit of int; kept as in the
		// kernel header it mirrors
};

// Opaque cache handle; defined in the implementation file.
typedef struct object_cache object_cache;

// Per-object hooks: invoked when an object is constructed/destroyed, and
// when the cache is asked to release memory (level indicates urgency).
typedef status_t (*object_cache_constructor)(void *cookie, void *object);
typedef void (*object_cache_destructor)(void *cookie, void *object);
typedef void (*object_cache_reclaimer)(void *cookie, int32 level);

// Creates a cache for objects of object_size bytes. cookie is passed to the
// hooks unchanged. (Fixed: named the destructor parameter for consistency
// with create_object_cache_etc() -- declaration-only change, no ABI impact.)
object_cache *create_object_cache(const char *name, size_t object_size,
	size_t alignment, void *cookie, object_cache_constructor constructor,
	object_cache_destructor destructor);

// Extended variant with usage limit, flags and a reclaimer hook.
object_cache *create_object_cache_etc(const char *name, size_t object_size,
	size_t alignment, size_t max_byte_usage, uint32 flags, void *cookie,
	object_cache_constructor constructor, object_cache_destructor destructor,
	object_cache_reclaimer reclaimer);

void delete_object_cache(object_cache *cache);

status_t object_cache_set_minimum_reserve(object_cache *cache,
	size_t objectCount);

void *object_cache_alloc(object_cache *cache, uint32 flags);
void object_cache_free(object_cache *cache, void *object);

status_t object_cache_reserve(object_cache *cache, size_t object_count,
	uint32 flags);

// Reports the memory currently allocated by the cache via _allocatedMemory.
void object_cache_get_usage(object_cache *cache, size_t *_allocatedMemory);

} // namespace HaikuKernelEmu
} // namespace UserlandFS

#endif // USERLAND_FS_HAIKU_SLAB_SLAB_H