* Replaced rtm_* placeholders with a real implementation based on the boot

loader's heap. Unlike what the BeBook documents, we always lock the pools'
  memory, though.
* Added Haiku extension rtm_available() that returns how much space is left in
  a pool.
* I've disabled the undocumented functions for now - please open a bug report
  if you encounter them used in applications.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34419 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2009-12-01 17:21:46 +00:00
parent ba829ba0f9
commit ccedee2295
2 changed files with 523 additions and 169 deletions

View File

@ -9,51 +9,32 @@
#include <SupportDefs.h>
/*! Allocation from separate "pools" of memory. Those pools will be locked
in RAM if realtime allocators are turned on in the BMediaRoster, so don't
waste this memory unless it's needed. Also, the shared pool is a scarce
resource, so it's better if you create your own pool for your own needs
and leave the shared pool for BMediaNode instances and the needs of the
Media Kit.
*/
#if defined(__cplusplus)
#ifdef __cplusplus
extern "C" {
#endif
typedef struct rtm_pool rtm_pool;
/* If out_pool is NULL, the default pool will be created if it isn't */
/* already. */
/* If the default pool is already created, it will return EALREADY. */
#if defined(__cplusplus)
status_t rtm_create_pool(rtm_pool** out_pool, size_t total_size,
#ifdef __cplusplus
status_t rtm_create_pool(rtm_pool** _pool, size_t totalSize,
const char* name = NULL);
#else
status_t rtm_create_pool(rtm_pool** out_pool, size_t total_size,
const char* name);
status_t rtm_create_pool(rtm_pool** _pool, size_t totalSize, const char* name);
#endif
status_t rtm_delete_pool(rtm_pool* pool);
/* If NULL is passed for 'pool', the default pool is used if it has been */
/* created already. */
void* rtm_alloc(rtm_pool* pool, size_t size);
status_t rtm_free(void* data);
status_t rtm_realloc(void** data, size_t new_size);
status_t rtm_size_for(void* data);
status_t rtm_phys_size_for(void* data);
size_t rtm_available(rtm_pool* pool);
/* Return the default pool, or NULL if it has not yet been initialized. */
rtm_pool* rtm_default_pool();
#if defined(__cplusplus)
#ifdef __cplusplus
}
#endif
#endif // _REALTIME_ALLOC_H

View File

@ -1,157 +1,530 @@
/***********************************************************************
* AUTHOR: Marcus Overhagen
* FILE: RealtimeAlloc.cpp
* DESCR:
***********************************************************************/
#include <SupportDefs.h>
#include <RealtimeAlloc.h>
#include <stdlib.h>
#include "debug.h"
// Placeholder pool type of the old stub implementation - carries no state.
struct rtm_pool
{
};
extern "C" {
rtm_pool * _rtm_pool;
};
// Old stub: never allocates anything, just hands back a bogus marker
// pointer so callers have something non-NULL to pass around.
status_t
rtm_create_pool(rtm_pool ** out_pool, size_t total_size, const char * name)
{
BROKEN();
*out_pool = (rtm_pool *) 0x55557777;
TRACE(" new pool = %p\n", *out_pool);
/* If out_pool is NULL, the default pool will be created if it isn't already. */
/* If the default pool is already created, it will return EALREADY. */
return B_OK;
}
// Old stub: nothing to release since rtm_create_pool() allocated nothing.
status_t
rtm_delete_pool(rtm_pool * pool)
{
BROKEN();
TRACE(" pool = %p\n", pool);
return B_OK;
}
// Old stub: ignores the pool entirely and forwards to malloc().
void *
rtm_alloc(rtm_pool * pool, size_t size)
{
BROKEN();
TRACE(" pool = %p\n", pool);
/* If NULL is passed for pool, the default pool is used (if created). */
void *p = malloc(size);
TRACE(" returning ptr = %p\n", p);
return p;
}
// Old stub: counterpart of the malloc()-based rtm_alloc() above.
status_t
rtm_free(void * data)
{
BROKEN();
TRACE(" ptr = %p\n", data);
free(data);
return B_OK;
}
// Old stub: forwards to realloc(); on failure the original pointer in
// *data is left untouched and B_ERROR is returned.
status_t
rtm_realloc(void ** data, size_t new_size)
{
BROKEN();
TRACE(" ptr = %p\n", *data);
void * newptr = realloc(*data, new_size);
if (newptr) {
*data = newptr;
TRACE(" new ptr = %p\n", *data);
return B_OK;
} else
return B_ERROR;
}
// Old stub: unimplemented, always reports size 0.
status_t
rtm_size_for(void * data)
{
UNIMPLEMENTED();
TRACE(" ptr = %p\n", data);
return 0;
}
// Old stub: unimplemented, always reports size 0.
status_t
rtm_phys_size_for(void * data)
{
UNIMPLEMENTED();
TRACE(" ptr = %p\n", data);
return 0;
}
// Old stub: returns a bogus marker pointer instead of a real default pool.
rtm_pool *
rtm_default_pool()
{
BROKEN();
/* Return the default pool, or NULL if not yet initialized */
TRACE(" returning pool = %p\n", (void *) 0x22229999);
return (rtm_pool *) 0x22229999;
}
/****************************************************************************/
/* undocumented symbols that libmedia.so exports */
/* the following function declarations are guessed and are still wrong */
/****************************************************************************/
extern "C" {
status_t rtm_create_pool_etc(rtm_pool ** out_pool, size_t total_size, const char * name, int32 param4, int32 param5, ...);
void rtm_get_pool(rtm_pool *pool,void *data,int32 param3,int32 param4, ...);
}
/*
param5 of rtm_create_pool_etc matches
param3 of rtm_get_pool
and might be a pointer into some structure
* Copyright 2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
param4 of rtm_create_pool_etc is 0 in the Doom game,
and might be a Flags field
param4 of rtm_get_pool is 0x00000003 in the Doom game,
and might be a Flags field
/*! A simple allocator that works directly on an area, based on the boot
loader's heap. See there for more information about its inner workings.
*/
status_t
rtm_create_pool_etc(rtm_pool ** out_pool, size_t total_size, const char * name, int32 param4, int32 param5, ...)
#include <RealtimeAlloc.h>

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <OS.h>

#include <locks.h>
#include <kernel/util/DoublyLinkedList.h>
//#define TRACE_RTM
#ifdef TRACE_RTM
# define TRACE(x...) printf(x);
#else
# define TRACE(x...) ;
#endif
/*!	Header of a chunk in a pool's heap. While a chunk is free, fNext links
	it into the pool's size-ordered free list; once allocated, everything
	from fNext onwards belongs to the user (see AllocatedAddress()).
*/
class FreeChunk {
public:
// Initializes the raw size and the free-list successor.
void SetTo(size_t size, FreeChunk* next);
// Usable bytes (raw size minus the header).
uint32 Size() const;
// Raw chunk size, including the header.
uint32 CompleteSize() const { return fSize; }
FreeChunk* Next() const { return fNext; }
void SetNext(FreeChunk* next) { fNext = next; }
// Splits off and returns the upper remainder, keeping splitSize here.
FreeChunk* Split(uint32 splitSize);
// True if the chunks are adjacent in memory and thus joinable.
bool IsTouching(FreeChunk* link);
// Merges an adjacent chunk; returns whichever of the two survives.
FreeChunk* Join(FreeChunk* link);
// Unlinks this chunk from the pool's free list.
void Remove(rtm_pool* pool,
FreeChunk* previous = NULL);
// Inserts this chunk into the pool's size-ordered free list.
void Enqueue(rtm_pool* pool);
// Address handed out to the user for this chunk.
void* AllocatedAddress() const;
// Maps a user address back to its chunk header.
static FreeChunk* SetToAllocated(void* allocated);
// Offset of the user data from the start of the chunk.
static addr_t NextOffset() { return sizeof(uint32); }
private:
uint32 fSize;
FreeChunk* fNext;
};
/*!	A realtime memory pool: a single area carved into chunks that are
	managed through a size-ordered free list rooted at free_anchor.
	Pools are linked into the global sPools list.
*/
struct rtm_pool : DoublyLinkedListLinkImpl<rtm_pool> {
// backing area holding the pool's memory
area_id area;
// start of the heap inside the area
void* heap_base;
// total pool size, rounded up to whole pages
size_t max_size;
// bytes still allocatable (approximate bookkeeping, see rtm_alloc())
size_t available;
// dummy head of the size-ordered free list
FreeChunk free_anchor;
// per-pool lock; NOTE(review): not taken in rtm_alloc()/Free() - verify
mutex lock;
bool Contains(void* buffer) const;
void Free(void* buffer);
};
typedef DoublyLinkedList<rtm_pool> PoolList;
const static uint32 kAlignment = 256;
// all memory chunks will be a multiple of this
static mutex sPoolsLock = {-1, -1};
static PoolList sPools;
void
FreeChunk::SetTo(size_t size, FreeChunk* next)
{
BROKEN();
*out_pool = (rtm_pool *) 0x44448888;
TRACE(" new pool = %p\n", *out_pool);
TRACE(" size = %d\n",(int)total_size);
TRACE(" name = %s\n",name);
TRACE(" param4 = 0x%08x\n",(int)param4);
TRACE(" param5 = 0x%08x\n",(int)param5);
return B_OK;
fSize = size;
fNext = next;
}
/*!	Usable (allocatable) byte count of this chunk - the raw chunk size
	minus the bookkeeping header that precedes the user data.
*/
uint32
FreeChunk::Size() const
{
	addr_t headerSize = FreeChunk::NextOffset();
	return fSize - headerSize;
}
/*!	Splits this chunk at \a splitSize usable bytes (rounded up to the next
	kAlignment boundary) and returns the newly formed upper remainder.
	The remainder inherits this chunk's free-list successor; the caller is
	responsible for relinking it at the correct list position.
*/
FreeChunk*
FreeChunk::Split(uint32 splitSize)
{
// round the usable size up to a kAlignment multiple
splitSize = (splitSize - 1 + kAlignment) & ~(kAlignment - 1);
// the remainder starts right after this chunk's header + payload
FreeChunk* chunk
= (FreeChunk*)((uint8*)this + FreeChunk::NextOffset() + splitSize);
chunk->fSize = fSize - splitSize - FreeChunk::NextOffset();
chunk->fNext = fNext;
fSize = splitSize + FreeChunk::NextOffset();
return chunk;
}
/*!	Returns whether \a chunk directly precedes or follows this chunk in
	memory, so the two could be merged via Join().
*/
bool
FreeChunk::IsTouching(FreeChunk* chunk)
{
	if (chunk == NULL)
		return false;

	uint8* self = (uint8*)this;
	uint8* other = (uint8*)chunk;
	return self + fSize == other || other + chunk->fSize == self;
}
/*!	Merges \a chunk with this chunk and returns the surviving chunk, which
	is always the one at the lower address.
	The chunks must be adjacent (see FreeChunk::IsTouching()); otherwise
	the result is meaningless.
*/
FreeChunk*
FreeChunk::Join(FreeChunk* chunk)
{
	// The lower chunk swallows the upper one's size and inherits its
	// free-list successor.
	FreeChunk* lower = this;
	FreeChunk* upper = chunk;
	if (upper < lower) {
		lower = chunk;
		upper = this;
	}

	lower->fSize += upper->fSize;
	lower->fNext = upper->fNext;
	return lower;
}
void
rtm_get_pool(rtm_pool *pool,void *data,int32 param3, int32 param4, ...)
FreeChunk::Remove(rtm_pool* pool, FreeChunk* previous)
{
UNIMPLEMENTED();
TRACE(" pool = %p\n", pool);
TRACE(" ptr = %p\n", data);
TRACE(" param3 = 0x%08x\n",(int)param3);
TRACE(" param4 = 0x%08x\n",(int)param4);
if (previous == NULL) {
// find the previous chunk in the list
FreeChunk* chunk = pool->free_anchor.fNext;
while (chunk != NULL && chunk != this) {
previous = chunk;
chunk = chunk->fNext;
}
if (chunk == NULL)
return;
}
previous->fNext = fNext;
fNext = NULL;
}
/*!	Inserts this chunk into \a pool's free list, which is kept ordered by
	ascending chunk size.
	NOTE(review): the scan compares chunk->Size() (net size) against this
	chunk's fSize (gross size), so the ordering is slightly skewed by the
	header size - verify whether this is intended.
*/
void
FreeChunk::Enqueue(rtm_pool* pool)
{
FreeChunk* chunk = pool->free_anchor.fNext;
FreeChunk* last = &pool->free_anchor;
while (chunk && chunk->Size() < fSize) {
last = chunk;
chunk = chunk->fNext;
}
fNext = chunk;
last->fNext = this;
}
/*!	Returns the address handed out to the user for this chunk: the first
	byte after the fSize header.
	NOTE(review): this returns &fNext while SetToAllocated() rewinds by
	NextOffset() == sizeof(uint32); the two only agree if fNext really sits
	sizeof(uint32) bytes into the struct. Alignment padding would break
	this on 64-bit targets - verify before a 64-bit build.
*/
void*
FreeChunk::AllocatedAddress() const
{
return (void*)&fNext;
}
/*!	Maps a user address back to its chunk header by rewinding
	NextOffset() bytes.
	NOTE(review): assumes AllocatedAddress() (which returns &fNext) lies
	exactly NextOffset() == sizeof(uint32) bytes into the chunk; alignment
	padding would break this on 64-bit targets - verify.
*/
FreeChunk*
FreeChunk::SetToAllocated(void* allocated)
{
return (FreeChunk*)((uint8*)allocated - FreeChunk::NextOffset());
}
// #pragma mark - rtm_pool
bool
rtm_pool::Contains(void* buffer) const
{
return (addr_t)heap_base <= (addr_t)buffer
&& (addr_t)heap_base - 1 + max_size >= (addr_t)buffer;
}
/*!	Returns \a allocated to this pool's free list, merging it with up to
	two directly adjacent free chunks before re-enqueueing it at the
	size-ordered position.
	NOTE(review): no lock is taken here - this seems to rely on external
	synchronization; confirm against the intended use of pool->lock.
*/
void
rtm_pool::Free(void* allocated)
{
FreeChunk* freedChunk = FreeChunk::SetToAllocated(allocated);
available += freedChunk->CompleteSize();
// try to join the new free chunk with an existing one
// it may be joined with up to two chunks
FreeChunk* chunk = free_anchor.Next();
FreeChunk* last = &free_anchor;
int32 joinCount = 0;
while (chunk) {
if (chunk->IsTouching(freedChunk)) {
// almost "insert" it into the list before joining
// because the next pointer is inherited by the chunk
freedChunk->SetNext(chunk->Next());
freedChunk = chunk->Join(freedChunk);
// remove the joined chunk from the list
last->SetNext(freedChunk->Next());
chunk = last;
if (++joinCount == 2)
break;
}
last = chunk;
chunk = chunk->Next();
}
// enqueue the link at the right position; the
// free link queue is ordered by size
freedChunk->Enqueue(this);
}
// #pragma mark -
/*!	Looks up the pool whose heap contains \a buffer, or returns NULL if no
	registered pool contains it (e.g. for plain malloc()'d memory).
*/
static rtm_pool*
pool_for(void* buffer)
{
	MutexLocker locker(&sPoolsLock);

	PoolList::Iterator iterator = sPools.GetIterator();
	rtm_pool* pool;
	while ((pool = iterator.Next()) != NULL) {
		if (pool->Contains(buffer))
			return pool;
	}

	return NULL;
}
/*!	One-time initializer for the global pool-list lock, run through
	pthread_once() from rtm_create_pool().
*/
static void
pool_init(void)
{
mutex_init(&sPoolsLock, "rtm pools");
}
// #pragma mark - public API
/*!	Creates a new pool of \a totalSize bytes (rounded up to whole pages)
	backed by its own B_LAZY_LOCK'd area, registers it in the global pool
	list, and returns it in \a _pool.
	Returns B_NO_MEMORY, a mutex or area creation error, or B_OK.
	NOTE(review): unlike the header's comment, a NULL \a _pool does not
	create a default pool here - it would crash at "*_pool = pool";
	verify against the intended API contract.
*/
status_t
rtm_create_pool(rtm_pool** _pool, size_t totalSize, const char* name)
{
rtm_pool* pool = (rtm_pool*)malloc(sizeof(rtm_pool));
if (pool == NULL)
return B_NO_MEMORY;
if (name == NULL)
name = "realtime pool";
status_t status = mutex_init(&pool->lock, name);
if (status != B_OK) {
free(pool);
return status;
}
// round the pool size up to a multiple of the page size
pool->max_size = (totalSize - 1 + B_PAGE_SIZE) & ~(B_PAGE_SIZE - 1);
area_id area = create_area(name, &pool->heap_base, B_ANY_ADDRESS,
pool->max_size, B_LAZY_LOCK, B_READ_AREA | B_WRITE_AREA);
if (area < 0) {
mutex_destroy(&pool->lock);
free(pool);
return area;
}
pool->area = area;
pool->available = pool->max_size - FreeChunk::NextOffset();
// declare the whole heap as one chunk, and add it
// to the free list
FreeChunk* chunk = (FreeChunk*)pool->heap_base;
chunk->SetTo(pool->max_size, NULL);
pool->free_anchor.SetTo(0, chunk);
*_pool = pool;
// lazily initialize the global pool-list lock, then register the pool
static pthread_once_t sOnce = PTHREAD_ONCE_INIT;
pthread_once(&sOnce, &pool_init);
MutexLocker _(&sPoolsLock);
sPools.Add(pool);
return B_OK;
}
/*!	Deletes \a pool: unregisters it from the global pool list, deletes its
	backing area, and frees its bookkeeping structures.
	Any memory still allocated from the pool becomes invalid.
	Returns B_BAD_VALUE for a NULL \a pool, B_OK otherwise.
*/
status_t
rtm_delete_pool(rtm_pool* pool)
{
	// Guard against NULL - previously this crashed in sPools.Remove().
	if (pool == NULL)
		return B_BAD_VALUE;

	{
		// only hold the global lock while manipulating the list
		MutexLocker _(&sPoolsLock);
		sPools.Remove(pool);
	}

	delete_area(pool->area);
	mutex_destroy(&pool->lock);
	free(pool);

	return B_OK;
}
/*!	Allocates \a size bytes from \a pool by scanning the size-ordered free
	list for the first (i.e. smallest) chunk that fits, splitting it when
	it is substantially larger than needed.
	With a NULL \a pool, plain malloc() is used instead.
	Returns NULL on failure (uninitialized pool, size 0, or out of memory).
	NOTE(review): "available" is decreased by the aligned size plus one
	header, but an unsplit chunk may hand out more than that while Free()
	credits back the full chunk size - the bookkeeping can drift; verify.
	NOTE(review): pool->lock is not acquired here - confirm intended.
*/
void*
rtm_alloc(rtm_pool* pool, size_t size)
{
if (pool == NULL)
return malloc(size);
if (pool->heap_base == NULL || size == 0)
return NULL;
// align the size requirement to a kAlignment bytes boundary
size = (size - 1 + kAlignment) & ~(size_t)(kAlignment - 1);
if (size > pool->available) {
TRACE("malloc(): Out of memory!\n");
return NULL;
}
FreeChunk* chunk = pool->free_anchor.Next();
FreeChunk* last = &pool->free_anchor;
while (chunk && chunk->Size() < size) {
last = chunk;
chunk = chunk->Next();
}
if (chunk == NULL) {
// could not find a free chunk as large as needed
TRACE("malloc(): Out of memory!\n");
return NULL;
}
if (chunk->Size() > size + sizeof(FreeChunk) + kAlignment) {
// if this chunk is bigger than the requested size,
// we split it to form two chunks (with a minimal
// size of kAlignment allocatable bytes).
FreeChunk* freeChunk = chunk->Split(size);
last->SetNext(freeChunk);
// re-enqueue the free chunk at the correct position
freeChunk->Remove(pool, last);
freeChunk->Enqueue(pool);
} else {
// remove the chunk from the free list
last->SetNext(chunk->Next());
}
pool->available -= size + sizeof(uint32);
TRACE("malloc(%lu) -> %p\n", size, chunk->AllocatedAddress());
return chunk->AllocatedAddress();
}
/*!	Frees \a allocated, returning it to its owning pool if one contains
	it, or to malloc()'s heap otherwise. NULL is silently accepted.
*/
status_t
rtm_free(void* allocated)
{
	if (allocated == NULL)
		return B_OK;

	TRACE("rtm_free(%p)\n", allocated);

	rtm_pool* owner = pool_for(allocated);
	if (owner != NULL)
		owner->Free(allocated);
	else
		free(allocated);

	return B_OK;
}
/*!	Resizes the allocation at \a *_buffer to \a newSize bytes, moving and
	copying it as needed; on success the (possibly moved) buffer is stored
	back into \a *_buffer.
	Buffers not belonging to any pool are forwarded to realloc(); a
	\a newSize of 0 frees the buffer and stores NULL.
	Returns B_BAD_VALUE for a NULL \a _buffer, B_NO_MEMORY, or B_OK.
*/
status_t
rtm_realloc(void** _buffer, size_t newSize)
{
	if (_buffer == NULL)
		return B_BAD_VALUE;

	TRACE("rtm_realloc(%p, %lu)\n", *_buffer, newSize);

	void* oldBuffer = *_buffer;

	// find pool
	rtm_pool* pool = pool_for(oldBuffer);
	if (pool == NULL) {
		// not from any pool - treat it as a malloc()'d buffer
		void* buffer = realloc(oldBuffer, newSize);
		if (buffer != NULL) {
			*_buffer = buffer;
			return B_OK;
		}
		return B_NO_MEMORY;
	}

	if (newSize == 0) {
		// realloc() to size 0 frees the buffer
		TRACE("realloc(%p, %lu) -> NULL\n", oldBuffer, newSize);
		pool->Free(oldBuffer);
		*_buffer = NULL;
		return B_OK;
	}

	size_t copySize = newSize;
	if (oldBuffer != NULL) {
		FreeChunk* oldChunk = FreeChunk::SetToAllocated(oldBuffer);

		// Check if the old buffer still fits, and if it makes sense to keep it
		if (oldChunk->Size() >= newSize && newSize > oldChunk->Size() / 3) {
			TRACE("realloc(%p, %lu) old buffer is large enough\n",
				oldBuffer, newSize);
			return B_OK;
		}

		if (copySize > oldChunk->Size())
			copySize = oldChunk->Size();
	}

	void* newBuffer = rtm_alloc(pool, newSize);
	if (newBuffer == NULL)
		return B_NO_MEMORY;

	if (oldBuffer != NULL) {
		// memcpy() requires <string.h>, which was missing from the includes
		memcpy(newBuffer, oldBuffer, copySize);
		pool->Free(oldBuffer);
	}

	TRACE("realloc(%p, %lu) -> %p\n", oldBuffer, newSize, newBuffer);

	*_buffer = newBuffer;
	return B_OK;
}
/*!	Returns the usable size of the given allocation.
	TODO: this reports the size of the enclosing chunk, which may be
	larger than what was originally requested from rtm_alloc().
*/
status_t
rtm_size_for(void* buffer)
{
	if (buffer == NULL)
		return 0;

	return FreeChunk::SetToAllocated(buffer)->Size();
}
/*!	Returns the physical size occupied by the given allocation - currently
	computed exactly like rtm_size_for().
*/
status_t
rtm_phys_size_for(void* buffer)
{
	if (buffer == NULL)
		return 0;

	return FreeChunk::SetToAllocated(buffer)->Size();
}
/*!	Returns how many bytes can still be allocated from \a pool.
	For the NULL (malloc()-backed) pool a fixed dummy value is reported.
*/
size_t
rtm_available(rtm_pool* pool)
{
	if (pool != NULL)
		return pool->available;

	// whatever - might want to use system_info instead
	return 1024 * 1024;
}
/*!	Returns the default pool. There is none in this implementation -
	allocations without a pool simply go through malloc()/free() - so
	NULL is always returned.
*/
rtm_pool*
rtm_default_pool()
{
	return NULL;
}
#if 0
extern "C" {
// undocumented symbols that BeOS exports
status_t rtm_create_pool_etc(rtm_pool ** out_pool, size_t total_size, const char * name, int32 param4, int32 param5, ...);
void rtm_get_pool(rtm_pool *pool,void *data,int32 param3,int32 param4, ...);
}
#endif