/* pools.c */

/* XXX - add documentation to this file! */
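
/*
 * pools.c implements a simple fixed-size block allocator ("pool") for
 * kernel code.  The summary below is reconstructed from the code in this
 * file and is not an authoritative specification:
 *
 *  - pool_init() creates a pool whose blocks are all `size` bytes long and
 *    maps a first wired memory region for it.
 *  - pool_get() returns a zeroed block, preferring the freelist and falling
 *    back to carving fresh space out of a memory region (mapping a new
 *    region if every existing one is full).
 *  - pool_put() clears the block and pushes it back onto the freelist.
 *  - pool_destroy() unmaps every region and frees the control structure.
 *
 * Illustrative usage sketch (error handling omitted; `struct my_obj` is a
 * hypothetical caller-side type, not something defined here):
 *
 *	struct pool_ctl *pool;
 *
 *	if (pool_init(&pool, sizeof(struct my_obj)) == 0) {
 *		struct my_obj *obj = (struct my_obj *)pool_get(pool);
 *		// ... use obj ...
 *		pool_put(pool, obj);
 *		pool_destroy(pool);
 *	}
 */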

#include <kernel.h>
#include <OS.h>
#include <KernelExport.h>
#include <pools.h>
#include <vm.h>
#include <malloc.h>
#include <atomic.h>
#include <ktypes.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <debug.h>

/* granularity used when sizing pool memory blocks (4 kB) */
#define POOL_ALLOC_SZ (4 * 1024)
#define ROUND_TO_PAGE_SIZE(x) (((x) + (POOL_ALLOC_SZ) - 1) & ~((POOL_ALLOC_SZ) - 1))
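
/*
 * Illustrative arithmetic for the macro above: ROUND_TO_PAGE_SIZE(600) and
 * ROUND_TO_PAGE_SIZE(4096) both evaluate to 4096, while
 * ROUND_TO_PAGE_SIZE(4097) evaluates to 8192; sizes are rounded up to the
 * next multiple of POOL_ALLOC_SZ.
 */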


#ifdef WALK_POOL_LIST
void walk_pool_list(struct pool_ctl *p)
{
	struct pool_mem *pb = p->list;

	dprintf("Pool: %p\n", p);
	dprintf(" -> list = %p\n", pb);
	while (pb) {
		dprintf(" -> mem_block %p, %p\n", pb, pb->next);
		pb = pb->next;
	}
}
#endif
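

/*
 * pool_debug_walk():
 * Dump the pool's freelist to the kernel debug output, verifying the
 * FREE_MAGIC markers on each entry along the way.  Holds the pool lock
 * (benaphore or read lock, depending on POOL_USES_BENAPHORES) while it
 * walks the list.
 */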
void pool_debug_walk(struct pool_ctl *p)
{
	struct free_blk *ptr;
	int i = 1;

	dprintf("%ld byte blocks allocated, but now free:\n\n", p->alloc_size);

#if POOL_USES_BENAPHORES
	ACQUIRE_BENAPHORE(p->lock);
#else
	ACQUIRE_READ_LOCK(p->lock);
#endif
	ptr = p->freelist;
	while (ptr) {
		ASSERT(ptr->magic == FREE_MAGIC);
		ASSERT(FREE_MAGIC + (uint32)ptr->next == ptr->magic_check);
		dprintf(" %02d: %p\n", i++, ptr);
		ptr = ptr->next;
	}
#if POOL_USES_BENAPHORES
	RELEASE_BENAPHORE(p->lock);
#else
	RELEASE_READ_LOCK(p->lock);
#endif
}
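

/*
 * pool_debug():
 * Enable debug output for this pool and give it a name, which is used as
 * a prefix in the dprintf() messages emitted by pool_get() and pool_put().
 */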
void pool_debug(struct pool_ctl *p, char *name)
{
	p->debug = 1;
	strlcpy(p->name, name, POOL_DEBUG_NAME_SZ);
}
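

/*
 * get_mem_block():
 * Map a new wired, contiguous memory region of pool->block_size bytes,
 * wrap it in a pool_mem descriptor and push the descriptor onto the front
 * of the pool's block list.  Returns the new block, or NULL if the
 * allocation, the region creation or the block benaphore setup failed.
 */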
static struct pool_mem *get_mem_block(struct pool_ctl *pool)
{
	struct pool_mem *block;

	block = (struct pool_mem *)malloc(sizeof(struct pool_mem));
	if (block == NULL)
		return NULL;

	memset(block, 0, sizeof(*block));

	block->aid = vm_create_anonymous_region(vm_get_kernel_aspace_id(),
		"some pool block",
		(void**)&block->base_addr,
		REGION_ADDR_ANY_ADDRESS, pool->block_size,
		REGION_WIRING_WIRED_CONTIG,
		LOCK_KERNEL|LOCK_RW);
	if (block->aid < 0) {
		free(block);
		return NULL;
	}

	block->mem_size = block->avail = pool->block_size;
	block->ptr = block->base_addr;
	INIT_BENAPHORE(block->lock, "pool_mem_lock");

	if (CHECK_BENAPHORE(block->lock) >= 0) {
#if POOL_USES_BENAPHORES
		ACQUIRE_BENAPHORE(pool->lock);
#else
		ACQUIRE_WRITE_LOCK(pool->lock);
#endif

		// insert the block at the beginning of the pool's block list
		block->next = pool->list;
		pool->list = block;

#ifdef WALK_POOL_LIST
		walk_pool_list(pool);
#endif

#if POOL_USES_BENAPHORES
		RELEASE_BENAPHORE(pool->lock);
#else
		RELEASE_WRITE_LOCK(pool->lock);
#endif

		return block;
	}

	/* benaphore setup failed; undo the region mapping */
	UNINIT_BENAPHORE(block->lock);
	vm_delete_region(vm_get_kernel_aspace_id(), block->aid);
	free(block);

	return NULL;
}
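

/*
 * pool_init():
 * Create a new pool that hands out blocks of `size` bytes (raised to
 * sizeof(struct free_blk) if smaller, so a freelist header always fits).
 * The pool's backing memory blocks are sized to hold at least eight
 * allocations, rounded up to a multiple of POOL_ALLOC_SZ, and the first
 * memory block is mapped immediately.  On success *_newPool points to the
 * new pool and 0 is returned; on failure *_newPool is NULL and ENOMEM or
 * ENOLCK is returned.
 */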
int32 pool_init(struct pool_ctl **_newPool, size_t size)
{
	struct pool_ctl *pool = NULL;

	/* if the init fails, the new pool will be set to NULL */
	*_newPool = NULL;

	/* minimum block size is sizeof the free_blk structure */
	if (size < sizeof(struct free_blk))
		size = sizeof(struct free_blk);

	pool = (struct pool_ctl *)malloc(sizeof(struct pool_ctl));
	if (pool == NULL)
		return ENOMEM;

	memset(pool, 0, sizeof(*pool));

#if POOL_USES_BENAPHORES
	INIT_BENAPHORE(pool->lock, "pool_lock");
	if (CHECK_BENAPHORE(pool->lock) < 0) {
		free(pool);
		return ENOLCK;
	}
#else
	INIT_RW_LOCK(pool->lock, "pool_lock");
	if (CHECK_RW_LOCK(pool->lock) < 0) {
		free(pool);
		return ENOLCK;
	}
#endif

	// at least 8 allocations will always fit in one memory block
	pool->block_size = ROUND_TO_PAGE_SIZE(size * 8);
	pool->alloc_size = size;
	pool->list = NULL;
	pool->freelist = NULL;

	/* now add a first block */
	get_mem_block(pool);
	if (!pool->list) {
#if POOL_USES_BENAPHORES
		UNINIT_BENAPHORE(pool->lock);
#else
		UNINIT_RW_LOCK(pool->lock);
#endif
		free(pool);
		return ENOMEM;
	}

	*_newPool = pool;
	return 0;
}
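

/*
 * pool_get():
 * Return a zeroed block of p->alloc_size bytes from the pool, or NULL if
 * no memory could be obtained.  Blocks are taken from the freelist first;
 * otherwise the memory blocks are scanned for spare space, and as a last
 * resort a new memory block is mapped via get_mem_block().
 */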
void *pool_get(struct pool_ctl *p)
{
	/* ok, so now we look for a suitable block... */
	struct pool_mem *mp = p->list;
	struct free_blk *rv = NULL;

#if POOL_USES_BENAPHORES
	ACQUIRE_BENAPHORE(p->lock);
#else
	ACQUIRE_WRITE_LOCK(p->lock);
#endif

	if (p->freelist) {
		/* woohoo, just grab a block! */
		rv = p->freelist;
		ASSERT(rv->magic == FREE_MAGIC);
		ASSERT(FREE_MAGIC + (uint32)rv->next == rv->magic_check);

		if (p->debug)
			dprintf("%s: allocating %p, setting freelist to %p\n",
				p->name, p->freelist, rv->next);

		p->freelist = rv->next;

#if POOL_USES_BENAPHORES
		RELEASE_BENAPHORE(p->lock);
#else
		RELEASE_WRITE_LOCK(p->lock);
#endif

		memset(rv, 0, p->alloc_size);
		return rv;
	}

#if !POOL_USES_BENAPHORES
	RELEASE_WRITE_LOCK(p->lock);
	ACQUIRE_READ_LOCK(p->lock);
#endif

	/* no free blocks, so try to allocate off the top of the memory blocks;
	** we must hold the global pool lock while iterating through the list!
	*/
	do {
		ACQUIRE_BENAPHORE(mp->lock);

		if (mp->avail >= p->alloc_size) {
			rv = (struct free_blk *)mp->ptr;
			mp->ptr += p->alloc_size;
			mp->avail -= p->alloc_size;
			RELEASE_BENAPHORE(mp->lock);
			break;
		}
		RELEASE_BENAPHORE(mp->lock);
	} while ((mp = mp->next) != NULL);

#if POOL_USES_BENAPHORES
	RELEASE_BENAPHORE(p->lock);
#else
	RELEASE_READ_LOCK(p->lock);
#endif

	if (rv) {
		memset(rv, 0, p->alloc_size);
		return rv;
	}

	/* every memory block is full, so map a fresh one */
	mp = get_mem_block(p);
	if (mp == NULL)
		return NULL;

	ACQUIRE_BENAPHORE(mp->lock);

	if (mp->avail >= p->alloc_size) {
		rv = (struct free_blk *)mp->ptr;
		mp->ptr += p->alloc_size;
		mp->avail -= p->alloc_size;
	}
	RELEASE_BENAPHORE(mp->lock);

	if (rv == NULL)
		return NULL;

	memset(rv, 0, p->alloc_size);
	return rv;
}
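

/*
 * pool_put():
 * Return a block obtained from pool_get() to the pool.  The block is
 * cleared and pushed onto the freelist; its first bytes are reused as a
 * struct free_blk header carrying the next pointer plus the FREE_MAGIC
 * markers that pool_get() and pool_debug_walk() later verify.
 */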
void pool_put(struct pool_ctl *p, void *ptr)
{
#if POOL_USES_BENAPHORES
	ACQUIRE_BENAPHORE(p->lock);
#else
	ACQUIRE_WRITE_LOCK(p->lock);
#endif

	memset(ptr, 0, p->alloc_size);

	((struct free_blk*)ptr)->next = p->freelist;
	((struct free_blk*)ptr)->magic = FREE_MAGIC;
	((struct free_blk*)ptr)->magic_check = FREE_MAGIC + (uint32)p->freelist;

	if (p->debug) {
		dprintf("%s: adding %p, setting next = %p\n",
			p->name, ptr, p->freelist);
	}

	p->freelist = ptr;

	if (p->debug)
		dprintf("%s: freelist = %p\n", p->name, p->freelist);

#if POOL_USES_BENAPHORES
	RELEASE_BENAPHORE(p->lock);
#else
	RELEASE_WRITE_LOCK(p->lock);
#endif
}
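

/*
 * pool_destroy():
 * Tear down a pool: delete every memory region it mapped, free the
 * per-block descriptors and their locks, and finally release the pool
 * control structure itself.  Any blocks still handed out become invalid.
 */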
void pool_destroy(struct pool_ctl *p)
{
	struct pool_mem *mp, *temp;

	if (p == NULL)
		return;

	/* the lock is deleted below, so we don't have to unlock */
#if POOL_USES_BENAPHORES
	ACQUIRE_BENAPHORE(p->lock);
#else
	ACQUIRE_WRITE_LOCK(p->lock);
#endif

	mp = p->list;
	while (mp != NULL) {
		vm_delete_region(vm_get_kernel_aspace_id(), mp->aid);
		temp = mp;
		mp = mp->next;
		/* release the descriptor we just unlinked, not the next one */
		UNINIT_BENAPHORE(temp->lock);
		free(temp);
	}

#if POOL_USES_BENAPHORES
	UNINIT_BENAPHORE(p->lock);
#else
	UNINIT_RW_LOCK(p->lock);
#endif
	free(p);
}