/*
 * Copyright 2008, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 *
 * Distributed under the terms of the MIT License.
 */
#ifndef _SLAB_SLAB_H_
#define _SLAB_SLAB_H_
#include <heap.h>
enum {
|
2010-01-27 15:45:53 +03:00
|
|
|
/* object_cache_{alloc,free}() flags */
|
|
|
|
CACHE_DONT_WAIT_FOR_MEMORY = HEAP_DONT_WAIT_FOR_MEMORY,
|
|
|
|
CACHE_DONT_LOCK_KERNEL_SPACE = HEAP_DONT_LOCK_KERNEL_SPACE,
|
|
|
|
CACHE_PRIORITY_VIP = HEAP_PRIORITY_VIP,
|
|
|
|
CACHE_ALLOC_FLAGS = CACHE_DONT_WAIT_FOR_MEMORY
|
|
|
|
| CACHE_DONT_LOCK_KERNEL_SPACE
|
|
|
|
| CACHE_PRIORITY_VIP,
|
2007-10-22 00:44:19 +04:00
|
|
|
|
2010-01-27 15:45:53 +03:00
|
|
|
/* create_object_cache_etc flags */
|
|
|
|
CACHE_NO_DEPOT = 0x08000000,
|
|
|
|
CACHE_UNLOCKED_PAGES = 0x10000000, // unsupported
|
|
|
|
CACHE_LARGE_SLAB = 0x20000000,
|
2007-10-22 00:44:19 +04:00
|
|
|
|
|
|
|
/* internal */
|
2010-01-27 15:45:53 +03:00
|
|
|
CACHE_ALIGN_ON_SIZE = 0x40000000,
|
|
|
|
CACHE_DURING_BOOT = 0x80000000
|
2007-10-22 00:44:19 +04:00
|
|
|
};
|
|
|
|
|
2010-01-19 22:13:25 +03:00
|
|
|
struct ObjectCache;
|
|
|
|
typedef struct ObjectCache object_cache;
|
2007-10-22 00:44:19 +04:00
|
|
|
|
2010-01-19 22:13:25 +03:00
|
|
|
typedef status_t (*object_cache_constructor)(void* cookie, void* object);
|
|
|
|
typedef void (*object_cache_destructor)(void* cookie, void* object);
|
|
|
|
typedef void (*object_cache_reclaimer)(void* cookie, int32 level);
|
2007-10-22 00:44:19 +04:00
|
|
|
|
|
|
|
|
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2010-01-19 22:13:25 +03:00
|
|
|
object_cache* create_object_cache(const char* name, size_t object_size,
|
|
|
|
size_t alignment, void* cookie, object_cache_constructor constructor,
|
2007-10-22 00:44:19 +04:00
|
|
|
object_cache_destructor);
|
2010-01-19 22:13:25 +03:00
|
|
|
object_cache* create_object_cache_etc(const char* name, size_t object_size,
|
|
|
|
size_t alignment, size_t max_byte_usage, uint32 flags, void* cookie,
|
2007-10-22 00:44:19 +04:00
|
|
|
object_cache_constructor constructor, object_cache_destructor destructor,
|
|
|
|
object_cache_reclaimer reclaimer);
|
|
|
|
|
2010-01-19 22:13:25 +03:00
|
|
|
void delete_object_cache(object_cache* cache);
|
2007-10-22 00:44:19 +04:00
|
|
|
|
2010-01-19 22:13:25 +03:00
|
|
|
status_t object_cache_set_minimum_reserve(object_cache* cache,
|
2008-08-21 07:21:37 +04:00
|
|
|
size_t objectCount);
|
|
|
|
|
2010-01-19 22:13:25 +03:00
|
|
|
void* object_cache_alloc(object_cache* cache, uint32 flags);
|
slab allocator:
* Implemented a more elaborated raw memory allocation backend (MemoryManager).
We allocate 8 MB areas whose pages we allocate and map when needed. An area is
divided into equally-sized chunks which form the basic units of allocation. We
have areas with three possible chunk sizes (small, medium, large), which is
basically what the ObjectCache implementations were using anyway.
* Added "uint32 flags" parameter to several of the slab allocator's object
cache and object depot functions. E.g. object_depot_store() potentially wants
to allocate memory for a magazine. But also in pure freeing functions it
might eventually become useful to have those flags, since they could end up
deleting an area, which might not be allowable in all situations. We should
introduce specific flags to indicate that.
* Reworked the block allocator. Since the MemoryManager allocates block-aligned
areas, maintains a hash table for lookup, and maps chunks to object caches,
we can quickly find out which object cache a to be freed allocation belongs
to and thus don't need the boundary tags anymore.
* Reworked the slab boot strap process. We allocate from the initial area only
when really necessary, i.e. when the object cache for the respective
allocation size has not been created yet. A single page is thus sufficient.
other:
* vm_allocate_early(): Added boolean "blockAlign" parameter. If true, the
semantics is the same as for B_ANY_KERNEL_BLOCK_ADDRESS.
* Use an object cache for page mappings. This significantly reduces the
contention on the heap bin locks.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35232 a95241bf-73f2-0310-859d-f6bbb57e9c96
2010-01-22 02:10:52 +03:00
|
|
|
void object_cache_free(object_cache* cache, void* object, uint32 flags);
|
2007-10-22 00:44:19 +04:00
|
|
|
|
2010-01-19 22:13:25 +03:00
|
|
|
status_t object_cache_reserve(object_cache* cache, size_t object_count,
|
2007-10-22 00:44:19 +04:00
|
|
|
uint32 flags);
|
|
|
|
|
2010-01-19 22:13:25 +03:00
|
|
|
void object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory);
|
2008-08-06 04:07:55 +04:00
|
|
|
|
2007-10-22 00:44:19 +04:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
2007-04-26 07:41:24 +04:00
|
|
|
#endif
|
2007-10-22 00:44:19 +04:00
|
|
|
|
|
|
|
#endif /* _SLAB_SLAB_H_ */
|