/* haiku/headers/private/kernel/slab/Slab.h — 71 lines, 2.0 KiB, C */

/*
* Copyright 2008, Axel Dörfler. All Rights Reserved.
* Copyright 2007, Hugo Santos. All Rights Reserved.
*
* Distributed under the terms of the MIT License.
*/
#ifndef _SLAB_SLAB_H_
#define _SLAB_SLAB_H_
#include <heap.h>
/* Flag bits for the slab allocator's object cache API. The first three
   mirror the HEAP_* constants from <heap.h>, so they can be passed through
   to the underlying heap layer unchanged. */
enum {
/* object_cache_{alloc,free}() flags */
CACHE_DONT_WAIT_FOR_MEMORY = HEAP_DONT_WAIT_FOR_MEMORY,
	// don't block waiting for memory to become available
CACHE_DONT_LOCK_KERNEL_SPACE = HEAP_DONT_LOCK_KERNEL_SPACE,
	// don't acquire kernel address space locks (e.g. when called from
	// a context that already holds them)
CACHE_PRIORITY_VIP = HEAP_PRIORITY_VIP,
	// high-priority allocation (may tap reserves)
CACHE_ALLOC_FLAGS = CACHE_DONT_WAIT_FOR_MEMORY
| CACHE_DONT_LOCK_KERNEL_SPACE
| CACHE_PRIORITY_VIP,
	// mask of all flags valid for object_cache_alloc()/free()
/* create_object_cache_etc flags */
CACHE_NO_DEPOT = 0x08000000,
	// don't use a per-CPU depot (magazine layer) for this cache
CACHE_UNLOCKED_PAGES = 0x10000000, // unsupported
CACHE_LARGE_SLAB = 0x20000000,
	// use larger slabs than the default sizing would pick
/* internal */
CACHE_ALIGN_ON_SIZE = 0x40000000,
CACHE_DURING_BOOT = 0x80000000
	// cache created during kernel boot, before the full VM is up
};
/* Opaque handle to an object cache; the definition lives in the slab
   implementation. */
struct ObjectCache;
typedef struct ObjectCache object_cache;
/* Per-object init hook; \c cookie is the value given at cache creation.
   Presumably invoked when the cache constructs a new object — returning
   an error should fail the allocation (confirm against implementation). */
typedef status_t (*object_cache_constructor)(void* cookie, void* object);
/* Per-object teardown hook; counterpart of the constructor. */
typedef void (*object_cache_destructor)(void* cookie, void* object);
/* Memory-pressure callback; \c level presumably indicates how aggressively
   the cache should release memory — TODO confirm semantics in the
   implementation. */
typedef void (*object_cache_reclaimer)(void* cookie, int32 level);
#ifdef __cplusplus
extern "C" {
#endif
/*!	Creates an object cache for fixed-size objects of \a object_size bytes.
	\a alignment is the required object alignment; \a cookie is passed
	verbatim to \a constructor and \a destructor, which — if non-NULL —
	are the per-object init/teardown hooks (see the typedefs above).
	Returns the new cache, or NULL on failure (NOTE(review): presumably —
	confirm against the implementation).
*/
object_cache* create_object_cache(const char* name, size_t object_size,
	size_t alignment, void* cookie, object_cache_constructor constructor,
	object_cache_destructor destructor);
/*	Extended variant of create_object_cache(): additionally takes
	\a max_byte_usage (upper bound on the cache's memory footprint;
	presumably 0 means unlimited — confirm), creation \a flags
	(CACHE_NO_DEPOT and friends above), and an optional \a reclaimer
	called under memory pressure. */
object_cache* create_object_cache_etc(const char* name, size_t object_size,
size_t alignment, size_t max_byte_usage, uint32 flags, void* cookie,
object_cache_constructor constructor, object_cache_destructor destructor,
object_cache_reclaimer reclaimer);
/*	Destroys \a cache and releases its memory. The caller must ensure no
	objects from the cache are still in use. */
void delete_object_cache(object_cache* cache);
/*	Ensures at least \a objectCount objects are kept allocatable without
	further backend allocation (NOTE(review): presumed semantics from the
	name — confirm in the implementation). */
status_t object_cache_set_minimum_reserve(object_cache* cache,
size_t objectCount);
/*	Allocates one object from \a cache. \a flags is a subset of
	CACHE_ALLOC_FLAGS. Presumably returns NULL on failure — confirm. */
void* object_cache_alloc(object_cache* cache, uint32 flags);
/*	History note (r35232, 2010-01-22) for the "uint32 flags" parameters below:
 *
 *	slab allocator:
 *	- Implemented a more elaborate raw memory allocation backend
 *	  (MemoryManager): 8 MB areas whose pages are allocated and mapped on
 *	  demand; each area is divided into equally-sized chunks (small, medium,
 *	  or large), matching what the ObjectCache implementations already used.
 *	- Added a "uint32 flags" parameter to several object cache and object
 *	  depot functions. E.g. object_depot_store() may need to allocate memory
 *	  for a magazine; even pure freeing functions may end up deleting an
 *	  area, which is not allowable in all situations.
 *	- Reworked the block allocator: since the MemoryManager allocates
 *	  block-aligned areas, keeps a lookup hash table, and maps chunks to
 *	  object caches, boundary tags are no longer needed to find the owning
 *	  cache of a freed allocation.
 *	- Reworked slab bootstrap: the initial area is used only while the
 *	  object cache for a given size doesn't exist yet; a single page
 *	  suffices.
 *
 *	other:
 *	- vm_allocate_early(): added boolean "blockAlign" parameter
 *	  (B_ANY_KERNEL_BLOCK_ADDRESS semantics when true).
 *	- Page mappings now use an object cache, greatly reducing contention on
 *	  the heap bin locks.
 *
 *	git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35232
 *	a95241bf-73f2-0310-859d-f6bbb57e9c96
 */
/*	Returns \a object to \a cache. \a flags is a subset of CACHE_ALLOC_FLAGS
	(freeing may itself need to allocate, e.g. a depot magazine — see the
	history note above). */
void object_cache_free(object_cache* cache, void* object, uint32 flags);
/*	Pre-allocates \a object_count objects so subsequent allocations can be
	served without hitting the backend (NOTE(review): presumed from the
	name — confirm in the implementation). */
status_t object_cache_reserve(object_cache* cache, size_t object_count,
uint32 flags);
/*	Reports the cache's memory usage via \a _allocatedMemory. */
void object_cache_get_usage(object_cache* cache, size_t* _allocatedMemory);
#ifdef __cplusplus
}
#endif
#endif /* _SLAB_SLAB_H_ */