/*
 * Copyright 2010, Axel Dörfler. All Rights Reserved.
 * Copyright 2007, Hugo Santos. All Rights Reserved.
 * Distributed under the terms of the MIT License.
 */
#ifndef _SLAB_OBJECT_DEPOT_H_
#define _SLAB_OBJECT_DEPOT_H_


#include <lock.h>
#include <KernelExport.h>


struct DepotMagazine;

typedef struct object_depot {
|
2010-01-20 16:00:57 +03:00
|
|
|
rw_lock outer_lock;
|
|
|
|
spinlock inner_lock;
|
2010-01-19 22:13:25 +03:00
|
|
|
DepotMagazine* full;
|
|
|
|
DepotMagazine* empty;
|
|
|
|
size_t full_count;
|
|
|
|
size_t empty_count;
|
2010-02-24 17:43:20 +03:00
|
|
|
size_t max_count;
|
|
|
|
size_t magazine_capacity;
|
2010-01-19 22:13:25 +03:00
|
|
|
struct depot_cpu_store* stores;
|
2010-02-24 17:43:20 +03:00
|
|
|
void* cookie;
|
2010-01-19 22:13:25 +03:00
|
|
|
|
|
|
|
void (*return_object)(struct object_depot* depot, void* cookie,
|
slab allocator:
* Implemented a more elaborated raw memory allocation backend (MemoryManager).
We allocate 8 MB areas whose pages we allocate and map when needed. An area is
divided into equally-sized chunks which form the basic units of allocation. We
have areas with three possible chunk sizes (small, medium, large), which is
basically what the ObjectCache implementations were using anyway.
* Added "uint32 flags" parameter to several of the slab allocator's object
cache and object depot functions. E.g. object_depot_store() potentially wants
to allocate memory for a magazine. But also in pure freeing functions it
might eventually become useful to have those flags, since they could end up
deleting an area, which might not be allowable in all situations. We should
introduce specific flags to indicate that.
* Reworked the block allocator. Since the MemoryManager allocates block-aligned
areas, maintains a hash table for lookup, and maps chunks to object caches,
we can quickly find out which object cache a to be freed allocation belongs
to and thus don't need the boundary tags anymore.
* Reworked the slab boot strap process. We allocate from the initial area only
when really necessary, i.e. when the object cache for the respective
allocation size has not been created yet. A single page is thus sufficient.
other:
* vm_allocate_early(): Added boolean "blockAlign" parameter. If true, the
semantics is the same as for B_ANY_KERNEL_BLOCK_ADDRESS.
* Use an object cache for page mappings. This significantly reduces the
contention on the heap bin locks.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35232 a95241bf-73f2-0310-859d-f6bbb57e9c96
2010-01-22 02:10:52 +03:00
|
|
|
void* object, uint32 flags);
|
2007-04-29 01:35:23 +04:00
|
|
|
} object_depot;

#ifdef __cplusplus
extern "C" {
#endif


/*	Initializes \a depot with magazines of \a capacity objects, retaining at
	most \a maxCount magazines; \a returnObject is invoked with \a cookie to
	hand objects back to the owner. \a flags are allocation flags (see the
	slab allocator's CACHE_* flags). Returns an error code on failure. */
status_t object_depot_init(object_depot* depot, size_t capacity,
	size_t maxCount, uint32 flags, void* cookie,
	void (*returnObject)(object_depot* depot, void* cookie, void* object,
		uint32 flags));

/*	Tears down \a depot, returning all cached objects via the return_object
	callback. \a flags are passed through to freeing operations. */
void object_depot_destroy(object_depot* depot, uint32 flags);

/*	Fetches a cached object from \a depot, or NULL if none is available. */
void* object_depot_obtain(object_depot* depot);

/*	Puts \a object into \a depot for reuse. \a flags allow/forbid allocation
	(a new magazine may need to be allocated to hold the object). */
void object_depot_store(object_depot* depot, void* object, uint32 flags);

/*	Flushes all cached objects from \a depot back to its owner. */
void object_depot_make_empty(object_depot* depot, uint32 flags);

#ifdef __cplusplus
}
#endif


#endif	/* _SLAB_OBJECT_DEPOT_H_ */