/* haiku/headers/private/kernel/heap.h */
/*
* Copyright 2002-2006, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef _KERNEL_HEAP_H
#define _KERNEL_HEAP_H
#include <OS.h>
#include "kernel_debug_config.h"
// Heap sizing constants. The replacement lists are parenthesized so the
// macros stay correct inside larger expressions (e.g. `x % HEAP_GROW_SIZE`
// would otherwise expand to `x % 1 * 1024 * 1024`) — CERT PRE02-C.

// allocate 16MB initial heap for the kernel
#define INITIAL_HEAP_SIZE			(16 * 1024 * 1024)
// grow by another 4MB each time the heap runs out of memory
#define HEAP_GROW_SIZE				(4 * 1024 * 1024)
// allocate a dedicated 1MB area for dynamic growing
#define HEAP_DEDICATED_GROW_SIZE	(1 * 1024 * 1024)
// use areas for allocations bigger than 1MB
#define HEAP_AREA_USE_THRESHOLD	(1 * 1024 * 1024)

// allocation/deallocation flags for {malloc,free}_etc()
#define HEAP_DONT_WAIT_FOR_MEMORY		0x01
#define HEAP_DONT_LOCK_KERNEL_SPACE	0x02
#define HEAP_PRIORITY_VIP				0x04
// Tuning descriptor for one heap size class; passed to
// heap_create_allocator().
// NOTE(review): per-field semantics are inferred from the field names —
// confirm against the allocator implementation before relying on them.
typedef struct heap_class_s {
const char *name;
uint32 initial_percentage;	// share of the heap initially given to this class
size_t max_allocation_size;	// largest request this class should serve
size_t page_size;	// size of the pages this class manages
size_t min_bin_size;	// smallest bin size carved from a page
size_t bin_alignment;	// alignment bin sizes are rounded to
uint32 min_count_per_page;	// minimum number of bins a page must hold
size_t max_waste_per_page;	// upper bound on unusable space per page
} heap_class;
// Opaque handle to one heap instance; the layout is private to the
// implementation.
typedef struct heap_allocator_s heap_allocator;
#ifdef __cplusplus
extern "C" {
#endif
// Allocate `size` bytes aligned to `alignment` (0 requests the default
// alignment, see malloc_etc()), honoring the HEAP_* flags above.
void* memalign_etc(size_t alignment, size_t size, uint32 flags);
// Free a block previously allocated via the *_etc() functions, honoring the
// HEAP_* flags above.
void free_etc(void* address, uint32 flags);
// Standard memalign(): allocate `size` bytes with the given alignment.
void* memalign(size_t alignment, size_t size);
// Queue `block` to be freed later from a safe context.
// NOTE(review): the exact deferral point lives in the implementation; confirm.
void deferred_free(void* block);
// Reference-counting allocation helpers.
// NOTE(review): semantics inferred from the names (allocate with an initial
// reference / acquire another / release one) — confirm in the implementation.
void* malloc_referenced(size_t size);
void* malloc_referenced_acquire(void* data);
void malloc_referenced_release(void* data);
// Attach the memory range [base, base + size) backed by area `areaID` to an
// existing heap.
void heap_add_area(heap_allocator* heap, area_id areaID, addr_t base,
size_t size);
// Create a heap allocator managing [base, base + size), configured by
// `heapClass`.
// NOTE(review): the effect of `allocateOnHeap` (where the allocator's own
// bookkeeping lives) is not visible here — confirm in the implementation.
heap_allocator* heap_create_allocator(const char* name, addr_t base,
size_t size, const heap_class* heapClass, bool allocateOnHeap);
// Aligned allocation from the given heap (alignment 0 presumably means
// default — confirm).
void* heap_memalign(heap_allocator* heap, size_t alignment, size_t size);
// Free `address` if it belongs to `heap`; the status_t reports the outcome.
status_t heap_free(heap_allocator* heap, void* address);
#if KERNEL_HEAP_LEAK_CHECK
// Install a callback returning the caller address recorded with each
// allocation for leak tracking.
void heap_set_get_caller(heap_allocator* heap, addr_t (*getCaller)());
#endif
// Early boot: set up the initial kernel heap on [heapBase, heapBase + heapSize).
status_t heap_init(addr_t heapBase, size_t heapSize);
// Staged init hook, run once areas are available.
status_t heap_init_post_area();
// Staged boot init hooks, run after semaphores respectively threading become
// available.
// NOTE(review): roles inferred from the kernel's *_post_* naming convention;
// confirm what each stage actually sets up in the implementation.
status_t heap_init_post_sem();
status_t heap_init_post_thread();
#ifdef __cplusplus
}
#endif
// malloc() variant that accepts the HEAP_* allocation flags defined above.
// Thin forwarder to memalign_etc() with no particular alignment requested.
static inline void*
malloc_etc(size_t size, uint32 flags)
{
	const size_t kNoAlignment = 0;	// let the allocator choose its default
	return memalign_etc(kNoAlignment, size, flags);
}
#ifdef __cplusplus
#include <new>
#include <util/SinglyLinkedList.h>
struct malloc_flags {
uint32 flags;
malloc_flags(uint32 flags)
:
flags(flags)
{
}
malloc_flags(const malloc_flags& other)
:
flags(other.flags)
{
}
};
// Scalar operator new overload taking HEAP_* allocation flags. Declared
// throw(), so allocation failure is reported through the return value
// rather than by throwing (malloc_etc() presumably returns NULL then —
// confirm in the implementation).
inline void*
operator new(size_t size, const malloc_flags& flags) throw()
{
	void* block = malloc_etc(size, flags.flags);
	return block;
}
// Array counterpart of the flag-aware operator new above; same non-throwing
// contract.
inline void*
operator new[](size_t size, const malloc_flags& flags) throw()
{
	void* block = malloc_etc(size, flags.flags);
	return block;
}
// Mix-in for objects that must not be deleted directly from the current
// context: hand them to deferred_delete() instead. The base class
// presumably provides an intrusive singly-linked-list link so instances can
// be queued without extra allocation — confirm in util/SinglyLinkedList.h.
class DeferredDeletable : public SinglyLinkedListLinkImpl<DeferredDeletable> {
public:
virtual ~DeferredDeletable();
};
// Schedule `deletable` for deletion at a later, safe point.
void deferred_delete(DeferredDeletable* deletable);
#endif /* __cplusplus */
#endif /* _KERNEL_HEAP_H */