Sync kernel heap with alignment handling fixes, simplifications and performance improvements from debug heap in r35422.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35423 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 638a24fa52
commit b7d3ddaa0e
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008-2009, Michael Lotz, mmlr@mlotz.ch.
+ * Copyright 2008-2010, Michael Lotz, mmlr@mlotz.ch.
  * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
  * Distributed under the terms of the MIT License.
  *
@@ -1308,31 +1308,47 @@ heap_unlink_page(heap_page *page, heap_page **list)
 
 
 static heap_page *
-heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount)
+heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount,
+	size_t alignment)
 {
 	MutexLocker pageLocker(heap->page_lock);
 	heap_area *area = heap->areas;
 	while (area) {
-		bool found = false;
 		if (area->free_page_count < pageCount) {
 			area = area->next;
 			continue;
 		}
 
+		uint32 step = 1;
+		uint32 firstValid = 0;
+		const uint32 lastValid = area->page_count - pageCount + 1;
+
+		if (alignment > heap->page_size) {
+			firstValid = (ROUNDUP(area->base, alignment) - area->base)
+				/ heap->page_size;
+			step = alignment / heap->page_size;
+		}
+
 		int32 first = -1;
-		for (uint32 i = 0; i < area->page_count; i++) {
-			if (area->page_table[i].in_use) {
-				first = -1;
+		for (uint32 i = firstValid; i < lastValid; i += step) {
+			if (area->page_table[i].in_use)
 				continue;
-			}
 
-			if (first < 0)
-				first = i;
+			first = i;
 
-			if (first >= 0) {
-				if ((i + 1 - first) == pageCount) {
-					found = true;
+			for (uint32 j = 1; j < pageCount; j++) {
+				if (area->page_table[i + j].in_use) {
+					first = -1;
+					i += j;
 					break;
 				}
 			}
+
+			if (first >= 0)
+				break;
 		}
 
-		if (!found) {
+		if (first < 0) {
 			area = area->next;
 			continue;
 		}
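The hunk above is the heart of the alignment handling: instead of scanning every page and tracking a found flag, the search starts at the first page whose address satisfies the requested alignment and then advances in steps of alignment / page_size, checking that the following pageCount - 1 pages are free. A minimal user-space sketch of that idea follows; the base address, page size, round_up() helper and the bool vector are illustrative stand-ins for the kernel's heap_area fields, ROUNDUP macro and page table, not the kernel code itself.

// Illustrative sketch of the aligned-run search used above; simplified,
// with stand-ins for the real heap_area fields and ROUNDUP macro.
#include <cstdint>
#include <cstdio>
#include <vector>

static uint32_t
round_up(uint32_t value, uint32_t multiple)
{
	return (value + multiple - 1) / multiple * multiple;
}

// Returns the index of the first run of pageCount free pages whose start
// address is aligned to 'alignment', or -1 if no such run exists.
static int32_t
find_aligned_run(uint32_t base, uint32_t pageSize, uint32_t alignment,
	const std::vector<bool>& inUse, uint32_t pageCount)
{
	uint32_t step = 1;
	uint32_t firstValid = 0;
	const uint32_t lastValid = (uint32_t)inUse.size() - pageCount + 1;

	if (alignment > pageSize) {
		// first page index whose address is a multiple of the alignment
		firstValid = (round_up(base, alignment) - base) / pageSize;
		step = alignment / pageSize;
	}

	for (uint32_t i = firstValid; i < lastValid; i += step) {
		if (inUse[i])
			continue;

		// candidate start found; verify the rest of the run is free
		bool fits = true;
		for (uint32_t j = 1; j < pageCount; j++) {
			if (inUse[i + j]) {
				fits = false;
				break;
			}
		}
		if (fits)
			return (int32_t)i;
	}
	return -1;
}

int main()
{
	// 8 pages of 4 KB based at 0x10000; want 2 pages aligned to 8 KB
	std::vector<bool> inUse = {true, false, false, false, false, true, false, false};
	printf("run starts at page %d\n",
		(int)find_aligned_run(0x10000, 0x1000, 0x2000, inUse, 2));
	return 0;
}

With base = 0x10000 and an 8 KB alignment the sketch reports page 2, i.e. address 0x12000, the first free, 8 KB aligned run of two pages. The kernel version additionally skips ahead past the busy page it found (the i += j in the diff), which this sketch omits for brevity.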
@@ -1357,111 +1373,120 @@ heap_allocate_contiguous_pages(heap_allocator *heap, uint32 pageCount)
 }
 
 
-static void *
-heap_raw_alloc(heap_allocator *heap, size_t size, uint32 binIndex)
-{
-	TRACE(("raw_alloc: heap %p; size %lu; bin %lu\n", heap, size, binIndex));
-	if (binIndex < heap->bin_count) {
-		heap_bin *bin = &heap->bins[binIndex];
-		MutexLocker binLocker(bin->lock);
-
-		heap_page *page = bin->page_list;
-		if (page == NULL) {
-			MutexLocker pageLocker(heap->page_lock);
-			heap_area *area = heap->areas;
-			if (area == NULL) {
-				TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap,
-					size));
-				return NULL;
-			}
-
-			// by design there are only areas in the list that still have
-			// free pages available
-			page = area->free_pages;
-			area->free_pages = page->next;
-			if (page->next)
-				page->next->prev = NULL;
-
-			heap_free_pages_removed(heap, area, 1);
-
-			if (page->in_use)
-				panic("got an in use page from the free pages list\n");
-			page->in_use = 1;
-
-			pageLocker.Unlock();
-
-			page->bin_index = binIndex;
-			page->free_count = bin->max_free_count;
-			page->empty_index = 0;
-			page->free_list = NULL;
-			page->next = page->prev = NULL;
-			bin->page_list = page;
-		}
-
-		// we have a page where we have a free slot
-		void *address = NULL;
-		if (page->free_list) {
-			// there's a previously freed entry we can use
-			address = page->free_list;
-			page->free_list = (addr_t *)*page->free_list;
-		} else {
-			// the page hasn't been fully allocated so use the next empty_index
-			address = (void *)(page->area->base + page->index * heap->page_size
-				+ page->empty_index * bin->element_size);
-			page->empty_index++;
-		}
-
-		page->free_count--;
-		if (page->free_count == 0) {
-			// the page is now full so we remove it from the page_list
-			bin->page_list = page->next;
-			if (page->next)
-				page->next->prev = NULL;
-			page->next = page->prev = NULL;
-		}
-
-		binLocker.Unlock();
-
 #if KERNEL_HEAP_LEAK_CHECK
-		heap_leak_check_info *info = (heap_leak_check_info *)((addr_t)address
-			+ bin->element_size - sizeof(heap_leak_check_info));
-		info->size = size - sizeof(heap_leak_check_info);
-		info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
-		info->team = (gKernelStartup ? 0 : team_get_current_team_id());
-		info->caller = heap->get_caller();
+static void
+heap_add_leak_check_info(addr_t address, size_t allocated, size_t size)
+{
+	heap_leak_check_info *info = (heap_leak_check_info *)(address + allocated
+		- sizeof(heap_leak_check_info));
+	info->size = size - sizeof(heap_leak_check_info);
+	info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
+	info->team = (gKernelStartup ? 0 : team_get_current_team_id());
+	info->caller = heap->get_caller();
+}
 #endif
-		return address;
-	}
+
+
+static void *
+heap_raw_alloc(heap_allocator *heap, size_t size, size_t alignment)
+{
+	TRACE(("heap %p: allocate %lu bytes from raw pages with alignment %lu\n",
+		heap, size, alignment));
 
 	uint32 pageCount = (size + heap->page_size - 1) / heap->page_size;
-	heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount);
+	heap_page *firstPage = heap_allocate_contiguous_pages(heap, pageCount,
+		alignment);
 	if (firstPage == NULL) {
 		TRACE(("heap %p: found no contiguous pages to allocate %ld bytes\n",
 			heap, size));
 		return NULL;
 	}
 
+	addr_t address = firstPage->area->base + firstPage->index * heap->page_size;
 #if KERNEL_HEAP_LEAK_CHECK
-	heap_leak_check_info *info = (heap_leak_check_info *)(firstPage->area->base
-		+ (firstPage->index + pageCount) * heap->page_size
-		- sizeof(heap_leak_check_info));
-	info->size = size - sizeof(heap_leak_check_info);
-	info->thread = (gKernelStartup ? 0 : thread_get_current_thread_id());
-	info->team = (gKernelStartup ? 0 : team_get_current_team_id());
-	info->caller = heap->get_caller();
+	heap_add_leak_check_info(address, pageCount * heap->page_size, size);
 #endif
-	return (void *)(firstPage->area->base + firstPage->index * heap->page_size);
+	return (void *)address;
 }
 
 
+static void *
+heap_allocate_from_bin(heap_allocator *heap, uint32 binIndex, size_t size)
+{
+	heap_bin *bin = &heap->bins[binIndex];
+	TRACE(("heap %p: allocate %lu bytes from bin %lu with element_size %lu\n",
+		heap, size, binIndex, bin->element_size));
+
+	MutexLocker binLocker(bin->lock);
+	heap_page *page = bin->page_list;
+	if (page == NULL) {
+		MutexLocker pageLocker(heap->page_lock);
+		heap_area *area = heap->areas;
+		if (area == NULL) {
+			TRACE(("heap %p: no free pages to allocate %lu bytes\n", heap,
+				size));
+			return NULL;
+		}
+
+		// by design there are only areas in the list that still have
+		// free pages available
+		page = area->free_pages;
+		area->free_pages = page->next;
+		if (page->next)
+			page->next->prev = NULL;
+
+		heap_free_pages_removed(heap, area, 1);
+
+		if (page->in_use)
+			panic("got an in use page from the free pages list\n");
+		page->in_use = 1;
+
+		pageLocker.Unlock();
+
+		page->bin_index = binIndex;
+		page->free_count = bin->max_free_count;
+		page->empty_index = 0;
+		page->free_list = NULL;
+		page->next = page->prev = NULL;
+		bin->page_list = page;
+	}
+
+	// we have a page where we have a free slot
+	void *address = NULL;
+	if (page->free_list) {
+		// there's a previously freed entry we can use
+		address = page->free_list;
+		page->free_list = (addr_t *)*page->free_list;
+	} else {
+		// the page hasn't been fully allocated so use the next empty_index
+		address = (void *)(page->area->base + page->index * heap->page_size
+			+ page->empty_index * bin->element_size);
+		page->empty_index++;
+	}
+
+	page->free_count--;
+	if (page->free_count == 0) {
+		// the page is now full so we remove it from the page_list
+		bin->page_list = page->next;
+		if (page->next)
+			page->next->prev = NULL;
+		page->next = page->prev = NULL;
+	}
+
+#if KERNEL_HEAP_LEAK_CHECK
+	binLocker.Unlock();
+	heap_add_leak_check_info((addr_t)address, bin->element_size, size);
+#endif
+	return address;
+}
+
+
-#if DEBUG
 static bool
 is_valid_alignment(size_t number)
 {
 	// this cryptic line accepts zero and all powers of two
 	return ((~number + 1) | ((number << 1) - 1)) == ~0UL;
 }
-#endif
 
 
 inline bool
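The several #if KERNEL_HEAP_LEAK_CHECK blocks above are folded into one helper, heap_add_leak_check_info(), which places a heap_leak_check_info record in the last bytes of the allocated block (address + allocated - sizeof(info)), so the bin path and the raw-page path share the same bookkeeping. A hedged user-space sketch of that tail-metadata layout follows; the struct fields, IDs and sizes are simplified placeholders rather than the kernel's definitions.

// Simplified illustration of storing bookkeeping info at the tail of an
// allocation; fields only loosely mirror heap_leak_check_info.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct leak_check_info {
	size_t	size;		// requested size, without the info record
	int32_t	thread;		// placeholder for the allocating thread ID
	int32_t	team;		// placeholder for the allocating team ID
};

// Writes the record into the last sizeof(leak_check_info) bytes of a block
// of 'allocated' bytes starting at 'address'.
static void
add_leak_check_info(uintptr_t address, size_t allocated, size_t size)
{
	leak_check_info *info = (leak_check_info *)(address + allocated
		- sizeof(leak_check_info));
	info->size = size - sizeof(leak_check_info);
	info->thread = 1;	// stand-in for thread_get_current_thread_id()
	info->team = 1;		// stand-in for team_get_current_team_id()
}

int main()
{
	const size_t allocated = 64;	// e.g. the bin's element_size
	const size_t requested = 40;	// caller's size plus the info record
	void *block = malloc(allocated);
	add_leak_check_info((uintptr_t)block, allocated, requested);

	leak_check_info *info = (leak_check_info *)((uintptr_t)block + allocated
		- sizeof(leak_check_info));
	printf("tracked size: %zu\n", info->size);
	free(block);
	return 0;
}

Because both call sites pass the real block size (pageCount * page_size for raw pages, element_size for bins), the record always sits at the very end of the usable block, which is where the free path expects to find it.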
@@ -1486,23 +1511,35 @@ heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
 	size += sizeof(heap_leak_check_info);
 #endif
 
-	if (alignment != 0) {
-		// TODO: The alignment is done by ensuring that the element size
-		// of the target bin is aligned with the requested alignment. This
-		// has the problem that it wastes space because a better (smaller) bin
-		// could possibly be selected. We should pick the best bin and check
-		// if there is an aligned block in the free list or if a new (page
-		// aligned) page has to be allocated anyway.
-		size = ROUNDUP(size, alignment);
+	void *address = NULL;
+	if (alignment < B_PAGE_SIZE) {
+		if (alignment != 0) {
+			// TODO: The alignment is done by ensuring that the element size
+			// of the target bin is aligned with the requested alignment. This
+			// has the problem that it wastes space because a better (smaller)
+			// bin could possibly be selected. We should pick the best bin and
+			// check if there is an aligned block in the free list or if a new
+			// (page aligned) page has to be allocated anyway.
+			size = ROUNDUP(size, alignment);
+			for (uint32 i = 0; i < heap->bin_count; i++) {
+				if (size <= heap->bins[i].element_size
+					&& is_valid_alignment(heap->bins[i].element_size)) {
+					address = heap_allocate_from_bin(heap, i, size);
+					break;
+				}
+			}
+		} else {
+			for (uint32 i = 0; i < heap->bin_count; i++) {
+				if (size <= heap->bins[i].element_size) {
+					address = heap_allocate_from_bin(heap, i, size);
+					break;
+				}
+			}
+		}
 	}
 
-	uint32 binIndex;
-	for (binIndex = 0; binIndex < heap->bin_count; binIndex++) {
-		if (size <= heap->bins[binIndex].element_size)
-			break;
-	}
-
-	void *address = heap_raw_alloc(heap, size, binIndex);
+	if (address == NULL)
+		address = heap_raw_alloc(heap, size, alignment);
 
 #if KERNEL_HEAP_LEAK_CHECK
 	size -= sizeof(heap_leak_check_info);
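For sub-page alignments, heap_memalign() now rounds the size up to the alignment and only takes a bin whose element_size passes is_valid_alignment(), presumably so that every slot offset within a page (a multiple of element_size) stays a multiple of the requested power-of-two alignment; larger alignments fall through to heap_raw_alloc(), which forwards the alignment to the contiguous page search. The "cryptic line" in is_valid_alignment() accepts zero and all powers of two. A small standalone check of that expression, assuming the same unsigned long width as the kernel build:

// Demonstrates that ((~n + 1) | ((n << 1) - 1)) == ~0UL accepts exactly
// zero and the powers of two: ~n + 1 is -n in two's complement, which for a
// power of two keeps every bit from the single set bit upwards, while
// (n << 1) - 1 sets every bit below and including that bit.
#include <cstdio>

static bool
is_valid_alignment(unsigned long number)
{
	return ((~number + 1) | ((number << 1) - 1)) == ~0UL;
}

int main()
{
	for (unsigned long n = 0; n <= 12; n++)
		printf("%2lu -> %s\n", n, is_valid_alignment(n) ? "ok" : "rejected");
	return 0;
}

Running this prints "ok" for 0, 1, 2, 4 and 8 and "rejected" for everything else in the range, which is exactly the property the bin-selection loop relies on.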
@@ -2183,9 +2220,9 @@ memalign(size_t alignment, size_t size)
 
 		address = (void *)((addr_t)address + sizeof(area_allocation_info));
 		if (alignment != 0) {
+			address = (void *)ROUNDUP((addr_t)address, alignment);
 			ASSERT((addr_t)address % alignment == 0);
 			ASSERT((addr_t)address + size - 1 < (addr_t)info + areaSize - 1);
-			address = (void *)ROUNDUP((addr_t)address, alignment);
 		}
 
 		TRACE(("heap: allocated area %ld for huge allocation of %lu bytes\n",
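The final hunk reorders the huge-allocation path in memalign() so the address is rounded up to the requested alignment before the ASSERTs run; previously the checks were made on the address directly behind the area_allocation_info header, which is not necessarily aligned. A tiny sketch of that ordering; the ROUNDUP definition, header size and constants here are illustrative stand-ins, not the kernel's values.

// Shows why the ROUNDUP has to happen before asserting alignment: the
// address right after a header struct is usually not aligned yet.
#include <cassert>
#include <cstdint>
#include <cstdio>

#define ROUNDUP(a, b)	(((a) + ((b) - 1)) / (b) * (b))

int main()
{
	const uintptr_t areaBase = 0x100000;	// hypothetical area start
	const size_t headerSize = 24;			// stand-in for area_allocation_info
	const size_t alignment = 64;

	uintptr_t address = areaBase + headerSize;	// 0x100018, not 64-byte aligned
	address = ROUNDUP(address, alignment);		// round up first...
	assert(address % alignment == 0);			// ...then the check holds
	printf("aligned address: %#lx\n", (unsigned long)address);
	return 0;
}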