* Set freed memory to 0xdeadbeef before returning it to the page. This allows
  us to not hold the bin lock while setting the pattern and to only protect
  the actual freeing, which may reduce bin lock contention a little bit (a
  minimal sketch of the pattern follows below, before the diff).
* Tiny optimization for force-clearing 0xdeadbeef: do it after setting the
  memory to 0xcc, so that it's less likely we have to do it.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@32473 a95241bf-73f2-0310-859d-f6bbb57e9c96
Michael Lotz 2009-08-17 17:25:24 +00:00
parent dacf06e665
commit 73c035dc11
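What the two changes boil down to, as a minimal standalone sketch rather than Haiku's actual heap code (the Bin struct, the poison_free/poison_alloc names and the std::mutex are stand-ins; the real code uses the kernel's per-bin lock with MutexLocker and the PARANOID_KERNEL_MALLOC / PARANOID_KERNEL_FREE compile-time switches): the poison fill runs outside the bin lock and only the free-list push is locked, and the allocation path fills with 0xcc before checking for a leftover 0xdeadbeef marker.

// Hypothetical sketch, not Haiku's heap API.
#include <cstdint>
#include <cstring>
#include <mutex>

struct Bin {
	std::mutex lock;			// stands in for the per-bin lock
	void *freeList = nullptr;	// singly linked list threaded through free blocks
	size_t elementSize = 64;	// every block in a bin has the same size
};

const uint32_t kFreedPattern = 0xdeadbeef;	// marks freed memory
const uint8_t kAllocPattern = 0xcc;			// marks freshly allocated memory

void
poison_free(Bin &bin, void *address)
{
	// Fill the block with the freed pattern while *not* holding the bin
	// lock; no other thread may legally touch the block at this point. The
	// first word is skipped because it will hold the free-list pointer.
	uint32_t *dead = (uint32_t *)address;
	for (size_t i = 1; i < bin.elementSize / sizeof(uint32_t); i++)
		dead[i] = kFreedPattern;

	// Only the actual free-list manipulation is protected.
	std::lock_guard<std::mutex> locker(bin.lock);
	*(void **)address = bin.freeList;
	bin.freeList = address;
}

void *
poison_alloc(Bin &bin)
{
	void *address;
	{
		std::lock_guard<std::mutex> locker(bin.lock);
		address = bin.freeList;
		if (address == nullptr)
			return nullptr;
		bin.freeList = *(void **)address;
	}

	// Overwrite with the allocation pattern first ...
	memset(address, kAllocPattern, bin.elementSize);

	// ... so this force-clear of the freed marker almost never has to
	// write: it only matters when the fill above is compiled out and the
	// caller does not initialize the memory itself.
	uint32_t *words = (uint32_t *)address;
	if (words[1] == kFreedPattern)
		words[1] = 0xcccccccc;

	return address;
}

As the diff below shows, the real heap_free() still takes the bin lock for the free-list walk in the double-free check, and the pointer validity check now sits next to the locked free-list push.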


@@ -1433,6 +1433,10 @@ heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
	if (address == NULL)
		return address;
#if PARANOID_KERNEL_MALLOC
	memset(address, 0xcc, size);
#endif
#if PARANOID_KERNEL_FREE
	// make sure 0xdeadbeef is cleared if we do not overwrite the memory
	// and the user does not clear it
@@ -1440,10 +1444,6 @@ heap_memalign(heap_allocator *heap, size_t alignment, size_t size)
		((uint32 *)address)[1] = 0xcccccccc;
#endif
#if PARANOID_KERNEL_MALLOC
	memset(address, 0xcc, size);
#endif
	return address;
}
@@ -1495,19 +1495,12 @@ heap_free(heap_allocator *heap, void *address)
	if (page->bin_index < heap->bin_count) {
		// small allocation
		heap_bin *bin = &heap->bins[page->bin_index];
		MutexLocker binLocker(bin->lock);
		if (((addr_t)address - area->base - page->index
			* heap->page_size) % bin->element_size != 0) {
			panic("free(): passed invalid pointer %p supposed to be in bin for "
				"element size %ld\n", address, bin->element_size);
			return B_ERROR;
		}
#if PARANOID_KERNEL_FREE
		if (((uint32 *)address)[1] == 0xdeadbeef) {
			// This block looks like it was freed already, walk the free list
			// on this page to make sure this address doesn't exist.
			MutexLocker binLocker(bin->lock);
			for (addr_t *temp = page->free_list; temp != NULL;
				temp = (addr_t *)*temp) {
				if (temp == address) {
@@ -1518,19 +1511,21 @@ heap_free(heap_allocator *heap, void *address)
			}
		}
		uint32 *dead = (uint32 *)address;
		if (bin->element_size % 4 != 0) {
			panic("free(): didn't expect a bin element size that is not a "
				"multiple of 4\n");
			return B_ERROR;
		}
		// the first 4 bytes are overwritten with the next free list pointer
		// later
		uint32 *dead = (uint32 *)address;
		for (uint32 i = 1; i < bin->element_size / sizeof(uint32); i++)
			dead[i] = 0xdeadbeef;
#endif
		MutexLocker binLocker(bin->lock);
		if (((addr_t)address - area->base - page->index
			* heap->page_size) % bin->element_size != 0) {
			panic("free(): passed invalid pointer %p supposed to be in bin for "
				"element size %ld\n", address, bin->element_size);
			return B_ERROR;
		}
		// add the address to the page free list
		*(addr_t *)address = (addr_t)page->free_list;
		page->free_list = (addr_t *)address;