anevilyak+korli+mmlr:

* Check for overflows in memory allocation. If someone happened to (erroneously)
  try to allocate a negative amount of memory we could overflow and crash
  because of the sizes getting messed up.
* Review and update the alignment logic which was a bit broken for the huge
  allocation case (reaching the area threshold). Also assert the results so
  next time this will be easier to spot.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@33638 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Michael Lotz 2009-10-18 12:35:49 +00:00
parent 599586baa6
commit b501a0387d
2 changed files with 20 additions and 10 deletions

View File

@@ -1986,12 +1986,14 @@ memalign(size_t alignment, size_t size)
 	if (!gKernelStartup && size > HEAP_AREA_USE_THRESHOLD) {
 		// don't even attempt such a huge allocation - use areas instead
-		size_t areaSize = size + sizeof(area_allocation_info);
-		if (alignment != 0)
-			areaSize = ROUNDUP(areaSize, alignment);
+		size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info)
+			+ alignment, B_PAGE_SIZE);
+		if (areaSize < size) {
+			// the size overflowed
+			return NULL;
+		}
 		void *address = NULL;
-		areaSize = ROUNDUP(areaSize, B_PAGE_SIZE);
 		area_id allocationArea = create_area("memalign area", &address,
 			B_ANY_KERNEL_BLOCK_ADDRESS, areaSize, B_FULL_LOCK,
 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
@@ -2009,8 +2011,11 @@ memalign(size_t alignment, size_t size)
 	info->allocation_alignment = alignment;
 	address = (void *)((addr_t)address + sizeof(area_allocation_info));
-	if (alignment != 0)
+	if (alignment != 0) {
 		address = (void *)ROUNDUP((addr_t)address, alignment);
+		ASSERT((addr_t)address % alignment == 0);
+		ASSERT((addr_t)address + size - 1 < (addr_t)info + areaSize - 1);
+	}
 	TRACE(("heap: allocated area %ld for huge allocation of %lu bytes\n",
 		allocationArea, size));

View File

@@ -1550,12 +1550,14 @@ memalign(size_t alignment, size_t size)
 {
 	if (size > HEAP_AREA_USE_THRESHOLD) {
 		// don't even attempt such a huge allocation - use areas instead
-		size_t areaSize = size + sizeof(area_allocation_info);
-		if (alignment != 0)
-			areaSize = ROUNDUP(areaSize, alignment);
+		size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info)
+			+ alignment, B_PAGE_SIZE);
+		if (areaSize < size) {
+			// the size overflowed
+			return NULL;
+		}
 		void *address = NULL;
-		areaSize = ROUNDUP(areaSize, B_PAGE_SIZE);
 		area_id allocationArea = create_area("memalign area", &address,
 			B_ANY_ADDRESS, areaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
 		if (allocationArea < B_OK) {
if (allocationArea < B_OK) {
@@ -1573,8 +1575,11 @@ memalign(size_t alignment, size_t size)
 	info->allocation_alignment = alignment;
 	address = (void *)((addr_t)address + sizeof(area_allocation_info));
-	if (alignment != 0)
+	if (alignment != 0) {
 		address = (void *)ROUNDUP((addr_t)address, alignment);
+		ASSERT((addr_t)address % alignment == 0);
+		ASSERT((addr_t)address + size - 1 < (addr_t)info + areaSize - 1);
+	}
 	INFO(("heap: allocated area %ld for huge allocation of %lu bytes\n",
 		allocationArea, size));