bonefish+mmlr:

* Introduce "paranoid" malloc/free into the slab allocator (initializing
  allocated memory to 0xcc and setting freed memory to 0xdeadbeef).
* Allow for optional stack traces for slab object cache tracing.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@43046 a95241bf-73f2-0310-859d-f6bbb57e9c96
Michael Lotz 2011-10-31 21:58:00 +00:00
parent fe8f0f4601
commit 72156a402f
8 changed files with 159 additions and 5 deletions
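
As a rough illustration of the first bullet, here is a minimal userland sketch
of the pattern-fill idea. The names paranoid_alloc and paranoid_free and this
standalone fill_block are invented for the example; the actual kernel-side
helpers and checks are in the hunks further down (fill_block(),
fill_allocated_block(), fill_freed_block() and the PARANOID_KERNEL_FREE checks
in object_cache_free()).

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Stand-ins for the kernel's fill patterns: freshly allocated blocks are
    // filled with 0xcc bytes, freed blocks with 0xdeadbeef words.
    static const uint32_t kAllocatedPattern = 0xcccccccc;
    static const uint32_t kFreedPattern = 0xdeadbeef;

    // Fill a block with a 32-bit pattern; trailing bytes that do not make up
    // a full word are left untouched (mirrors the rounding in fill_block()).
    static void*
    fill_block(void* buffer, size_t size, uint32_t pattern)
    {
        if (buffer == NULL)
            return NULL;

        size &= ~(sizeof(pattern) - 1);
        uint32_t* words = (uint32_t*)buffer;
        for (size_t i = 0; i < size / sizeof(pattern); i++)
            words[i] = pattern;

        return buffer;
    }

    // Toy wrappers: paranoid_alloc() hands out 0xcc-filled memory,
    // paranoid_free() checks for the freed pattern before writing it.
    static void*
    paranoid_alloc(size_t size)
    {
        return fill_block(malloc(size), size, kAllocatedPattern);
    }

    static void
    paranoid_free(void* object, size_t size)
    {
        // A block that still starts with 0xdeadbeef was most likely freed
        // already; the kernel version then walks the slab free list and the
        // depot magazines and panics if the object is found there.
        if (size >= sizeof(uint32_t) && *(uint32_t*)object == kFreedPattern) {
            fprintf(stderr, "possible double free of %p\n", object);
            return;
        }

        fill_block(object, size, kFreedPattern);
        // The real allocator returns the block for reuse; the sketch leaks it
        // so the pattern stays inspectable.
    }

    int
    main()
    {
        void* block = paranoid_alloc(64);
        printf("first byte after alloc: 0x%02x\n",
            (unsigned)((uint8_t*)block)[0]);

        paranoid_free(block, 64);
        printf("first word after free: 0x%08x\n",
            (unsigned)*(uint32_t*)block);

        paranoid_free(block, 64);    // triggers the double-free warning
        return 0;
    }

Building and running this shows 0xcc in freshly allocated memory, 0xdeadbeef
after the first free and the warning on the second free. The second bullet
needs no extra code: the new SLAB_OBJECT_CACHE_TRACING_STACK_TRACE define in
the first hunk is the stack trace depth, and a nonzero value makes the object
cache trace entries record stack traces.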

View File

@@ -48,6 +48,7 @@
#define SIGNAL_TRACING 0
#define SLAB_MEMORY_MANAGER_TRACING 0
#define SLAB_OBJECT_CACHE_TRACING 0
#define SLAB_OBJECT_CACHE_TRACING_STACK_TRACE 0 /* stack trace depth */
#define SWAP_TRACING 0
#define SYSCALL_TRACING 0
#define SYSCALL_TRACING_IGNORE_KTRACE_OUTPUT 1

View File

@@ -45,6 +45,10 @@ void object_depot_store(object_depot* depot, void* object, uint32 flags);
void object_depot_make_empty(object_depot* depot, uint32 flags);
#if PARANOID_KERNEL_FREE
bool object_depot_contains_object(object_depot* depot, void* object);
#endif
#ifdef __cplusplus
}
#endif

View File

@@ -617,7 +617,11 @@ MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
? CREATE_AREA_DONT_WAIT : 0)
| CREATE_AREA_DONT_CLEAR,
&virtualRestrictions, &physicalRestrictions, &_pages);
return area >= 0 ? B_OK : area;
status_t result = area >= 0 ? B_OK : area;
if (result == B_OK)
fill_allocated_block(_pages, size);
return result;
}
// determine chunk size (small or medium)
@@ -656,6 +660,8 @@ MemoryManager::AllocateRaw(size_t size, uint32 flags, void*& _pages)
chunk->reference = (addr_t)chunkAddress + size - 1;
_pages = (void*)chunkAddress;
fill_allocated_block(_pages, size);
TRACE("MemoryManager::AllocateRaw() done: %p (meta chunk: %d, chunk %d)\n",
_pages, int(metaChunk - area->metaChunks),
int(chunk - metaChunk->chunks));

View File

@@ -238,3 +238,32 @@ ObjectCache::ReturnObjectToSlab(slab* source, void* object, uint32 flags)
partial.Add(source);
}
}
#if PARANOID_KERNEL_FREE
bool
ObjectCache::AssertObjectNotFreed(void* object)
{
MutexLocker locker(lock);
slab* source = ObjectSlab(object);
if (!partial.Contains(source) && !full.Contains(source)) {
panic("object_cache: to be freed object slab not part of cache!");
return false;
}
object_link* link = object_to_link(object, object_size);
for (object_link* freeLink = source->free; freeLink != NULL;
freeLink = freeLink->next) {
if (freeLink == link) {
panic("object_cache: double free of %p (slab %p, cache %p)",
object, source, this);
return false;
}
}
return true;
}
#endif // PARANOID_KERNEL_FREE

View File

@@ -107,6 +107,10 @@ public:
void FreePages(void* pages);
status_t EarlyAllocatePages(void** pages, uint32 flags);
void EarlyFreePages(void* pages);
#if PARANOID_KERNEL_FREE
bool AssertObjectNotFreed(void* object);
#endif
};

View File

@@ -31,6 +31,10 @@ public:
inline void* Pop();
inline bool Push(void* object);
#if PARANOID_KERNEL_FREE
bool ContainsObject(void* object) const;
#endif
};
@@ -72,6 +76,22 @@ DepotMagazine::Push(void* object)
}
#if PARANOID_KERNEL_FREE
bool
DepotMagazine::ContainsObject(void* object) const
{
for (uint16 i = 0; i < current_round; i++) {
if (rounds[i] == object)
return true;
}
return false;
}
#endif // PARANOID_KERNEL_FREE
// #pragma mark -
@@ -352,6 +372,40 @@ object_depot_make_empty(object_depot* depot, uint32 flags)
}
#if PARANOID_KERNEL_FREE
bool
object_depot_contains_object(object_depot* depot, void* object)
{
WriteLocker writeLocker(depot->outer_lock);
int cpuCount = smp_get_num_cpus();
for (int i = 0; i < cpuCount; i++) {
depot_cpu_store& store = depot->stores[i];
if (store.loaded != NULL && !store.loaded->IsEmpty()) {
if (store.loaded->ContainsObject(object))
return true;
}
if (store.previous != NULL && !store.previous->IsEmpty()) {
if (store.previous->ContainsObject(object))
return true;
}
}
for (DepotMagazine* magazine = depot->full; magazine != NULL;
magazine = magazine->next) {
if (magazine->ContainsObject(object))
return true;
}
return false;
}
#endif // PARANOID_KERNEL_FREE
// #pragma mark - private kernel API

View File

@@ -54,10 +54,12 @@ static ConditionVariable sMaintenanceCondition;
namespace SlabObjectCacheTracing {
class ObjectCacheTraceEntry : public AbstractTraceEntry {
class ObjectCacheTraceEntry
: public TRACE_ENTRY_SELECTOR(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE) {
public:
ObjectCacheTraceEntry(ObjectCache* cache)
:
TraceEntryBase(SLAB_OBJECT_CACHE_TRACING_STACK_TRACE, 0, true),
fCache(cache)
{
}
@@ -668,7 +670,7 @@ object_cache_alloc(object_cache* cache, uint32 flags)
void* object = object_depot_obtain(&cache->depot);
if (object) {
T(Alloc(cache, flags, object));
return object;
return fill_allocated_block(object, cache->object_size);
}
}
@@ -717,7 +719,7 @@ object_cache_alloc(object_cache* cache, uint32 flags)
void* object = link_to_object(link, cache->object_size);
T(Alloc(cache, flags, object));
return object;
return fill_allocated_block(object, cache->object_size);
}
@@ -729,7 +731,24 @@ object_cache_free(object_cache* cache, void* object, uint32 flags)
T(Free(cache, object));
if (!(cache->flags & CACHE_NO_DEPOT)) {
#if PARANOID_KERNEL_FREE
// TODO: allow forcing the check even if we don't find deadbeef
if (*(uint32*)object == 0xdeadbeef) {
if (!cache->AssertObjectNotFreed(object))
return;
if ((cache->flags & CACHE_NO_DEPOT) == 0) {
if (object_depot_contains_object(&cache->depot, object)) {
panic("object_cache: object %p is already freed", object);
return;
}
}
}
fill_freed_block(object, cache->object_size);
#endif
if ((cache->flags & CACHE_NO_DEPOT) == 0) {
object_depot_store(&cache->depot, object, flags);
return;
}

View File

@@ -84,4 +84,41 @@ slab_internal_free(void* buffer, uint32 flags)
}
#if PARANOID_KERNEL_MALLOC || PARANOID_KERNEL_FREE
static inline void*
fill_block(void* buffer, size_t size, uint32 pattern)
{
if (buffer == NULL)
return NULL;
size &= ~(sizeof(pattern) - 1);
for (size_t i = 0; i < size / sizeof(pattern); i++)
((uint32*)buffer)[i] = pattern;
return buffer;
}
#endif
static inline void*
fill_allocated_block(void* buffer, size_t size)
{
#if PARANOID_KERNEL_MALLOC
return fill_block(buffer, size, 0xcccccccc);
#else
return buffer;
#endif
}
static inline void*
fill_freed_block(void* buffer, size_t size)
{
#if PARANOID_KERNEL_FREE
return fill_block(buffer, size, 0xdeadbeef);
#else
return buffer;
#endif
}
#endif // SLAB_PRIVATE_H