From faed617707da8b09adc50f3dc22642f7d3d8a12f Mon Sep 17 00:00:00 2001
From: Ingo Weinhold
Date: Mon, 28 Mar 2005 15:34:20 +0000
Subject: [PATCH] Implemented fork() support. We basically need to re-get the
 heap area after fork()ing and recreate all semaphores we've been using. To
 do the latter, we simply maintain a list of all locks. fork()ing
 multithreaded teams can still be troublesome.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@12097 a95241bf-73f2-0310-859d-f6bbb57e9c96
---
 .../libroot/posix/malloc/arch-specific.cpp | 107 +++++++++++++++---
 .../libroot/posix/malloc/arch-specific.h   |  17 +-
 2 files changed, 103 insertions(+), 21 deletions(-)

diff --git a/src/kernel/libroot/posix/malloc/arch-specific.cpp b/src/kernel/libroot/posix/malloc/arch-specific.cpp
index c86547d5f5..c480cddd6a 100644
--- a/src/kernel/libroot/posix/malloc/arch-specific.cpp
+++ b/src/kernel/libroot/posix/malloc/arch-specific.cpp
@@ -23,6 +23,7 @@
 
 #include 
 #include 
+#include <util/DoublyLinkedList.h>
 #include 
 
 using namespace BPrivate;
@@ -33,17 +34,43 @@ struct free_chunk {
 	size_t	size;
 };
 
+typedef DoublyLinkedList<hoardLockType> LockList;
+
+
 static const size_t kInitialHeapSize = 50 * B_PAGE_SIZE;
 	// that's about what hoard allocates anyway
 
 static area_id sHeapArea;
 static hoardLockType sHeapLock;
-static addr_t sHeapBase;
-static size_t sHeapSize, sHeapAreaSize;
+static void *sHeapBase;
+static addr_t sFreeHeapBase;
+static size_t sFreeHeapSize, sHeapAreaSize;
 static free_chunk *sFreeChunks;
+static LockList sLockList;
+
+static void initialize_hoard_lock(hoardLockType &lock, const char *name);
+static void reinitialize_hoard_lock(hoardLockType &lock);
 
-// ToDo: add real fork() support!
 
+static void
+init_after_fork()
+{
+	// re-initialize all locks
+	for (LockList::Iterator it = sLockList.GetIterator(); it.HasNext();) {
+		hoardLockType *lock = it.Next();
+		reinitialize_hoard_lock(*lock);
+	}
+
+	// find the heap area
+	sHeapArea = area_for(sHeapBase);
+	if (sHeapArea < 0) {
+		// Where has it gone?
+		debug_printf("hoard: init_after_fork(): thread %ld: heap area not "
+			"found! Base address: %p\n", find_thread(NULL), sHeapBase);
+		exit(1);
+	}
+}
+
 
 extern "C" status_t
 __init_heap(void)
@@ -54,16 +81,62 @@ __init_heap(void)
 	// ToDo: add a VM call that instructs other areas to avoid the space after the heap when possible
 	// (and if not, create it at the end of that range, so that the heap can grow as much as possible)
 	// Then, move the heap back to 256 or 512 MB
-	sHeapBase = 0x30000000;
+	sHeapBase = (void*)0x30000000;
 		// let the heap start at 3*256 MB for now
 
 	sHeapArea = create_area("heap", (void **)&sHeapBase, B_BASE_ADDRESS,
 		sHeapAreaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
+	sFreeHeapBase = (addr_t)sHeapBase;
+
+	// init the lock list and the heap lock
+	// Thereafter all locks should be initialized with hoardLockInit(). They
+	// will be properly re-initialized after a fork(). Note that the heap
+	// lock, too, is initialized with hoardLockInit() -- this works fine and
+	// has the advantage that it is in the lock list itself, so we won't
+	// need any special handling on fork().
+	new (&sLockList) LockList;
 	hoardLockInit(sHeapLock, "heap");
-	return sHeapArea >= B_OK ? B_OK : sHeapArea;
+
+	if (sHeapArea < 0)
+		return sHeapArea;
+
+	atfork(&init_after_fork);
+		// Note: Needs malloc(). Hence we need to be fully initialized.
+		// ToDo: We should actually also install a hook that is called before
+		// fork() is being executed. In a multithreaded app it would need to
+		// acquire *all* allocator locks, so that we don't fork() an
+		// inconsistent state.
+
+	return B_OK;
 }
 
+
+static void
+initialize_hoard_lock(hoardLockType &lock, const char *name)
+{
+	lock.ben = 0;
+	lock.sem = create_sem(0, name);
+	if (lock.sem < 0) {
+		debug_printf("hoard: initialize_hoard_lock(): Failed to create "
+			"semaphore\n");
+	}
+}
+
+
+static void
+reinitialize_hoard_lock(hoardLockType &lock)
+{
+	// Get the info for the original semaphore, so we can give the new one
+	// the same name. This can fail, e.g. when the original team is already
+	// gone.
+	sem_info info;
+	if (get_sem_info(lock.sem, &info) == B_OK)
+		initialize_hoard_lock(lock, info.name);
+	else
+		initialize_hoard_lock(lock, "reinitialized hoard lock");
+}
+
+
 namespace BPrivate {
 
 void *
@@ -77,7 +150,6 @@ hoardSbrk(long size)
 	hoardLock(sHeapLock);
 
 	// find chunk in free list
-
 	free_chunk *chunk = sFreeChunks, *last = NULL;
 	for (; chunk != NULL; chunk = chunk->next, last = chunk) {
 		if (chunk->size < (size_t)size)
@@ -101,22 +173,23 @@ hoardSbrk(long size)
 			sFreeChunks = chunk;
 
 		hoardUnlock(sHeapLock);
+
 		return address;
 	}
 
 	// There was no chunk, let's see if the area is large enough
-	size_t oldHeapSize = sHeapSize;
-	sHeapSize += size;
+	size_t oldHeapSize = sFreeHeapSize;
+	sFreeHeapSize += size;
 
 	// round to next page size
-	size_t pageSize = (sHeapSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
+	size_t pageSize = (sFreeHeapSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
 
 	if (pageSize < sHeapAreaSize) {
 		SERIAL_PRINT(("HEAP-%ld: heap area large enough for %ld\n",
 			find_thread(NULL), size));
 
 		// the area is large enough already
 		hoardUnlock(sHeapLock);
-		return (void *)(sHeapBase + oldHeapSize);
+		return (void *)(sFreeHeapBase + oldHeapSize);
 	}
 
 	// We need to grow the area
@@ -133,7 +206,8 @@ hoardSbrk(long size)
 	sHeapAreaSize = pageSize;
 
 	hoardUnlock(sHeapLock);
-	return (void *)(sHeapBase + oldHeapSize);
+
+	return (void *)(sFreeHeapBase + oldHeapSize);
 }
 
@@ -147,8 +221,15 @@ hoardUnsbrk(void *ptr, long size)
 
 
 void
 hoardLockInit(hoardLockType &lock, const char *name)
 {
-	lock.ben = 0;
-	lock.sem = create_sem(0, name);
+	new (&lock) hoardLockType;
+		// initializes the list link
+
+	initialize_hoard_lock(lock, name);
+
+	// add the lock to the lock list (the heap lock also protects the list)
+	hoardLock(sHeapLock);
+	sLockList.Add(&lock);
+	hoardUnlock(sHeapLock);
 }
 
diff --git a/src/kernel/libroot/posix/malloc/arch-specific.h b/src/kernel/libroot/posix/malloc/arch-specific.h
index e717f65d02..fced5f1c7b 100644
--- a/src/kernel/libroot/posix/malloc/arch-specific.h
+++ b/src/kernel/libroot/posix/malloc/arch-specific.h
@@ -22,20 +22,21 @@
 
 #include "config.h"
 
+#include <new>
+
 #include 
 #include 
+#include <util/DoublyLinkedList.h>
 
-typedef struct {
+
+// Note: Since locks are currently never uninitialized, a singly linked list
+// would suffice. But we may change that some day, and the singly linked
+// list interface is ugly, anyway. ;-)
+struct hoardLockType : DoublyLinkedListLinkImpl<hoardLockType> {
 	int32 ben;
 	sem_id sem;
-} hoardLockType;
-
-inline void *
-operator new(size_t, void *_P)
-{
-	return _P;
-}
+};
 
 namespace BPrivate {
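
Note for readers experimenting with this approach: the sketch below condenses the patch's fork() mechanism into a self-contained example. It is an illustration, not the libroot code: ReinitLock and its hand-rolled intrusive list stand in for the private hoardLockType/DoublyLinkedList, and the extern atfork() declaration mirrors the libroot-internal hook the patch uses (its exact signature is an assumption here). Semaphores are per-team on BeOS/Haiku, so a forked child inherits dangling sem_ids; the child hook recreates every registered semaphore, just as init_after_fork() does above. The heap-area re-lookup via area_for() is omitted.

// A hedged sketch of the lock-registry-plus-fork-hook pattern, using only
// the public BeOS/Haiku kernel API (create_sem(), acquire_sem(),
// release_sem(), atomic_add(), get_sem_info()). Names are illustrative.

#include <OS.h>

extern "C" int atfork(void (*function)(void));
	// libroot-internal hook the patch relies on; assumed declaration

struct ReinitLock {
	int32		ben;	// benaphore counter
	sem_id		sem;	// backing semaphore; dead in a forked child
	ReinitLock	*next;	// simple intrusive list instead of DoublyLinkedList
};

static ReinitLock *sLocks = NULL;
	// all registered locks; the real patch guards its list with the heap lock

static void
init_lock(ReinitLock &lock, const char *name)
{
	lock.ben = 0;
	lock.sem = create_sem(0, name);
}

static void
register_lock(ReinitLock &lock, const char *name)
{
	init_lock(lock, name);
	lock.next = sLocks;
	sLocks = &lock;
}

static void
lock_acquire(ReinitLock &lock)
{
	// benaphore: the semaphore is only touched under contention
	if (atomic_add(&lock.ben, 1) > 0)
		acquire_sem(lock.sem);
}

static void
lock_release(ReinitLock &lock)
{
	if (atomic_add(&lock.ben, -1) > 1)
		release_sem(lock.sem);
}

static void
reinit_after_fork()
{
	// The child inherited sem_ids that belong to the parent team. Recreate
	// each registered semaphore, re-using the original name when the
	// parent's semaphore can still be queried -- exactly the fallback
	// reinitialize_hoard_lock() implements above.
	for (ReinitLock *lock = sLocks; lock != NULL; lock = lock->next) {
		sem_info info;
		const char *name = get_sem_info(lock->sem, &info) == B_OK
			? info.name : "reinitialized lock";
		init_lock(*lock, name);
	}
}

Installing reinit_after_fork() via atfork() reproduces what the patch does for the allocator. The design caveat from the ToDo still applies: a prepare hook that acquires *all* registered locks before fork() would be needed to keep a multithreaded parent from forking a half-updated heap.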