Implemented fork() support. We basically need to re-acquire the heap area
after fork()ing and recreate all semaphores we've been using. To do the
latter, we simply maintain a list of all locks. fork()ing multithreaded
teams can still be troublesome.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@12097 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 576c52f8b1
commit faed617707
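The mechanism in a nutshell: on BeOS/Haiku, semaphores belong to a team, so the sem_ids a child inherits across fork() still refer to the parent's semaphores. Every lock therefore registers itself in a global list, and a child-side fork handler walks that list and creates a fresh semaphore for each lock. Below is a minimal sketch of the pattern, using the portable pthread_atfork() in place of libroot's private atfork(); the names (Lock, gLockList, register_lock, ...) are illustrative, not from this commit:

    #include <pthread.h>
    #include <OS.h>

    struct Lock {
        int32  ben;    // benaphore counter
        sem_id sem;    // backing semaphore
        Lock*  next;   // intrusive list link
    };

    static Lock* gLockList = NULL;   // head of the list of all live locks

    static void
    register_lock(Lock* lock, const char* name)
    {
        lock->ben = 0;
        lock->sem = create_sem(0, name);
        lock->next = gLockList;      // protected by a global lock in real code
        gLockList = lock;
    }

    static void
    reinit_locks_in_child()
    {
        // The inherited sem_ids belong to the parent team, so each lock
        // needs a fresh, unlocked semaphore in the child.
        for (Lock* lock = gLockList; lock != NULL; lock = lock->next) {
            lock->ben = 0;
            lock->sem = create_sem(0, "reinitialized lock");
        }
    }

    static void
    install_fork_handler()
    {
        pthread_atfork(NULL, NULL, &reinit_locks_in_child);  // child-side hook
    }

An intrusive list is the natural choice here: registering a lock must never call malloc(), because the allocator itself is what is being protected.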
@@ -23,6 +23,7 @@
 #include <OS.h>
 #include <Debug.h>
 
+#include <stdlib.h>
 #include <unistd.h>
 
 using namespace BPrivate;
@@ -33,17 +34,43 @@ struct free_chunk {
     size_t size;
 };
 
+typedef DoublyLinkedList<hoardLockType> LockList;
+
 
 static const size_t kInitialHeapSize = 50 * B_PAGE_SIZE;
     // that's about what hoard allocates anyway
 
 static area_id sHeapArea;
 static hoardLockType sHeapLock;
-static addr_t sHeapBase;
-static size_t sHeapSize, sHeapAreaSize;
+static void *sHeapBase;
+static addr_t sFreeHeapBase;
+static size_t sFreeHeapSize, sHeapAreaSize;
 static free_chunk *sFreeChunks;
+static LockList sLockList;
+
+static void initialize_hoard_lock(hoardLockType &lock, const char *name);
+static void reinitialize_hoard_lock(hoardLockType &lock);
 
 
-// ToDo: add real fork() support!
+static void
+init_after_fork()
+{
+    // re-initialize all locks
+    for (LockList::Iterator it = sLockList.GetIterator(); it.HasNext();) {
+        hoardLockType *lock = it.Next();
+        reinitialize_hoard_lock(*lock);
+    }
+
+    // find the heap area
+    sHeapArea = area_for(sHeapBase);
+    if (sHeapArea < 0) {
+        // Where has it gone?
+        debug_printf("hoard: init_after_fork(): thread %ld, Heap area not "
+            "found! Base address: %p\n", find_thread(NULL), sHeapBase);
+        exit(1);
+    }
+}
+
+
 extern "C" status_t
 __init_heap(void)
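Why init_after_fork() calls area_for(): fork() copies the parent's areas into the child, but the copies get new area_id values, so the sHeapArea cached in the parent is stale in the child. Looking the area up by its base address recovers the child's own ID. A hedged illustration of just that step (names as in the diff above, error handling simplified):

    #include <OS.h>
    #include <stdlib.h>

    static void *sHeapBase;      // set by create_area() in __init_heap()
    static area_id sHeapArea;

    static void
    refresh_heap_area_id()
    {
        // area_for() answers for the calling team, so in the child this
        // yields the ID of the child's copy of the heap area.
        sHeapArea = area_for(sHeapBase);
        if (sHeapArea < 0)
            exit(1);  // the heap is gone; nothing sensible left to do
    }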
@@ -54,16 +81,62 @@ __init_heap(void)
     // ToDo: add a VM call that instructs other areas to avoid the space after the heap when possible
     // (and if not, create it at the end of that range, so that the heap can grow as much as possible)
     // Then, move the heap back to 256 or 512 MB
-    sHeapBase = 0x30000000;
+    sHeapBase = (void*)0x30000000;
         // let the heap start at 3*256 MB for now
 
     sHeapArea = create_area("heap", (void **)&sHeapBase, B_BASE_ADDRESS,
         sHeapAreaSize, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
+    sFreeHeapBase = (addr_t)sHeapBase;
+
+    // Init the lock list and the heap lock.
+    // Hereafter all locks should be initialized with hoardLockInit(). They
+    // will be properly re-initialized after a fork(). Note that the heap
+    // lock, too, is initialized with hoardLockInit() -- this works fine
+    // and has the advantage that it is in the lock list itself, so we won't
+    // need any special handling on fork().
+    new (&sLockList) LockList;
+
     hoardLockInit(sHeapLock, "heap");
-    return sHeapArea >= B_OK ? B_OK : sHeapArea;
+
+    if (sHeapArea < 0)
+        return sHeapArea;
+
+    atfork(&init_after_fork);
+        // Note: needs malloc(), hence we need to be fully initialized.
+    // ToDo: we should actually also install a hook that is called before
+    // fork() is executed. In a multithreaded app it would need to
+    // acquire *all* allocator locks, so that we don't fork() an
+    // inconsistent state.
+
+    return B_OK;
 }
 
 
+static void
+initialize_hoard_lock(hoardLockType &lock, const char *name)
+{
+    lock.ben = 0;
+    lock.sem = create_sem(0, name);
+    if (lock.sem < 0) {
+        debug_printf("hoard: initialize_hoard_lock(): Failed to create "
+            "semaphore");
+    }
+}
+
+
+static void
+reinitialize_hoard_lock(hoardLockType &lock)
+{
+    // Get the info for the original semaphore, so we can name the new one
+    // just the same. This can fail, e.g. when the original team is already
+    // gone.
+    sem_info info;
+    if (get_sem_info(lock.sem, &info) == B_OK)
+        initialize_hoard_lock(lock, info.name);
+    else
+        initialize_hoard_lock(lock, "reinitialized hoard lock");
+}
+
+
 namespace BPrivate {
 
 void *
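The ToDo at the end of __init_heap() describes the standard three-handler scheme: a prepare hook acquires all allocator locks before fork() so no thread can be holding one mid-operation, the parent hook releases them afterwards, and the child hook puts them back into a usable state. A sketch with a single mutex standing in for "all allocator locks" (pthread_atfork() shown in place of libroot's private atfork(); all names are illustrative):

    #include <pthread.h>

    // One mutex stands in for "all allocator locks" here.
    static pthread_mutex_t gAllocatorLock = PTHREAD_MUTEX_INITIALIZER;

    static void prepare_fork() { pthread_mutex_lock(&gAllocatorLock); }
    static void parent_after() { pthread_mutex_unlock(&gAllocatorLock); }
    static void child_after()  { pthread_mutex_unlock(&gAllocatorLock); }
        // The forking thread acquired the lock in prepare_fork() and is the
        // only thread left in the child, so unlocking is safe here. The
        // semaphore-backed locks in this commit would instead be re-created,
        // as init_after_fork() does.

    static void
    install_allocator_fork_hooks()
    {
        pthread_atfork(&prepare_fork, &parent_after, &child_after);
    }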
@@ -77,7 +150,6 @@ hoardSbrk(long size)
     hoardLock(sHeapLock);
 
     // find chunk in free list
-
     free_chunk *chunk = sFreeChunks, *last = NULL;
     for (; chunk != NULL; chunk = chunk->next, last = chunk) {
         if (chunk->size < (size_t)size)
@@ -101,22 +173,23 @@ hoardSbrk(long size)
         sFreeChunks = chunk;
 
         hoardUnlock(sHeapLock);
 
         return address;
     }
 
     // There was no chunk, let's see if the area is large enough
 
-    size_t oldHeapSize = sHeapSize;
-    sHeapSize += size;
+    size_t oldHeapSize = sFreeHeapSize;
+    sFreeHeapSize += size;
 
     // round to next page size
-    size_t pageSize = (sHeapSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
+    size_t pageSize = (sFreeHeapSize + B_PAGE_SIZE - 1) & ~(B_PAGE_SIZE - 1);
 
     if (pageSize < sHeapAreaSize) {
         SERIAL_PRINT(("HEAP-%ld: heap area large enough for %ld\n", find_thread(NULL), size));
         // the area is large enough already
         hoardUnlock(sHeapLock);
-        return (void *)(sHeapBase + oldHeapSize);
+        return (void *)(sFreeHeapBase + oldHeapSize);
     }
 
     // We need to grow the area
@@ -133,7 +206,8 @@ hoardSbrk(long size)
     sHeapAreaSize = pageSize;
 
     hoardUnlock(sHeapLock);
-    return (void *)(sHeapBase + oldHeapSize);
+
+    return (void *)(sFreeHeapBase + oldHeapSize);
 }
 
 
@@ -147,8 +221,15 @@ hoardUnsbrk(void *ptr, long size)
 void
 hoardLockInit(hoardLockType &lock, const char *name)
 {
-    lock.ben = 0;
-    lock.sem = create_sem(0, name);
+    new (&lock) hoardLockType;
+        // initializes the list link
+
+    initialize_hoard_lock(lock, name);
+
+    // add the lock to the lock list (the heap lock also protects the lock list)
+    hoardLock(sHeapLock);
+    sLockList.Add(&lock);
+    hoardUnlock(sHeapLock);
 }
 
 
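The {ben, sem} pair in hoardLockType is a classic BeOS benaphore, which also explains create_sem(0, name) above: the semaphore starts with a zero count because the first acquirer takes the fast path through the atomic counter and never touches the semaphore at all. The lock/unlock side is not part of this diff; the usual idiom looks roughly like this (a sketch of the standard benaphore pattern, not necessarily the exact hoardLock()/hoardUnlock() implementation):

    #include <OS.h>

    struct hoardLockType {  // matching the fields in the header diff below
        int32  ben;
        sem_id sem;
    };

    static void
    benaphore_lock(hoardLockType& lock)
    {
        // The first acquirer sees the old value 0 and skips the syscall
        // entirely; everyone else blocks on the semaphore.
        if (atomic_add(&lock.ben, 1) > 0)
            acquire_sem(lock.sem);
    }

    static void
    benaphore_unlock(hoardLockType& lock)
    {
        // If another thread is (or is about to be) waiting, wake one waiter.
        if (atomic_add(&lock.ben, -1) > 1)
            release_sem(lock.sem);
    }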
@@ -22,20 +22,21 @@
 
 #include "config.h"
 
+#include <new.h>
+
 #include <OS.h>
 #include <assert.h>
 
+#include <util/DoublyLinkedList.h>
+
-typedef struct {
+// Note: Since locks are currently never uninitialized, a singly linked
+// list would suffice. But we may change that some day, and the singly
+// linked list interface is ugly, anyway. ;-)
+struct hoardLockType : DoublyLinkedListLinkImpl<hoardLockType> {
     int32 ben;
     sem_id sem;
-} hoardLockType;
+};
 
-inline void *
-operator new(size_t, void *_P)
-{
-    return _P;
-}
-
 namespace BPrivate {
 
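A side note on this header change: the hand-rolled placement operator new is removed in favor of including <new.h> (the BeOS-era spelling of <new>), which declares the standard placement new, presumably to avoid clashing with that declaration. Placement new is what lets __init_heap() construct the static LockList without calling malloc(). A tiny, hypothetical illustration (names are not from this commit):

    #include <new>  // <new.h> in this era's tree

    struct LockList { /* fields omitted */ };

    static LockList sLockList;
        // static storage; during early libroot init its constructor
        // may not have run yet, and malloc() is not usable

    void
    early_init()
    {
        // Run the constructor in place, without allocating -- the same
        // thing `new (&sLockList) LockList;` does in the diff above.
        new (&sLockList) LockList;
    }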