Replaced CACHE_DONT_SLEEP by two new flags CACHE_DONT_WAIT_FOR_MEMORY and
CACHE_DONT_LOCK_KERNEL_SPACE. If the former is given, the slab memory manager
does not wait when reserving memory or pages. The latter prevents area
operations. The new flags add a bit of flexibility. E.g. when allocating page
mapping objects for userland areas, CACHE_DONT_WAIT_FOR_MEMORY is sufficient,
i.e. the allocation will succeed as long as pages are available.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35246 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-01-22 21:19:23 +00:00
parent 86a0da42a5
commit 8d1316fd23
10 changed files with 64 additions and 42 deletions
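
The caller-side pattern this commit introduces looks roughly like the sketch
below (compare the map_page() and UnmapPage() hunks further down). The helper
name allocate_page_mapping_object is made up for illustration; the types,
globals, and flags are taken from the kernel sources, and the snippet is not
meant to build outside the kernel tree.

    // Allocate a page mapping object without ever waiting for memory to be
    // freed. CACHE_DONT_LOCK_KERNEL_SPACE additionally keeps the slab code
    // from performing kernel area operations, so it is only passed when the
    // mapping targets the kernel address space.
    static vm_page_mapping*
    allocate_page_mapping_object(VMArea* area)
    {
        bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
        uint32 flags = CACHE_DONT_WAIT_FOR_MEMORY
            | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);

        // Returns NULL when no pages are available right now; callers map
        // that to B_NO_MEMORY.
        return (vm_page_mapping*)object_cache_alloc(gPageMappingsObjectCache,
            flags);
    }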

View File

@@ -14,16 +14,17 @@
 enum {
     /* create_object_cache_etc flags */
     CACHE_NO_DEPOT = 1 << 0,
     CACHE_UNLOCKED_PAGES = 1 << 1,
     CACHE_LARGE_SLAB = 1 << 2,

-    /* object_cache_alloc flags */
-    CACHE_DONT_SLEEP = 1 << 8,
+    /* object_cache_{alloc,free}() flags */
+    CACHE_DONT_WAIT_FOR_MEMORY = 1 << 8,
+    CACHE_DONT_LOCK_KERNEL_SPACE = 1 << 9,

     /* internal */
     CACHE_ALIGN_ON_SIZE = 1 << 30,
     CACHE_DURING_BOOT = 1 << 31
 };

 struct ObjectCache;

View File

@@ -36,7 +36,7 @@ static int
 m_to_oc_flags(int how)
 {
     if (how & M_NOWAIT)
-        return CACHE_DONT_SLEEP;
+        return CACHE_DONT_WAIT_FOR_MEMORY;

     return 0;
 }

View File

@@ -518,8 +518,12 @@ PPCVMTranslationMap::UnmapPage(VMArea* area, addr_t address)
     locker.Unlock();

-    if (mapping != NULL)
-        object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);
+    if (mapping != NULL) {
+        bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
+        object_cache_free(gPageMappingsObjectCache, mapping,
+            CACHE_DONT_WAIT_FOR_MEMORY
+                | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
+    }

     return B_OK;
 }

View File

@@ -628,8 +628,12 @@ X86VMTranslationMap::UnmapPage(VMArea* area, addr_t address)
     locker.Unlock();

-    if (mapping != NULL)
-        object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);
+    if (mapping != NULL) {
+        bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
+        object_cache_free(gPageMappingsObjectCache, mapping,
+            CACHE_DONT_WAIT_FOR_MEMORY
+                | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
+    }

     return B_OK;
 }
@@ -738,8 +742,11 @@ X86VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size)
     locker.Unlock();

     // free removed mappings
+    bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
+    uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
+        | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
     while (vm_page_mapping* mapping = queue.RemoveHead())
-        object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);
+        object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
 }
@@ -823,8 +830,11 @@ X86VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
     locker.Unlock();

+    bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
+    uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
+        | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
     while (vm_page_mapping* mapping = mappings.RemoveHead())
-        object_cache_free(gPageMappingsObjectCache, mapping, CACHE_DONT_SLEEP);
+        object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
 }

View File

@@ -140,7 +140,6 @@ MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
     } else
         chunk = _pop(area->unmappedFreeChunks);

     if (++area->usedChunkCount == area->chunkCount) {
         areaPool->partialAreas.Remove(area);
         areaPool->fullAreas.Add(area);
@@ -160,7 +159,6 @@ MemoryManager::Allocate(ObjectCache* cache, uint32 flags, void*& _pages)
         }
     }

     chunk->cache = cache;
     _pages = (void*)chunkAddress;
@@ -246,6 +244,13 @@ MemoryManager::_AreaPoolFor(size_t chunkSize)
 /*static*/ status_t
 MemoryManager::_GetPartialArea(AreaPool* areaPool, uint32 flags, Area*& _area)
 {
+    if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
+        // We can't create an area with this limitation and we must not wait
+        // for someone else doing that.
+        _area = areaPool->partialAreas.Head();
+        return _area != NULL ? B_OK : B_WOULD_BLOCK;
+    }
+
     while (true) {
         Area* area = areaPool->partialAreas.Head();
         if (area != NULL) {
@@ -259,7 +264,7 @@ MemoryManager::_GetPartialArea(AreaPool* areaPool, uint32 flags, Area*& _area)
         if (sAllocationEntryDontWait != NULL) {
             allocationEntry = sAllocationEntryDontWait;
         } else if (sAllocationEntryCanWait != NULL
-                && (flags & CACHE_DONT_SLEEP) == 0) {
+                && (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
             allocationEntry = sAllocationEntryCanWait;
         } else
             break;
@@ -273,8 +278,9 @@ MemoryManager::_GetPartialArea(AreaPool* areaPool, uint32 flags, Area*& _area)
     }

     // prepare the allocation entry others can wait on
-    AllocationEntry*& allocationEntry = (flags & CACHE_DONT_SLEEP) != 0
-        ? sAllocationEntryDontWait : sAllocationEntryCanWait;
+    AllocationEntry*& allocationEntry
+        = (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
+            ? sAllocationEntryDontWait : sAllocationEntryCanWait;
     AllocationEntry myResizeEntry;
     allocationEntry = &myResizeEntry;
@@ -308,10 +314,7 @@ MemoryManager::_AllocateArea(size_t chunkSize, uint32 flags, Area*& _area)
     TRACE("MemoryManager::_AllocateArea(%" B_PRIuSIZE ", %#" B_PRIx32 ")\n",
         chunkSize, flags);

-    if ((flags & CACHE_DONT_SLEEP) != 0)
-        return B_WOULD_BLOCK;
-        // TODO: Support CACHE_DONT_SLEEP for real! We already consider it in
-        // most cases below, but not for vm_create_null_area().
+    ASSERT((flags & CACHE_DONT_LOCK_KERNEL_SPACE) == 0);

     mutex_unlock(&sLock);
@@ -404,14 +407,11 @@ MemoryManager::_FreeArea(Area* area, uint32 flags)
     sAreaTable.RemoveUnchecked(area);
     writeLocker.Unlock();

-    if (area->vmArea == NULL) {
+    if (area->vmArea == NULL || (flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0) {
         _push(sFreeAreas, area);
         return;
     }

-    // TODO: Do we need to handle CACHE_DONT_SLEEP here? Is delete_area()
-    // problematic?
-
     mutex_unlock(&sLock);

     size_t reservedMemory = area->reserved_memory_for_mapping;
@@ -468,14 +468,14 @@ MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
     // reserve memory for the chunk
     size_t reservedMemory = size + reserveAdditionalMemory;
     status_t error = vm_try_reserve_memory(size,
-        (flags & CACHE_DONT_SLEEP) != 0 ? 0 : 1000000);
+        (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
     if (error != B_OK)
         return error;

     // reserve the pages we need now
     size_t reservedPages = size / B_PAGE_SIZE
         + translationMap->MaxPagesNeededToMap(address, address + size - 1);
-    if ((flags & CACHE_DONT_SLEEP) != 0) {
+    if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
         if (!vm_page_try_reserve_pages(reservedPages)) {
             vm_unreserve_memory(reservedMemory);
             return B_WOULD_BLOCK;

View File

@@ -77,11 +77,10 @@ DepotMagazine::Push(void* object)
 static DepotMagazine*
-alloc_magazine()
+alloc_magazine(uint32 flags)
 {
     DepotMagazine* magazine = (DepotMagazine*)slab_internal_alloc(
-        sizeof(DepotMagazine) + kMagazineCapacity * sizeof(void*),
-        CACHE_DONT_SLEEP);
+        sizeof(DepotMagazine) + kMagazineCapacity * sizeof(void*), flags);
     if (magazine) {
         magazine->next = NULL;
         magazine->current_round = 0;
@@ -264,7 +263,7 @@ object_depot_store(object_depot* depot, void* object, uint32 flags)
         interruptsLocker.Unlock();
         readLocker.Unlock();

-        DepotMagazine* magazine = alloc_magazine();
+        DepotMagazine* magazine = alloc_magazine(flags);
         if (magazine == NULL)
             return 0;

View File

@@ -312,7 +312,7 @@ object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
         if (cache->resize_entry_dont_wait != NULL) {
             resizeEntry = cache->resize_entry_dont_wait;
         } else if (cache->resize_entry_can_wait != NULL
-                && (flags & CACHE_DONT_SLEEP) == 0) {
+                && (flags & CACHE_DONT_WAIT_FOR_MEMORY) == 0) {
             resizeEntry = cache->resize_entry_can_wait;
         } else
             break;
@@ -326,8 +326,9 @@ object_cache_reserve_internal(ObjectCache* cache, size_t objectCount,
     }

     // prepare the resize entry others can wait on
-    ObjectCacheResizeEntry*& resizeEntry = (flags & CACHE_DONT_SLEEP) != 0
-        ? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;
+    ObjectCacheResizeEntry*& resizeEntry
+        = (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0
+            ? cache->resize_entry_dont_wait : cache->resize_entry_can_wait;
     ObjectCacheResizeEntry myResizeEntry;
     resizeEntry = &myResizeEntry;

View File

@@ -76,12 +76,15 @@ block_alloc(size_t size, uint32 flags)
         return object_cache_alloc(sBlockCaches[index], flags);

     // the allocation is too large for our object caches -- create an area
+    if ((flags & CACHE_DONT_LOCK_KERNEL_SPACE) != 0)
+        return NULL;
+
     void* block;
     area_id area = create_area_etc(VMAddressSpace::KernelID(),
         "alloc'ed block", &block, B_ANY_KERNEL_ADDRESS,
         ROUNDUP(size, B_PAGE_SIZE), B_FULL_LOCK,
         B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0,
-        (flags & CACHE_DONT_SLEEP) != 0 ? CREATE_AREA_DONT_WAIT : 0);
+        (flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? CREATE_AREA_DONT_WAIT : 0);
     if (area < 0)
         return NULL;

View File

@@ -779,7 +779,7 @@ VMAnonymousCache::_SwapBlockBuild(off_t startPageIndex,
     swap_block* swap = sSwapHashTable.Lookup(key);
     while (swap == NULL) {
         swap = (swap_block*)object_cache_alloc(sSwapBlockCache,
-            CACHE_DONT_SLEEP);
+            CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
         if (swap == NULL) {
             // Wait a short time until memory is available again.
             locker.Unlock();
@@ -831,7 +831,8 @@ VMAnonymousCache::_SwapBlockFree(off_t startPageIndex, uint32 count)
         swap->used -= j;
         if (swap->used == 0) {
             sSwapHashTable.RemoveUnchecked(swap);
-            object_cache_free(sSwapBlockCache, swap, CACHE_DONT_SLEEP);
+            object_cache_free(sSwapBlockCache, swap,
+                CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
         }
     }
 }
@@ -1042,7 +1043,7 @@ VMAnonymousCache::_MergeSwapPages(VMAnonymousCache* source)
             // All swap pages have been freed -- we can discard the source swap
             // block.
             object_cache_free(sSwapBlockCache, sourceSwapBlock,
-                CACHE_DONT_SLEEP);
+                CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
         } else if (swapBlock == NULL) {
             // We need to take over some of the source's swap pages and there's
             // no swap block in the consumer cache. Just take over the source
@@ -1061,7 +1062,7 @@ VMAnonymousCache::_MergeSwapPages(VMAnonymousCache* source)
             }

             object_cache_free(sSwapBlockCache, sourceSwapBlock,
-                CACHE_DONT_SLEEP);
+                CACHE_DONT_WAIT_FOR_MEMORY | CACHE_DONT_LOCK_KERNEL_SPACE);
         }
     }
 }

View File

@@ -471,8 +471,11 @@ map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
     if (area->wiring == B_NO_LOCK) {
         DEBUG_PAGE_ACCESS_CHECK(page);

+        bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
         vm_page_mapping* mapping = (vm_page_mapping*)object_cache_alloc(
-            gPageMappingsObjectCache, CACHE_DONT_SLEEP);
+            gPageMappingsObjectCache,
+            CACHE_DONT_WAIT_FOR_MEMORY
+                | (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0));
         if (mapping == NULL)
             return B_NO_MEMORY;