* The system now holds back a small reserve of committable memory and pages.
  The memory and page reservation functions have a new "priority" parameter
  that indicates how deep the function may tap into that reserve. The currently
  existing priority levels are "user", "system", and "VIP". The idea is that
  user programs should never be able to get the kernel into trouble through
  heavy contention for memory. The "VIP" level (not really used yet) is
  intended for allocations that are required to free memory eventually (in the
  page writer). More levels are conceivable in the future, like "user real
  time" or "user system server".
* Added "priority" parameters to several VMCache methods.
* Replaced the map_backing_store() "unmapAddressRange" parameter with a
  "flags" parameter.
* Added area creation flag CREATE_AREA_PRIORITY_VIP and slab allocator flag
  CACHE_PRIORITY_VIP to indicate the importance of a request.
* Changed most code to pass the right priorities/flags.

These changes already significantly improve behavior in low-memory situations.
I've tested a bit with 64 MB of (virtual) RAM and, while not particularly fast
or responsive, the system remains at least usable under high memory pressure.

As a side effect, the slab allocator can now be used as a general memory
allocator. It is not used as such by default yet, though.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35295 a95241bf-73f2-0310-859d-f6bbb57e9c96
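For illustration, a minimal sketch of how a caller is meant to pick a priority and reserve memory/pages with the new API. The constants (VM_PRIORITY_*, CREATE_AREA_PRIORITY_VIP) and functions (vm_try_reserve_memory(), vm_page_reserve_pages(), VMAddressSpace::KernelID()) are the ones introduced or changed by this commit; the helper names allocation_priority_for() and reserve_for_area() and their surrounding variables are made up for the example:

static int
allocation_priority_for(team_id team, uint32 flags)
{
	// User teams never touch the reserve; kernel allocations may use the
	// "system" part of it; VIP allocations (those needed to eventually free
	// memory, e.g. in the page writer) may drain it completely.
	if (team != VMAddressSpace::KernelID())
		return VM_PRIORITY_USER;
	if ((flags & CREATE_AREA_PRIORITY_VIP) != 0)
		return VM_PRIORITY_VIP;
	return VM_PRIORITY_SYSTEM;
}

static status_t
reserve_for_area(team_id team, uint32 flags, size_t size, bigtime_t timeout)
{
	int priority = allocation_priority_for(team, flags);

	// Reserve committable memory first, then the pages backing it; both
	// calls now take the priority so they know how deep they may dip into
	// the held-back reserve.
	if (vm_try_reserve_memory(size, priority, timeout) != B_OK)
		return B_NO_MEMORY;

	vm_page_reserve_pages(size / B_PAGE_SIZE, priority);
	return B_OK;
}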
Parent: c5d6e9b06e
Commit: cff6e9e406
@@ -21,6 +21,7 @@ enum {
 /* object_cache_{alloc,free}() flags */
 CACHE_DONT_WAIT_FOR_MEMORY = 1 << 8,
 CACHE_DONT_LOCK_KERNEL_SPACE = 1 << 9,
+CACHE_PRIORITY_VIP = 1 << 10,
 
 /* internal */
 CACHE_ALIGN_ON_SIZE = 1 << 30,
@@ -105,8 +105,9 @@ public:
 uint32 CountWritableAreas(VMArea* ignoreArea) const;
 
 status_t WriteModified();
-status_t SetMinimalCommitment(off_t commitment);
-status_t Resize(off_t newSize);
+status_t SetMinimalCommitment(off_t commitment,
+	int priority);
+status_t Resize(off_t newSize, int priority);
 
 status_t FlushAndRemoveAllPages();
 
@@ -122,7 +123,7 @@ public:
 { return fRefCount; }
 
 // backing store operations
-virtual status_t Commit(off_t size);
+virtual status_t Commit(off_t size, int priority);
 virtual bool HasPage(off_t offset);
 
 virtual status_t Read(off_t offset, const iovec *vecs,
@@ -200,12 +201,13 @@ class VMCacheFactory {
 public:
 static status_t CreateAnonymousCache(VMCache*& cache,
 	bool canOvercommit, int32 numPrecommittedPages,
-	int32 numGuardPages, bool swappable);
+	int32 numGuardPages, bool swappable,
+	int priority);
 static status_t CreateVnodeCache(VMCache*& cache,
 	struct vnode* vnode);
 static status_t CreateDeviceCache(VMCache*& cache,
 	addr_t baseAddress);
-static status_t CreateNullCache(VMCache*& cache);
+static status_t CreateNullCache(int priority, VMCache*& cache);
 };
 
 
@@ -30,6 +30,20 @@ struct vnode;
 #define CREATE_AREA_DONT_WAIT 0x01
 #define CREATE_AREA_UNMAP_ADDRESS_RANGE 0x02
 #define CREATE_AREA_DONT_CLEAR 0x04
+#define CREATE_AREA_PRIORITY_VIP 0x08
+
+// memory/page allocation priorities
+#define VM_PRIORITY_USER 0
+#define VM_PRIORITY_SYSTEM 1
+#define VM_PRIORITY_VIP 2
+
+// page reserves
+#define VM_PAGE_RESERVE_USER 512
+#define VM_PAGE_RESERVE_SYSTEM 128
+
+// memory reserves
+#define VM_MEMORY_RESERVE_USER (VM_PAGE_RESERVE_USER * B_PAGE_SIZE)
+#define VM_MEMORY_RESERVE_SYSTEM (VM_PAGE_RESERVE_SYSTEM * B_PAGE_SIZE)
 
 
 extern struct ObjectCache* gPageMappingsObjectCache;
@@ -84,7 +98,7 @@ area_id vm_map_file(team_id aid, const char *name, void **address,
 struct VMCache *vm_area_get_locked_cache(struct VMArea *area);
 void vm_area_put_locked_cache(struct VMCache *cache);
 area_id vm_create_null_area(team_id team, const char *name, void **address,
-	uint32 addressSpec, addr_t size);
+	uint32 addressSpec, addr_t size, uint32 flags);
 area_id vm_copy_area(team_id team, const char *name, void **_address,
 	uint32 addressSpec, uint32 protection, area_id sourceID);
 area_id vm_clone_area(team_id team, const char *name, void **address,
@@ -46,13 +46,14 @@ void vm_page_schedule_write_page_range(struct VMCache *cache,
 	uint32 firstPage, uint32 endPage);
 
 void vm_page_unreserve_pages(uint32 count);
-void vm_page_reserve_pages(uint32 count);
-bool vm_page_try_reserve_pages(uint32 count);
+void vm_page_reserve_pages(uint32 count, int priority);
+bool vm_page_try_reserve_pages(uint32 count, int priority);
 
 struct vm_page *vm_page_allocate_page(int pageState);
 struct vm_page *vm_page_allocate_page_run(int state, addr_t base,
-	addr_t length);
-struct vm_page *vm_page_allocate_page_run_no_base(int state, addr_t count);
+	addr_t length, int priority);
+struct vm_page *vm_page_allocate_page_run_no_base(int state, addr_t count,
+	int priority);
 struct vm_page *vm_page_at_index(int32 index);
 struct vm_page *vm_lookup_page(addr_t pageNumber);
 
@@ -30,7 +30,7 @@ extern "C" {
 status_t vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite,
 	bool isUser, addr_t *newip);
 void vm_unreserve_memory(size_t bytes);
-status_t vm_try_reserve_memory(size_t bytes, bigtime_t timeout);
+status_t vm_try_reserve_memory(size_t bytes, int priority, bigtime_t timeout);
 void vm_schedule_page_scanner(uint32 target);
 status_t vm_daemon_init(void);
 
@ -539,7 +539,8 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
|
||||
uint32 count = size / B_PAGE_SIZE;
|
||||
|
||||
if ((flags & B_APERTURE_NEED_PHYSICAL) != 0) {
|
||||
memory->page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, 0, count);
|
||||
memory->page = vm_page_allocate_page_run(PAGE_STATE_CLEAR, 0, count,
|
||||
VM_PRIORITY_SYSTEM);
|
||||
if (memory->page == NULL)
|
||||
return B_NO_MEMORY;
|
||||
} else {
|
||||
@ -548,7 +549,7 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
|
||||
if (memory->pages == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
vm_page_reserve_pages(count);
|
||||
vm_page_reserve_pages(count, VM_PRIORITY_SYSTEM);
|
||||
for (uint32 i = 0; i < count; i++)
|
||||
memory->pages[i] = vm_page_allocate_page(PAGE_STATE_CLEAR);
|
||||
vm_page_unreserve_pages(count);
|
||||
|
@ -308,7 +308,8 @@ generic_vm_physical_page_mapper_init_post_area(kernel_args *args)
|
||||
TRACE(("generic_vm_physical_page_mapper_init_post_area: creating iospace\n"));
|
||||
temp = (void *)sIOSpaceBase;
|
||||
area_id ioSpaceArea = vm_create_null_area(VMAddressSpace::KernelID(),
|
||||
"iospace", &temp, B_EXACT_ADDRESS, sIOSpaceSize);
|
||||
"iospace", &temp, B_EXACT_ADDRESS, sIOSpaceSize,
|
||||
CREATE_AREA_PRIORITY_VIP);
|
||||
if (ioSpaceArea < 0) {
|
||||
panic("generic_vm_physical_page_mapper_init_post_area(): Failed to "
|
||||
"create null area for IO space!\n");
|
||||
|
@ -1369,7 +1369,7 @@ m68k_vm_translation_map_init_post_area(kernel_args *args)
|
||||
|
||||
area = vm_create_null_area(VMAddressSpace::KernelID(),
|
||||
"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
|
||||
B_PAGE_SIZE);
|
||||
B_PAGE_SIZE, CREATE_AREA_PRIORITY_VIP);
|
||||
if (area < B_OK)
|
||||
return area;
|
||||
|
||||
|
@ -546,7 +546,7 @@ LargeMemoryPhysicalPageMapper::InitPostArea(kernel_args* args)
|
||||
temp = (void*)fInitialPool.virtualBase;
|
||||
area = vm_create_null_area(VMAddressSpace::KernelID(),
|
||||
"physical page pool space", &temp, B_EXACT_ADDRESS,
|
||||
1024 * B_PAGE_SIZE);
|
||||
1024 * B_PAGE_SIZE, CREATE_AREA_PRIORITY_VIP);
|
||||
if (area < B_OK) {
|
||||
panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
|
||||
"create area for physical page pool space.");
|
||||
@ -876,7 +876,8 @@ LargeMemoryPhysicalPageMapper::_AllocatePool(PhysicalPageSlotPool*& _pool)
|
||||
void* virtualBase;
|
||||
area_id virtualArea = vm_create_null_area(
|
||||
VMAddressSpace::KernelID(), "physical page pool space",
|
||||
&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE);
|
||||
&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
|
||||
CREATE_AREA_PRIORITY_VIP);
|
||||
if (virtualArea < 0) {
|
||||
delete_area(dataArea);
|
||||
return virtualArea;
|
||||
|
src/system/kernel/cache/file_cache.cpp (8 changed lines)
@ -316,7 +316,7 @@ reserve_pages(file_cache_ref* ref, size_t reservePages, bool isWrite)
|
||||
cache->Unlock();
|
||||
}
|
||||
|
||||
vm_page_reserve_pages(reservePages);
|
||||
vm_page_reserve_pages(reservePages, VM_PRIORITY_USER);
|
||||
}
|
||||
|
||||
|
||||
@ -966,7 +966,7 @@ cache_prefetch_vnode(struct vnode* vnode, off_t offset, size_t size)
|
||||
size_t bytesToRead = 0;
|
||||
off_t lastOffset = offset;
|
||||
|
||||
vm_page_reserve_pages(reservePages);
|
||||
vm_page_reserve_pages(reservePages, VM_PRIORITY_USER);
|
||||
|
||||
cache->Lock();
|
||||
|
||||
@ -1229,7 +1229,9 @@ file_cache_set_size(void* _cacheRef, off_t newSize)
|
||||
AutoLocker<VMCache> _(cache);
|
||||
|
||||
off_t oldSize = cache->virtual_end;
|
||||
status_t status = cache->Resize(newSize);
|
||||
status_t status = cache->Resize(newSize, VM_PRIORITY_USER);
|
||||
// Note, the priority doesn't really matter, since this cache doesn't
|
||||
// reserve any memory.
|
||||
if (status == B_OK && newSize < oldSize) {
|
||||
// We may have a new partial page at the end of the cache that must be
|
||||
// cleared.
|
||||
|
@@ -47,14 +47,14 @@ static const bigtime_t kLowResourceInterval = 3000000; // 3 secs
 static const bigtime_t kWarnResourceInterval = 500000; // 0.5 secs
 
 // page limits
-static const size_t kNotePagesLimit = 2048;
-static const size_t kWarnPagesLimit = 256;
-static const size_t kCriticalPagesLimit = 32;
+static const size_t kNotePagesLimit = VM_PAGE_RESERVE_USER * 4;
+static const size_t kWarnPagesLimit = VM_PAGE_RESERVE_USER;
+static const size_t kCriticalPagesLimit = VM_PAGE_RESERVE_SYSTEM;
 
 // memory limits
-static const off_t kMinNoteMemoryLimit = 16 * 1024 * 1024;
-static const off_t kMinWarnMemoryLimit = 4 * 1024 * 1024;
-static const off_t kMinCriticalMemoryLimit = 1 * 1024 * 1024;
+static const off_t kMinNoteMemoryLimit = VM_MEMORY_RESERVE_USER * 4;
+static const off_t kMinWarnMemoryLimit = VM_MEMORY_RESERVE_USER;
+static const off_t kMinCriticalMemoryLimit = VM_MEMORY_RESERVE_SYSTEM;
 static off_t sNoteMemoryLimit;
 static off_t sWarnMemoryLimit;
 static off_t sCriticalMemoryLimit;
@ -1181,8 +1181,11 @@ MemoryManager::_AllocateArea(uint32 flags, Area*& _area)
|
||||
|
||||
if (sKernelArgs == NULL) {
|
||||
// create an area
|
||||
uint32 areaCreationFlags = (flags & CACHE_PRIORITY_VIP) != 0
|
||||
? CREATE_AREA_PRIORITY_VIP : 0;
|
||||
area_id areaID = vm_create_null_area(B_SYSTEM_TEAM, kSlabAreaName,
|
||||
(void**)&area, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE);
|
||||
(void**)&area, B_ANY_KERNEL_BLOCK_ADDRESS, SLAB_AREA_SIZE,
|
||||
areaCreationFlags);
|
||||
if (areaID < 0) {
|
||||
mutex_lock(&sLock);
|
||||
return areaID;
|
||||
@ -1315,8 +1318,10 @@ MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
|
||||
VMTranslationMap* translationMap = addressSpace->TranslationMap();
|
||||
|
||||
// reserve memory for the chunk
|
||||
int priority = (flags & CACHE_PRIORITY_VIP) != 0
|
||||
? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
|
||||
size_t reservedMemory = size + reserveAdditionalMemory;
|
||||
status_t error = vm_try_reserve_memory(size,
|
||||
status_t error = vm_try_reserve_memory(size, priority,
|
||||
(flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0 ? 0 : 1000000);
|
||||
if (error != B_OK)
|
||||
return error;
|
||||
@ -1325,12 +1330,12 @@ MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
|
||||
size_t reservedPages = size / B_PAGE_SIZE
|
||||
+ translationMap->MaxPagesNeededToMap(address, address + size - 1);
|
||||
if ((flags & CACHE_DONT_WAIT_FOR_MEMORY) != 0) {
|
||||
if (!vm_page_try_reserve_pages(reservedPages)) {
|
||||
if (!vm_page_try_reserve_pages(reservedPages, priority)) {
|
||||
vm_unreserve_memory(reservedMemory);
|
||||
return B_WOULD_BLOCK;
|
||||
}
|
||||
} else
|
||||
vm_page_reserve_pages(reservedPages);
|
||||
vm_page_reserve_pages(reservedPages, priority);
|
||||
|
||||
VMCache* cache = vm_area_get_locked_cache(vmArea);
|
||||
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include <vm/vm.h>
|
||||
#include <vm/vm_page.h>
|
||||
#include <vm/vm_priv.h>
|
||||
#include <vm/VMAddressSpace.h>
|
||||
|
||||
#include "IORequest.h"
|
||||
|
||||
@ -468,7 +469,7 @@ VMAnonymousCache::Init(bool canOvercommit, int32 numPrecommittedPages,
|
||||
|
||||
|
||||
status_t
|
||||
VMAnonymousCache::Commit(off_t size)
|
||||
VMAnonymousCache::Commit(off_t size, int priority)
|
||||
{
|
||||
TRACE("%p->VMAnonymousCache::Commit(%lld)\n", this, size);
|
||||
|
||||
@ -484,7 +485,7 @@ VMAnonymousCache::Commit(off_t size)
|
||||
size = precommitted;
|
||||
}
|
||||
|
||||
return _Commit(size);
|
||||
return _Commit(size, priority);
|
||||
}
|
||||
|
||||
|
||||
@ -716,10 +717,14 @@ VMAnonymousCache::Fault(struct VMAddressSpace* aspace, off_t offset)
|
||||
|
||||
if (fPrecommittedPages == 0) {
|
||||
// try to commit additional swap space/memory
|
||||
if (swap_space_reserve(B_PAGE_SIZE) == B_PAGE_SIZE)
|
||||
if (swap_space_reserve(B_PAGE_SIZE) == B_PAGE_SIZE) {
|
||||
fCommittedSwapSize += B_PAGE_SIZE;
|
||||
else if (vm_try_reserve_memory(B_PAGE_SIZE, 0) != B_OK)
|
||||
return B_NO_MEMORY;
|
||||
} else {
|
||||
int priority = aspace == VMAddressSpace::Kernel()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
|
||||
if (vm_try_reserve_memory(B_PAGE_SIZE, priority, 0) != B_OK)
|
||||
return B_NO_MEMORY;
|
||||
}
|
||||
|
||||
committed_size += B_PAGE_SIZE;
|
||||
} else
|
||||
@ -749,7 +754,7 @@ VMAnonymousCache::Merge(VMCache* _source)
|
||||
|
||||
off_t actualSize = virtual_end - virtual_base;
|
||||
if (committed_size > actualSize)
|
||||
_Commit(actualSize);
|
||||
_Commit(actualSize, VM_PRIORITY_USER);
|
||||
|
||||
// Move all not shadowed swap pages from the source to the consumer cache.
|
||||
// Also remove all source pages that are shadowed by consumer swap pages.
|
||||
@ -857,7 +862,7 @@ VMAnonymousCache::_SwapBlockGetAddress(off_t pageIndex)
|
||||
|
||||
|
||||
status_t
|
||||
VMAnonymousCache::_Commit(off_t size)
|
||||
VMAnonymousCache::_Commit(off_t size, int priority)
|
||||
{
|
||||
TRACE("%p->VMAnonymousCache::_Commit(%lld), already committed: %lld "
|
||||
"(%lld swap)\n", this, size, committed_size, fCommittedSwapSize);
|
||||
@ -907,7 +912,7 @@ VMAnonymousCache::_Commit(off_t size)
|
||||
// the start of the method, so we try to reserve real memory, now.
|
||||
|
||||
off_t toReserve = size - committed_size;
|
||||
if (vm_try_reserve_memory(toReserve, 1000000) != B_OK) {
|
||||
if (vm_try_reserve_memory(toReserve, priority, 1000000) != B_OK) {
|
||||
dprintf("%p->VMAnonymousCache::_Commit(%lld): Failed to reserve %lld "
|
||||
"bytes of RAM\n", this, size, toReserve);
|
||||
return B_NO_MEMORY;
|
||||
|
@ -37,7 +37,7 @@ public:
|
||||
int32 numPrecommittedPages,
|
||||
int32 numGuardPages);
|
||||
|
||||
virtual status_t Commit(off_t size);
|
||||
virtual status_t Commit(off_t size, int priority);
|
||||
virtual bool HasPage(off_t offset);
|
||||
|
||||
virtual status_t Read(off_t offset, const iovec* vecs,
|
||||
@ -66,7 +66,7 @@ private:
|
||||
swap_addr_t slotIndex, uint32 count);
|
||||
void _SwapBlockFree(off_t pageIndex, uint32 count);
|
||||
swap_addr_t _SwapBlockGetAddress(off_t pageIndex);
|
||||
status_t _Commit(off_t size);
|
||||
status_t _Commit(off_t size, int priority);
|
||||
|
||||
void _MergePagesSmallerSource(
|
||||
VMAnonymousCache* source);
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <heap.h>
|
||||
#include <KernelExport.h>
|
||||
#include <vm/vm_priv.h>
|
||||
#include <vm/VMAddressSpace.h>
|
||||
|
||||
|
||||
//#define TRACE_STORE
|
||||
@ -57,7 +58,7 @@ VMAnonymousNoSwapCache::Init(bool canOvercommit, int32 numPrecommittedPages,
|
||||
|
||||
|
||||
status_t
|
||||
VMAnonymousNoSwapCache::Commit(off_t size)
|
||||
VMAnonymousNoSwapCache::Commit(off_t size, int priority)
|
||||
{
|
||||
// if we can overcommit, we don't commit here, but in anonymous_fault()
|
||||
if (fCanOvercommit) {
|
||||
@ -75,8 +76,10 @@ VMAnonymousNoSwapCache::Commit(off_t size)
|
||||
|
||||
if (size > committed_size) {
|
||||
// try to commit
|
||||
if (vm_try_reserve_memory(size - committed_size, 1000000) != B_OK)
|
||||
if (vm_try_reserve_memory(size - committed_size, priority, 1000000)
|
||||
!= B_OK) {
|
||||
return B_NO_MEMORY;
|
||||
}
|
||||
} else {
|
||||
// we can release some
|
||||
vm_unreserve_memory(committed_size - size);
|
||||
@ -136,7 +139,9 @@ VMAnonymousNoSwapCache::Fault(struct VMAddressSpace *aspace, off_t offset)
|
||||
|
||||
if (fPrecommittedPages == 0) {
|
||||
// try to commit additional memory
|
||||
if (vm_try_reserve_memory(B_PAGE_SIZE, 0) != B_OK)
|
||||
int priority = aspace == VMAddressSpace::Kernel()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
|
||||
if (vm_try_reserve_memory(B_PAGE_SIZE, priority, 0) != B_OK)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
committed_size += B_PAGE_SIZE;
|
||||
|
@ -20,7 +20,7 @@ public:
|
||||
status_t Init(bool canOvercommit, int32 numPrecommittedPages,
|
||||
int32 numGuardPages);
|
||||
|
||||
virtual status_t Commit(off_t size);
|
||||
virtual status_t Commit(off_t size, int priority);
|
||||
virtual bool HasPage(off_t offset);
|
||||
|
||||
virtual status_t Read(off_t offset, const iovec *vecs, size_t count,
|
||||
|
@ -1000,7 +1000,7 @@ VMCache::WriteModified()
|
||||
Assumes you have the cache's lock held.
|
||||
*/
|
||||
status_t
|
||||
VMCache::SetMinimalCommitment(off_t commitment)
|
||||
VMCache::SetMinimalCommitment(off_t commitment, int priority)
|
||||
{
|
||||
TRACE(("VMCache::SetMinimalCommitment(cache %p, commitment %Ld)\n",
|
||||
this, commitment));
|
||||
@ -1017,7 +1017,7 @@ VMCache::SetMinimalCommitment(off_t commitment)
|
||||
// enough for a commitment of that size?
|
||||
|
||||
// try to commit more memory
|
||||
status = Commit(commitment);
|
||||
status = Commit(commitment, priority);
|
||||
}
|
||||
|
||||
return status;
|
||||
@ -1034,7 +1034,7 @@ VMCache::SetMinimalCommitment(off_t commitment)
|
||||
has to wait for busy pages.
|
||||
*/
|
||||
status_t
|
||||
VMCache::Resize(off_t newSize)
|
||||
VMCache::Resize(off_t newSize, int priority)
|
||||
{
|
||||
// TODO: This method must be virtual as VMAnonymousCache needs to free allocated
|
||||
// swap pages!
|
||||
@ -1044,7 +1044,7 @@ VMCache::Resize(off_t newSize)
|
||||
|
||||
T(Resize(this, newSize));
|
||||
|
||||
status_t status = Commit(newSize - virtual_base);
|
||||
status_t status = Commit(newSize - virtual_base, priority);
|
||||
if (status != B_OK)
|
||||
return status;
|
||||
|
||||
@ -1138,7 +1138,7 @@ VMCache::FlushAndRemoveAllPages()
|
||||
|
||||
|
||||
status_t
|
||||
VMCache::Commit(off_t size)
|
||||
VMCache::Commit(off_t size, int priority)
|
||||
{
|
||||
committed_size = size;
|
||||
return B_OK;
|
||||
@ -1343,10 +1343,12 @@ VMCache::_RemoveConsumer(VMCache* consumer)
|
||||
|
||||
/*static*/ status_t
|
||||
VMCacheFactory::CreateAnonymousCache(VMCache*& _cache, bool canOvercommit,
|
||||
int32 numPrecommittedPages, int32 numGuardPages, bool swappable)
|
||||
int32 numPrecommittedPages, int32 numGuardPages, bool swappable,
|
||||
int priority)
|
||||
{
|
||||
#if ENABLE_SWAP_SUPPORT
|
||||
if (swappable) {
|
||||
// TODO: Respect priority!
|
||||
VMAnonymousCache* cache = new(nogrow) VMAnonymousCache;
|
||||
if (cache == NULL)
|
||||
return B_NO_MEMORY;
|
||||
@ -1424,8 +1426,9 @@ VMCacheFactory::CreateDeviceCache(VMCache*& _cache, addr_t baseAddress)
|
||||
|
||||
|
||||
/*static*/ status_t
|
||||
VMCacheFactory::CreateNullCache(VMCache*& _cache)
|
||||
VMCacheFactory::CreateNullCache(int priority, VMCache*& _cache)
|
||||
{
|
||||
// TODO: Respect priority!
|
||||
VMNullCache* cache = new(nogrow) VMNullCache;
|
||||
if (cache == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
@@ -222,6 +222,14 @@ private:
 };
 
 
+// The memory reserve an allocation of the certain priority must not touch.
+static const size_t kMemoryReserveForPriority[] = {
+	VM_MEMORY_RESERVE_USER,		// user
+	VM_MEMORY_RESERVE_SYSTEM,	// system
+	0							// VIP
+};
+
+
 ObjectCache* gPageMappingsObjectCache;
 
 static rw_lock sAreaCacheLock = RW_LOCK_INITIALIZER("area->cache");
@ -255,7 +263,7 @@ static status_t vm_soft_fault(VMAddressSpace* addressSpace, addr_t address,
|
||||
static status_t map_backing_store(VMAddressSpace* addressSpace,
|
||||
VMCache* cache, void** _virtualAddress, off_t offset, addr_t size,
|
||||
uint32 addressSpec, int wiring, int protection, int mapping,
|
||||
VMArea** _area, const char* areaName, bool unmapAddressRange, bool kernel);
|
||||
VMArea** _area, const char* areaName, uint32 flags, bool kernel);
|
||||
|
||||
|
||||
// #pragma mark -
|
||||
@ -579,7 +587,9 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
|
||||
// Since VMCache::Resize() can temporarily drop the lock, we must
|
||||
// unlock all lower caches to prevent locking order inversion.
|
||||
cacheChainLocker.Unlock(cache);
|
||||
cache->Resize(cache->virtual_base + newSize);
|
||||
cache->Resize(cache->virtual_base + newSize,
|
||||
addressSpace == VMAddressSpace::Kernel()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
cache->ReleaseRefAndUnlock();
|
||||
}
|
||||
|
||||
@ -634,7 +644,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
|
||||
error = map_backing_store(addressSpace, cache, &secondBaseAddress,
|
||||
area->cache_offset + (secondBase - area->Base()), secondSize,
|
||||
B_EXACT_ADDRESS, area->wiring, area->protection, REGION_NO_PRIVATE_MAP,
|
||||
&secondArea, area->name, false, kernel);
|
||||
&secondArea, area->name, 0, kernel);
|
||||
if (error != B_OK) {
|
||||
addressSpace->ShrinkAreaTail(area, oldSize);
|
||||
return error;
|
||||
@ -697,7 +707,7 @@ static status_t
|
||||
map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
|
||||
void** _virtualAddress, off_t offset, addr_t size, uint32 addressSpec,
|
||||
int wiring, int protection, int mapping, VMArea** _area,
|
||||
const char* areaName, bool unmapAddressRange, bool kernel)
|
||||
const char* areaName, uint32 flags, bool kernel)
|
||||
{
|
||||
TRACE(("map_backing_store: aspace %p, cache %p, *vaddr %p, offset 0x%Lx, "
|
||||
"size %lu, addressSpec %ld, wiring %d, protection %d, area %p, areaName "
|
||||
@ -719,7 +729,8 @@ map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
|
||||
|
||||
// create an anonymous cache
|
||||
status = VMCacheFactory::CreateAnonymousCache(newCache,
|
||||
(protection & B_STACK_AREA) != 0, 0, USER_STACK_GUARD_PAGES, true);
|
||||
(protection & B_STACK_AREA) != 0, 0, USER_STACK_GUARD_PAGES, true,
|
||||
VM_PRIORITY_USER);
|
||||
if (status != B_OK)
|
||||
goto err1;
|
||||
|
||||
@ -734,7 +745,15 @@ map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
|
||||
cache = newCache;
|
||||
}
|
||||
|
||||
status = cache->SetMinimalCommitment(size);
|
||||
int priority;
|
||||
if (addressSpace != VMAddressSpace::Kernel())
|
||||
priority = VM_PRIORITY_USER;
|
||||
else if ((flags & CREATE_AREA_PRIORITY_VIP) != 0)
|
||||
priority = VM_PRIORITY_VIP;
|
||||
else
|
||||
priority = VM_PRIORITY_SYSTEM;
|
||||
|
||||
status = cache->SetMinimalCommitment(size, priority);
|
||||
if (status != B_OK)
|
||||
goto err2;
|
||||
|
||||
@ -746,7 +765,8 @@ map_backing_store(VMAddressSpace* addressSpace, VMCache* cache,
|
||||
goto err2;
|
||||
}
|
||||
|
||||
if (addressSpec == B_EXACT_ADDRESS && unmapAddressRange) {
|
||||
if (addressSpec == B_EXACT_ADDRESS
|
||||
&& (flags & CREATE_AREA_UNMAP_ADDRESS_RANGE) != 0) {
|
||||
status = unmap_address_range(addressSpace, (addr_t)*_virtualAddress,
|
||||
size, kernel);
|
||||
if (status != B_OK)
|
||||
@ -816,7 +836,8 @@ vm_block_address_range(const char* name, void* address, addr_t size)
|
||||
|
||||
// create an anonymous cache
|
||||
VMCache* cache;
|
||||
status = VMCacheFactory::CreateAnonymousCache(cache, false, 0, 0, false);
|
||||
status = VMCacheFactory::CreateAnonymousCache(cache, false, 0, 0, false,
|
||||
VM_PRIORITY_SYSTEM);
|
||||
if (status != B_OK)
|
||||
return status;
|
||||
|
||||
@ -829,7 +850,7 @@ vm_block_address_range(const char* name, void* address, addr_t size)
|
||||
void* areaAddress = address;
|
||||
status = map_backing_store(addressSpace, cache, &areaAddress, 0, size,
|
||||
B_EXACT_ADDRESS, B_ALREADY_WIRED, 0, REGION_NO_PRIVATE_MAP, &area, name,
|
||||
false, true);
|
||||
0, true);
|
||||
if (status != B_OK) {
|
||||
cache->ReleaseRefAndUnlock();
|
||||
return status;
|
||||
@ -953,6 +974,14 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
|
||||
reservedMapPages = map->MaxPagesNeededToMap(0, size - 1);
|
||||
}
|
||||
|
||||
int priority;
|
||||
if (team != VMAddressSpace::KernelID())
|
||||
priority = VM_PRIORITY_USER;
|
||||
else if ((flags & CREATE_AREA_PRIORITY_VIP) != 0)
|
||||
priority = VM_PRIORITY_VIP;
|
||||
else
|
||||
priority = VM_PRIORITY_SYSTEM;
|
||||
|
||||
// Reserve memory before acquiring the address space lock. This reduces the
|
||||
// chances of failure, since while holding the write lock to the address
|
||||
// space (if it is the kernel address space that is), the low memory handler
|
||||
@ -960,7 +989,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
|
||||
addr_t reservedMemory = 0;
|
||||
if (doReserveMemory) {
|
||||
bigtime_t timeout = (flags & CREATE_AREA_DONT_WAIT) != 0 ? 0 : 1000000;
|
||||
if (vm_try_reserve_memory(size, timeout) != B_OK)
|
||||
if (vm_try_reserve_memory(size, priority, timeout) != B_OK)
|
||||
return B_NO_MEMORY;
|
||||
reservedMemory = size;
|
||||
// TODO: We don't reserve the memory for the pages for the page
|
||||
@ -982,13 +1011,13 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
|
||||
reservedPages += size / B_PAGE_SIZE;
|
||||
if (reservedPages > 0) {
|
||||
if ((flags & CREATE_AREA_DONT_WAIT) != 0) {
|
||||
if (!vm_page_try_reserve_pages(reservedPages)) {
|
||||
if (!vm_page_try_reserve_pages(reservedPages, priority)) {
|
||||
reservedPages = 0;
|
||||
status = B_WOULD_BLOCK;
|
||||
goto err0;
|
||||
}
|
||||
} else
|
||||
vm_page_reserve_pages(reservedPages);
|
||||
vm_page_reserve_pages(reservedPages, priority);
|
||||
}
|
||||
|
||||
status = locker.SetTo(team);
|
||||
@ -1001,7 +1030,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
|
||||
// we try to allocate the page run here upfront as this may easily
|
||||
// fail for obvious reasons
|
||||
page = vm_page_allocate_page_run(newPageState, physicalAddress,
|
||||
size / B_PAGE_SIZE);
|
||||
size / B_PAGE_SIZE, priority);
|
||||
if (page == NULL) {
|
||||
status = B_NO_MEMORY;
|
||||
goto err0;
|
||||
@ -1014,7 +1043,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
|
||||
? USER_STACK_GUARD_PAGES : KERNEL_STACK_GUARD_PAGES) : 0;
|
||||
status = VMCacheFactory::CreateAnonymousCache(cache, canOvercommit,
|
||||
isStack ? (min_c(2, size / B_PAGE_SIZE - guardPages)) : 0, guardPages,
|
||||
wiring == B_NO_LOCK);
|
||||
wiring == B_NO_LOCK, priority);
|
||||
if (status != B_OK)
|
||||
goto err1;
|
||||
|
||||
@ -1040,7 +1069,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
|
||||
|
||||
status = map_backing_store(addressSpace, cache, address, 0, size,
|
||||
addressSpec, wiring, protection, REGION_NO_PRIVATE_MAP, &area, name,
|
||||
(flags & CREATE_AREA_UNMAP_ADDRESS_RANGE) != 0, kernel);
|
||||
flags, kernel);
|
||||
|
||||
if (status != B_OK) {
|
||||
cache->ReleaseRefAndUnlock();
|
||||
@ -1247,7 +1276,7 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
|
||||
|
||||
status = map_backing_store(locker.AddressSpace(), cache, _address,
|
||||
0, size, addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
|
||||
REGION_NO_PRIVATE_MAP, &area, name, false, true);
|
||||
REGION_NO_PRIVATE_MAP, &area, name, 0, true);
|
||||
|
||||
if (status < B_OK)
|
||||
cache->ReleaseRefLocked();
|
||||
@ -1269,7 +1298,9 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
|
||||
size_t reservePages = map->MaxPagesNeededToMap(area->Base(),
|
||||
area->Base() + (size - 1));
|
||||
|
||||
vm_page_reserve_pages(reservePages);
|
||||
vm_page_reserve_pages(reservePages,
|
||||
team == VMAddressSpace::KernelID()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
map->Lock();
|
||||
|
||||
for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
|
||||
@ -1341,7 +1372,7 @@ vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
|
||||
VMArea* area;
|
||||
result = map_backing_store(locker.AddressSpace(), cache, _address,
|
||||
0, size, addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
|
||||
REGION_NO_PRIVATE_MAP, &area, name, false, true);
|
||||
REGION_NO_PRIVATE_MAP, &area, name, 0, true);
|
||||
|
||||
if (result != B_OK)
|
||||
cache->ReleaseRefLocked();
|
||||
@ -1355,7 +1386,9 @@ vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
|
||||
size_t reservePages = map->MaxPagesNeededToMap(area->Base(),
|
||||
area->Base() + (size - 1));
|
||||
|
||||
vm_page_reserve_pages(reservePages);
|
||||
vm_page_reserve_pages(reservePages,
|
||||
team == VMAddressSpace::KernelID()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
map->Lock();
|
||||
|
||||
uint32 vecIndex = 0;
|
||||
@ -1388,32 +1421,33 @@ vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
|
||||
|
||||
area_id
|
||||
vm_create_null_area(team_id team, const char* name, void** address,
|
||||
uint32 addressSpec, addr_t size)
|
||||
uint32 addressSpec, addr_t size, uint32 flags)
|
||||
{
|
||||
VMArea* area;
|
||||
VMCache* cache;
|
||||
status_t status;
|
||||
|
||||
AddressSpaceWriteLocker locker(team);
|
||||
if (!locker.IsLocked())
|
||||
return B_BAD_TEAM_ID;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
|
||||
// create an null cache
|
||||
status = VMCacheFactory::CreateNullCache(cache);
|
||||
// create a null cache
|
||||
int priority = (flags & CREATE_AREA_PRIORITY_VIP) != 0
|
||||
? VM_PRIORITY_VIP : VM_PRIORITY_SYSTEM;
|
||||
VMCache* cache;
|
||||
status_t status = VMCacheFactory::CreateNullCache(priority, cache);
|
||||
if (status != B_OK)
|
||||
return status;
|
||||
|
||||
// tell the page scanner to skip over this area, no pages will be mapped here
|
||||
// tell the page scanner to skip over this area, no pages will be mapped
|
||||
// here
|
||||
cache->scan_skip = 1;
|
||||
cache->virtual_end = size;
|
||||
|
||||
cache->Lock();
|
||||
|
||||
VMArea* area;
|
||||
status = map_backing_store(locker.AddressSpace(), cache, address, 0, size,
|
||||
addressSpec, 0, B_KERNEL_READ_AREA, REGION_NO_PRIVATE_MAP, &area, name,
|
||||
false, true);
|
||||
flags, true);
|
||||
|
||||
if (status < B_OK) {
|
||||
cache->ReleaseRefAndUnlock();
|
||||
@ -1533,7 +1567,9 @@ _vm_map_file(team_id team, const char* name, void** _address,
|
||||
|
||||
locker.Unlock();
|
||||
|
||||
vm_page_reserve_pages(reservedPreMapPages);
|
||||
vm_page_reserve_pages(reservedPreMapPages,
|
||||
team == VMAddressSpace::KernelID()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
}
|
||||
|
||||
struct PageUnreserver {
|
||||
@ -1566,7 +1602,7 @@ _vm_map_file(team_id team, const char* name, void** _address,
|
||||
VMArea* area;
|
||||
status = map_backing_store(locker.AddressSpace(), cache, _address,
|
||||
offset, size, addressSpec, 0, protection, mapping, &area, name,
|
||||
unmapAddressRange, kernel);
|
||||
unmapAddressRange ? CREATE_AREA_UNMAP_ADDRESS_RANGE : 0, kernel);
|
||||
|
||||
if (status != B_OK || mapping == REGION_PRIVATE_MAP) {
|
||||
// map_backing_store() cannot know we no longer need the ref
|
||||
@ -1707,8 +1743,7 @@ vm_clone_area(team_id team, const char* name, void** address,
|
||||
else {
|
||||
status = map_backing_store(targetAddressSpace, cache, address,
|
||||
sourceArea->cache_offset, sourceArea->Size(), addressSpec,
|
||||
sourceArea->wiring, protection, mapping, &newArea, name, false,
|
||||
kernel);
|
||||
sourceArea->wiring, protection, mapping, &newArea, name, 0, kernel);
|
||||
}
|
||||
if (status == B_OK && mapping != REGION_PRIVATE_MAP) {
|
||||
// If the mapping is REGION_PRIVATE_MAP, map_backing_store() needed
|
||||
@ -1735,7 +1770,9 @@ vm_clone_area(team_id team, const char* name, void** address,
|
||||
size_t reservePages = map->MaxPagesNeededToMap(newArea->Base(),
|
||||
newArea->Base() + (newArea->Size() - 1));
|
||||
|
||||
vm_page_reserve_pages(reservePages);
|
||||
vm_page_reserve_pages(reservePages,
|
||||
targetAddressSpace == VMAddressSpace::Kernel()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
map->Lock();
|
||||
|
||||
for (addr_t offset = 0; offset < newArea->Size();
|
||||
@ -1750,7 +1787,9 @@ vm_clone_area(team_id team, const char* name, void** address,
|
||||
VMTranslationMap* map = targetAddressSpace->TranslationMap();
|
||||
size_t reservePages = map->MaxPagesNeededToMap(
|
||||
newArea->Base(), newArea->Base() + (newArea->Size() - 1));
|
||||
vm_page_reserve_pages(reservePages);
|
||||
vm_page_reserve_pages(reservePages,
|
||||
targetAddressSpace == VMAddressSpace::Kernel()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
|
||||
// map in all pages from source
|
||||
for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
|
||||
@ -1861,7 +1900,7 @@ vm_copy_on_write_area(VMCache* lowerCache)
|
||||
|
||||
// create an anonymous cache
|
||||
status_t status = VMCacheFactory::CreateAnonymousCache(upperCache, false, 0,
|
||||
0, true);
|
||||
0, true, VM_PRIORITY_USER);
|
||||
if (status != B_OK)
|
||||
return status;
|
||||
|
||||
@ -1944,7 +1983,7 @@ vm_copy_area(team_id team, const char* name, void** _address,
|
||||
status = map_backing_store(targetAddressSpace, cache, _address,
|
||||
source->cache_offset, source->Size(), addressSpec, source->wiring,
|
||||
protection, sharedArea ? REGION_NO_PRIVATE_MAP : REGION_PRIVATE_MAP,
|
||||
&target, name, false, true);
|
||||
&target, name, 0, true);
|
||||
if (status < B_OK)
|
||||
return status;
|
||||
|
||||
@ -2013,7 +2052,9 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
|
||||
// we can change the cache's commitment to take only those pages
|
||||
// into account that really are in this cache.
|
||||
|
||||
status = cache->Commit(cache->page_count * B_PAGE_SIZE);
|
||||
status = cache->Commit(cache->page_count * B_PAGE_SIZE,
|
||||
team == VMAddressSpace::KernelID()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
|
||||
// TODO: we may be able to join with our source cache, if
|
||||
// count == 0
|
||||
@ -2043,8 +2084,9 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
|
||||
// No consumers, so we don't need to insert a new one.
|
||||
if (cache->source != NULL && cache->temporary) {
|
||||
// the cache's commitment must contain all possible pages
|
||||
status = cache->Commit(cache->virtual_end
|
||||
- cache->virtual_base);
|
||||
status = cache->Commit(cache->virtual_end - cache->virtual_base,
|
||||
team == VMAddressSpace::KernelID()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
}
|
||||
|
||||
if (status == B_OK && cache->source != NULL) {
|
||||
@ -3756,7 +3798,9 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
|
||||
size_t reservePages = 2 + context.map->MaxPagesNeededToMap(originalAddress,
|
||||
originalAddress);
|
||||
context.addressSpaceLocker.Unlock();
|
||||
vm_page_reserve_pages(reservePages);
|
||||
vm_page_reserve_pages(reservePages,
|
||||
addressSpace == VMAddressSpace::Kernel()
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER);
|
||||
|
||||
while (true) {
|
||||
context.addressSpaceLocker.Lock();
|
||||
@ -4011,13 +4055,15 @@ vm_unreserve_memory(size_t amount)
|
||||
|
||||
|
||||
status_t
|
||||
vm_try_reserve_memory(size_t amount, bigtime_t timeout)
|
||||
vm_try_reserve_memory(size_t amount, int priority, bigtime_t timeout)
|
||||
{
|
||||
size_t reserve = kMemoryReserveForPriority[priority];
|
||||
|
||||
MutexLocker locker(sAvailableMemoryLock);
|
||||
|
||||
//dprintf("try to reserve %lu bytes, %Lu left\n", amount, sAvailableMemory);
|
||||
|
||||
if (sAvailableMemory >= amount) {
|
||||
if (sAvailableMemory >= amount + reserve) {
|
||||
sAvailableMemory -= amount;
|
||||
return B_OK;
|
||||
}
|
||||
@ -4040,7 +4086,7 @@ vm_try_reserve_memory(size_t amount, bigtime_t timeout)
|
||||
|
||||
sNeededMemory -= amount;
|
||||
|
||||
if (sAvailableMemory >= amount) {
|
||||
if (sAvailableMemory >= amount + reserve) {
|
||||
sAvailableMemory -= amount;
|
||||
return B_OK;
|
||||
}
|
||||
@ -4169,20 +4215,25 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
|
||||
if (cache->type != CACHE_TYPE_RAM)
|
||||
return B_NOT_ALLOWED;
|
||||
|
||||
bool anyKernelArea = false;
|
||||
if (oldSize < newSize) {
|
||||
// We need to check if all areas of this cache can be resized
|
||||
for (VMArea* current = cache->areas; current != NULL;
|
||||
current = current->cache_next) {
|
||||
if (!current->address_space->CanResizeArea(current, newSize))
|
||||
return B_ERROR;
|
||||
anyKernelArea |= current->address_space == VMAddressSpace::Kernel();
|
||||
}
|
||||
}
|
||||
|
||||
// Okay, looks good so far, so let's do it
|
||||
|
||||
int priority = kernel && anyKernelArea
|
||||
? VM_PRIORITY_SYSTEM : VM_PRIORITY_USER;
|
||||
|
||||
if (oldSize < newSize) {
|
||||
// Growing the cache can fail, so we do it first.
|
||||
status = cache->Resize(cache->virtual_base + newSize);
|
||||
status = cache->Resize(cache->virtual_base + newSize, priority);
|
||||
if (status != B_OK)
|
||||
return status;
|
||||
}
|
||||
@ -4208,7 +4259,7 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
|
||||
|
||||
// shrinking the cache can't fail, so we do it now
|
||||
if (status == B_OK && newSize < oldSize)
|
||||
status = cache->Resize(cache->virtual_base + newSize);
|
||||
status = cache->Resize(cache->virtual_base + newSize, priority);
|
||||
|
||||
if (status != B_OK) {
|
||||
// Something failed -- resize the areas back to their original size.
|
||||
@ -4222,7 +4273,7 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
|
||||
}
|
||||
}
|
||||
|
||||
cache->Resize(cache->virtual_base + oldSize);
|
||||
cache->Resize(cache->virtual_base + oldSize, priority);
|
||||
}
|
||||
|
||||
// TODO: we must honour the lock restrictions of this area
|
||||
|
@ -12,6 +12,8 @@
|
||||
#include <string.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include <KernelExport.h>
|
||||
#include <OS.h>
|
||||
|
||||
@ -59,6 +61,16 @@
|
||||
// be written
|
||||
|
||||
|
||||
// The page reserve an allocation of the certain priority must not touch.
|
||||
static const size_t kPageReserveForPriority[] = {
|
||||
VM_PAGE_RESERVE_USER, // user
|
||||
VM_PAGE_RESERVE_SYSTEM, // system
|
||||
0 // VIP
|
||||
};
|
||||
|
||||
static const uint32 kMinimumSystemReserve = VM_PAGE_RESERVE_USER;
|
||||
|
||||
|
||||
int32 gMappedPagesCount;
|
||||
|
||||
static VMPageQueue sFreePageQueue;
|
||||
@ -71,6 +83,7 @@ static vm_page *sPages;
|
||||
static addr_t sPhysicalPageOffset;
|
||||
static size_t sNumPages;
|
||||
static vint32 sUnreservedFreePages;
|
||||
static vint32 sSystemReservedPages;
|
||||
static vint32 sPageDeficit;
|
||||
static vint32 sModifiedTemporaryPages;
|
||||
|
||||
@ -560,6 +573,7 @@ dump_page_stats(int argc, char **argv)
|
||||
counter[PAGE_STATE_WIRED], counter[PAGE_STATE_MODIFIED],
|
||||
counter[PAGE_STATE_FREE], counter[PAGE_STATE_CLEAR]);
|
||||
kprintf("unreserved free pages: %" B_PRId32 "\n", sUnreservedFreePages);
|
||||
kprintf("system reserved pages: %" B_PRId32 "\n", sSystemReservedPages);
|
||||
kprintf("page deficit: %lu\n", sPageDeficit);
|
||||
kprintf("mapped pages: %lu\n", gMappedPagesCount);
|
||||
|
||||
@ -578,6 +592,32 @@ dump_page_stats(int argc, char **argv)
|
||||
}
|
||||
|
||||
|
||||
// #pragma mark -
|
||||
|
||||
|
||||
static void
|
||||
unreserve_page()
|
||||
{
|
||||
int32 systemReserve = sSystemReservedPages;
|
||||
if (systemReserve >= (int32)kMinimumSystemReserve) {
|
||||
atomic_add(&sUnreservedFreePages, 1);
|
||||
} else {
|
||||
// Note: Due to the race condition, we might increment
|
||||
// sSystemReservedPages beyond its desired count. That doesn't matter
|
||||
// all that much, though, since its only about a single page and
|
||||
// vm_page_reserve_pages() will correct this when the general reserve
|
||||
// is running low.
|
||||
atomic_add(&sSystemReservedPages, 1);
|
||||
}
|
||||
|
||||
if (sPageDeficit > 0) {
|
||||
MutexLocker pageDeficitLocker(sPageDeficitLock);
|
||||
if (sPageDeficit > 0)
|
||||
sFreePageCondition.NotifyAll();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
free_page(vm_page* page, bool clear)
|
||||
{
|
||||
@ -640,7 +680,7 @@ free_page(vm_page* page, bool clear)
|
||||
|
||||
locker.Unlock();
|
||||
|
||||
atomic_add(&sUnreservedFreePages, 1);
|
||||
unreserve_page();
|
||||
}
|
||||
|
||||
|
||||
@ -807,7 +847,7 @@ page_scrubber(void *unused)
|
||||
// reservation warranty. The following is usually stricter than
|
||||
// necessary, because we don't have information on how many of the
|
||||
// reserved pages have already been allocated.
|
||||
if (!vm_page_try_reserve_pages(SCRUB_SIZE))
|
||||
if (!vm_page_try_reserve_pages(SCRUB_SIZE, VM_PRIORITY_USER))
|
||||
continue;
|
||||
|
||||
// get some pages from the free queue
|
||||
@ -1613,7 +1653,7 @@ steal_pages(vm_page **pages, size_t count)
|
||||
sFreePageQueue.PrependUnlocked(page);
|
||||
locker.Unlock();
|
||||
|
||||
atomic_add(&sUnreservedFreePages, 1);
|
||||
unreserve_page();
|
||||
|
||||
T(StolenPage());
|
||||
|
||||
@ -1902,7 +1942,7 @@ vm_page_init(kernel_args *args)
|
||||
sFreePageQueue.Append(&sPages[i]);
|
||||
}
|
||||
|
||||
atomic_add(&sUnreservedFreePages, sNumPages);
|
||||
sUnreservedFreePages = sNumPages;
|
||||
|
||||
TRACE(("initialized table\n"));
|
||||
|
||||
@ -1914,6 +1954,14 @@ vm_page_init(kernel_args *args)
|
||||
|
||||
TRACE(("vm_page_init: exit\n"));
|
||||
|
||||
// reserve pages for the system, that user allocations will not touch
|
||||
if (sUnreservedFreePages < (int32)kMinimumSystemReserve) {
|
||||
panic("Less pages than the system reserve!");
|
||||
sSystemReservedPages = sUnreservedFreePages;
|
||||
} else
|
||||
sSystemReservedPages = kMinimumSystemReserve;
|
||||
sUnreservedFreePages -= sSystemReservedPages;
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
@ -2050,6 +2098,25 @@ vm_page_unreserve_pages(uint32 count)
|
||||
|
||||
T(UnreservePages(count));
|
||||
|
||||
while (true) {
|
||||
int32 systemReserve = sSystemReservedPages;
|
||||
if (systemReserve >= (int32)kMinimumSystemReserve)
|
||||
break;
|
||||
|
||||
int32 toUnreserve = std::min((int32)count,
|
||||
(int32)kMinimumSystemReserve - systemReserve);
|
||||
if (atomic_test_and_set(&sSystemReservedPages,
|
||||
systemReserve + toUnreserve, systemReserve)
|
||||
== systemReserve) {
|
||||
count -= toUnreserve;
|
||||
if (count == 0)
|
||||
return;
|
||||
break;
|
||||
}
|
||||
|
||||
// the count changed in the meantime -- retry
|
||||
}
|
||||
|
||||
atomic_add(&sUnreservedFreePages, count);
|
||||
|
||||
if (sPageDeficit > 0) {
|
||||
@ -2067,19 +2134,65 @@ vm_page_unreserve_pages(uint32 count)
|
||||
The caller must not hold any cache lock or the function might deadlock.
|
||||
*/
|
||||
void
|
||||
vm_page_reserve_pages(uint32 count)
|
||||
vm_page_reserve_pages(uint32 count, int priority)
|
||||
{
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
T(ReservePages(count));
|
||||
|
||||
int32 oldFreePages = atomic_add(&sUnreservedFreePages, -count);
|
||||
if (oldFreePages >= (int32)count)
|
||||
return;
|
||||
while (true) {
|
||||
// Of the requested count reserve as many pages as possible from the
|
||||
// general reserve.
|
||||
int32 freePages = sUnreservedFreePages;
|
||||
if (freePages <= 0)
|
||||
break;
|
||||
|
||||
if (oldFreePages > 0)
|
||||
uint32 toReserve = std::min((int32)count, freePages);
|
||||
if (atomic_test_and_set(&sUnreservedFreePages,
|
||||
freePages - toReserve, freePages)
|
||||
!= freePages) {
|
||||
// the count changed in the meantime -- retry
|
||||
continue;
|
||||
}
|
||||
|
||||
count -= toReserve;
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
// Try to get the remaining pages from the system reserve.
|
||||
uint32 systemReserve = kPageReserveForPriority[priority];
|
||||
while (true) {
|
||||
int32 systemFreePages = sSystemReservedPages;
|
||||
uint32 toReserve = 0;
|
||||
if (systemFreePages > (int32)systemReserve) {
|
||||
toReserve = std::min(count, systemFreePages - systemReserve);
|
||||
if (atomic_test_and_set(&sSystemReservedPages,
|
||||
systemFreePages - toReserve, systemFreePages)
|
||||
!= systemFreePages) {
|
||||
// the count changed in the meantime -- retry
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
count -= toReserve;
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
// subtract the remaining pages
|
||||
int32 oldFreePages = atomic_add(&sUnreservedFreePages, -(int32)count);
|
||||
if (oldFreePages > 0) {
|
||||
if ((int32)count <= oldFreePages)
|
||||
return;
|
||||
count -= oldFreePages;
|
||||
// TODO: Activate low-memory handling/page daemon!
|
||||
}
|
||||
|
||||
steal_pages(NULL, count + 1);
|
||||
// we get one more, just in case we can do something someone
|
||||
@ -2088,7 +2201,7 @@ vm_page_reserve_pages(uint32 count)
|
||||
|
||||
|
||||
bool
|
||||
vm_page_try_reserve_pages(uint32 count)
|
||||
vm_page_try_reserve_pages(uint32 count, int priority)
|
||||
{
|
||||
if (count == 0)
|
||||
return true;
|
||||
@ -2096,16 +2209,43 @@ vm_page_try_reserve_pages(uint32 count)
|
||||
T(ReservePages(count));
|
||||
|
||||
while (true) {
|
||||
// From the requested count reserve as many pages as possible from the
|
||||
// general reserve.
|
||||
int32 freePages = sUnreservedFreePages;
|
||||
if (freePages < (int32)count)
|
||||
return false;
|
||||
|
||||
if (atomic_test_and_set(&sUnreservedFreePages, freePages - count,
|
||||
freePages) == freePages) {
|
||||
return true;
|
||||
uint32 reserved = 0;
|
||||
if (freePages > 0) {
|
||||
reserved = std::min((int32)count, freePages);
|
||||
if (atomic_test_and_set(&sUnreservedFreePages,
|
||||
freePages - reserved, freePages)
|
||||
!= freePages) {
|
||||
// the count changed in the meantime -- retry
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// the count changed in the meantime -- retry
|
||||
if (reserved == count)
|
||||
return true;
|
||||
|
||||
// Try to get the remaining pages from the system reserve.
|
||||
uint32 systemReserve = kPageReserveForPriority[priority];
|
||||
uint32 leftToReserve = count - reserved;
|
||||
while (true) {
|
||||
int32 systemFreePages = sSystemReservedPages;
|
||||
if ((uint32)systemFreePages < leftToReserve + systemReserve) {
|
||||
// no dice
|
||||
vm_page_unreserve_pages(reserved);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (atomic_test_and_set(&sSystemReservedPages,
|
||||
systemFreePages - leftToReserve, systemFreePages)
|
||||
== systemFreePages) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// the count changed in the meantime -- retry
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2234,11 +2374,12 @@ allocate_page_run(page_num_t start, page_num_t length, int pageState,
|
||||
|
||||
|
||||
vm_page *
|
||||
vm_page_allocate_page_run(int pageState, addr_t base, addr_t length)
|
||||
vm_page_allocate_page_run(int pageState, addr_t base, addr_t length,
|
||||
int priority)
|
||||
{
|
||||
uint32 start = base >> PAGE_SHIFT;
|
||||
|
||||
if (!vm_page_try_reserve_pages(length))
|
||||
if (!vm_page_try_reserve_pages(length, priority))
|
||||
return NULL;
|
||||
// TODO: add more tries, ie. free some inactive, ...
|
||||
// no free space
|
||||
@ -2273,7 +2414,7 @@ vm_page_allocate_page_run(int pageState, addr_t base, addr_t length)
|
||||
|
||||
|
||||
vm_page *
|
||||
vm_page_allocate_page_run_no_base(int pageState, addr_t count)
|
||||
vm_page_allocate_page_run_no_base(int pageState, addr_t count, int priority)
|
||||
{
|
||||
VMPageQueue* queue;
|
||||
VMPageQueue* otherQueue;
|
||||
@ -2290,7 +2431,7 @@ vm_page_allocate_page_run_no_base(int pageState, addr_t count)
|
||||
return NULL; // invalid
|
||||
}
|
||||
|
||||
if (!vm_page_try_reserve_pages(count))
|
||||
if (!vm_page_try_reserve_pages(count, priority))
|
||||
return NULL;
|
||||
// TODO: add more tries, ie. free some inactive, ...
|
||||
// no free space
|
||||
|