* VMCache: Added a UserData attribute which can be used by the lock holder.
* Added a "bool consumerLocked" parameter to VMCache::Unlock() and ReleaseRefAndUnlock(). Since Unlock() may cause the cache to be merged with a consumer cache, the flag is needed to prevent a deadlock in case the caller still holds a lock on the consumer. This hasn't been a problem yet, since that situation never occurred.
* VMCacheChainLocker: Reversed the unlocking order to bottom-up. Unlocking in the other direction could cause a deadlock when caches are merged, since it would reverse the locking order. The way VMCacheChainLocker was used, this didn't happen, though.
* fault_get_page(): While copying a page from a lower cache to the top cache, we now unlock all caches but the top one, so we don't unnecessarily kill concurrency.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35153 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 3632eeedb9 (parent 5ec7bd49cb)
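The deadlock from the second bullet is easy to hit once a caller holds the consumer's lock: Unlock() may decide to merge the cache with that consumer and would then block trying to acquire a lock its own caller already owns. A minimal sketch of the intended use of the new parameter follows. It is not code from this commit; the function and variables are invented for illustration, only the VMCache calls are taken from the diff, and <vm/VMCache.h> is assumed to be the private kernel header declaring them.

```cpp
#include <vm/VMCache.h>

// Hypothetical kernel-internal helper: "consumer" is the only consumer of
// "source", and both caches are locked in the usual consumer -> source order.
static void
touch_source_then_unlock(VMCache* consumer, VMCache* source)
{
	consumer->Lock();
	source->Lock();

	// ... manipulate the chain ...

	// We still hold the consumer's lock, so pass consumerLocked = true: if
	// "source" has become mergeable, Unlock() performs the merge with the
	// lock we already hold instead of trying to (re)acquire it, which would
	// self-deadlock or reverse the locking order.
	source->Unlock(true);
	consumer->Unlock();
}
```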
@@ -74,14 +74,15 @@ public:
 	inline bool TryLock();
 	inline bool SwitchLock(mutex* from);
 	inline bool SwitchFromReadLock(rw_lock* from);
-	void Unlock();
+	void Unlock(bool consumerLocked = false);
 	inline void AssertLocked();

 	inline void AcquireRefLocked();
 	inline void AcquireRef();
 	inline void ReleaseRefLocked();
 	inline void ReleaseRef();
-	inline void ReleaseRefAndUnlock();
+	inline void ReleaseRefAndUnlock(
+		bool consumerLocked = false);

 	void WaitForPageEvents(vm_page* page, uint32 events,
 		bool relock);
@@ -108,6 +109,11 @@ public:
 	status_t FlushAndRemoveAllPages();

+	void* UserData() { return fUserData; }
+	void SetUserData(void* data) { fUserData = data; }
+		// Settable by the lock owner and valid as
+		// long as the lock is owned.
+
 	// for debugging only
 	mutex* GetLock()
 		{ return &fLock; }
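The UserData accessors added above are intentionally unstructured: the stored pointer belongs to whoever currently holds the cache's lock and is only valid for as long as the lock is held (the VMCacheChainLocker changes further down use it to remember each cache's consumer). A small hypothetical sketch of that contract, with an invented function name and cookie:

```cpp
// Hypothetical lock holder stashing a per-cache cookie. The value is only
// meaningful while this thread owns the lock, so it is cleared again before
// unlocking to avoid handing a stale pointer to the next lock owner.
static void
with_cache_cookie(VMCache* cache, void* cookie)
{
	cache->Lock();
	cache->SetUserData(cookie);

	// ... while locked, UserData() returns exactly what was stored ...
	void* stored = cache->UserData();
	(void)stored;

	cache->SetUserData(NULL);
	cache->Unlock();
}
```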
@@ -171,13 +177,14 @@ private:
 	inline bool _IsMergeable() const;

-	void _MergeWithOnlyConsumer();
+	void _MergeWithOnlyConsumer(bool consumerLocked);
 	void _RemoveConsumer(VMCache* consumer);

 private:
 	int32 fRefCount;
 	mutex fLock;
 	PageEventWaiter* fPageEventWaiters;
+	void* fUserData;
 };
@@ -272,10 +279,10 @@ VMCache::ReleaseRef()


 void
-VMCache::ReleaseRefAndUnlock()
+VMCache::ReleaseRefAndUnlock(bool consumerLocked)
 {
 	ReleaseRefLocked();
-	Unlock();
+	Unlock(consumerLocked);
 }
@@ -643,12 +643,14 @@ VMCache::Delete()


 void
-VMCache::Unlock()
+VMCache::Unlock(bool consumerLocked)
 {
 	while (fRefCount == 1 && _IsMergeable()) {
 		VMCache* consumer = (VMCache*)list_get_first_item(&consumers);
-		if (consumer->TryLock()) {
-			_MergeWithOnlyConsumer();
+		if (consumerLocked) {
+			_MergeWithOnlyConsumer(true);
+		} else if (consumer->TryLock()) {
+			_MergeWithOnlyConsumer(false);
 		} else {
 			// Someone else has locked the consumer ATM. Unlock this cache and
 			// wait for the consumer lock. Increment the cache's ref count
@@ -662,7 +664,7 @@ VMCache::Unlock()
 			if (consumerLocked) {
 				if (fRefCount == 1 && _IsMergeable()
 						&& consumer == list_get_first_item(&consumers)) {
-					_MergeWithOnlyConsumer();
+					_MergeWithOnlyConsumer(false);
 				} else {
 					// something changed, get rid of the consumer lock
 					consumer->Unlock();
@@ -1225,7 +1227,7 @@ VMCache::_NotifyPageEvents(vm_page* page, uint32 events)
 	will unlock the consumer lock.
 */
 void
-VMCache::_MergeWithOnlyConsumer()
+VMCache::_MergeWithOnlyConsumer(bool consumerLocked)
 {
 	VMCache* consumer = (VMCache*)list_remove_head_item(&consumers);

@@ -1256,7 +1258,8 @@ VMCache::_MergeWithOnlyConsumer()
 	// over the cache's ref to its source (if any) instead.
 	ReleaseRefLocked();

-	consumer->Unlock();
+	if (!consumerLocked)
+		consumer->Unlock();
 }
@@ -128,6 +128,9 @@ public:
 	{
 		fTopCache = topCache;
 		fBottomCache = topCache;
+
+		if (topCache != NULL)
+			topCache->SetUserData(NULL);
 	}

 	VMCache* LockSourceCache()
@@ -135,9 +138,12 @@ public:
 		if (fBottomCache == NULL || fBottomCache->source == NULL)
 			return NULL;

+		VMCache* previousCache = fBottomCache;
+
 		fBottomCache = fBottomCache->source;
 		fBottomCache->Lock();
 		fBottomCache->AcquireRefLocked();
+		fBottomCache->SetUserData(previousCache);

 		return fBottomCache;
 	}
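With the previousCache back-link stored in UserData() above, the chain can later be walked in the consumer direction. A hedged usage sketch, based only on the methods visible in this diff (the no-argument Unlock() call and the caller-locked, referenced top cache are assumptions):

```cpp
// Sketch: lock a whole cache chain and release it again. topCache is assumed
// to already be locked and referenced by the caller.
VMCacheChainLocker locker;
locker.SetTo(topCache);
	// also clears topCache's UserData(), so no stale back-link is left

while (VMCache* source = locker.LockSourceCache()) {
	// "source" is now locked and referenced; source->UserData() points back
	// to its consumer, which lets Unlock() walk the chain bottom-up.
}

locker.Unlock();
	// releases the references and unlocks in source -> consumer order
	// (see the Unlock() hunk below)
```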
@@ -153,13 +159,16 @@ public:
 		if (fTopCache == NULL)
 			return;

-		VMCache* cache = fTopCache;
+		// Unlock caches in source -> consumer direction. This is important to
+		// avoid double-locking and a reversal of locking order in case a cache
+		// is eligable for merging.
+		VMCache* cache = fBottomCache;
 		while (cache != NULL) {
-			VMCache* nextCache = cache->source;
+			VMCache* nextCache = (VMCache*)cache->UserData();
 			if (cache != exceptCache)
-				cache->ReleaseRefAndUnlock();
+				cache->ReleaseRefAndUnlock(cache != fTopCache);

-			if (cache == fBottomCache)
+			if (cache == fTopCache)
 				break;

 			cache = nextCache;
@@ -169,6 +178,43 @@ public:
 		fBottomCache = NULL;
 	}

+	void UnlockKeepRefs(bool keepTopCacheLocked)
+	{
+		if (fTopCache == NULL)
+			return;
+
+		VMCache* nextCache = fBottomCache;
+		VMCache* cache = NULL;
+
+		while (keepTopCacheLocked
+				? nextCache != fTopCache : cache != fTopCache) {
+			cache = nextCache;
+			nextCache = (VMCache*)cache->UserData();
+			cache->Unlock(cache != fTopCache);
+		}
+	}
+
+	void RelockCaches(bool topCacheLocked)
+	{
+		if (fTopCache == NULL)
+			return;
+
+		VMCache* nextCache = fTopCache;
+		VMCache* cache = NULL;
+		if (topCacheLocked) {
+			cache = nextCache;
+			nextCache = cache->source;
+		}
+
+		while (cache != fBottomCache && nextCache != NULL) {
+			VMCache* consumer = cache;
+			cache = nextCache;
+			nextCache = cache->source;
+			cache->Lock();
+			cache->SetUserData(consumer);
+		}
+	}
+
 private:
 	VMCache* fTopCache;
 	VMCache* fBottomCache;
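UnlockKeepRefs() and RelockCaches() are meant to be used as a pair around a long operation, which is exactly what the fault_get_page() hunk below does. A condensed sketch of the pattern (locker stands for a fully locked VMCacheChainLocker as built above; the work in the middle is a placeholder):

```cpp
// With the arguments set to true the top cache stays locked the whole time;
// the references taken by LockSourceCache() keep the other caches alive
// while their locks are dropped.
locker.UnlockKeepRefs(true);

// ... long operation that should not hold the entire chain locked ...

locker.RelockCaches(true);
	// re-locks consumer -> source and restores the UserData() back-links
```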
@@ -3650,10 +3696,21 @@ fault_get_page(PageFaultContext& context)
 		FTRACE(("get new page, copy it, and put it into the topmost cache\n"));
 		page = vm_page_allocate_page(PAGE_STATE_FREE);

+		// To not needlessly kill concurrency we unlock all caches but the top
+		// one while copying the page. Lacking another mechanism to ensure that
+		// the source page doesn't disappear, we mark it busy.
+		int sourcePageState = sourcePage->state;
+		sourcePage->state = PAGE_STATE_BUSY;
+		context.cacheChainLocker.UnlockKeepRefs(true);
+
 		// copy the page
 		vm_memcpy_physical_page(page->physical_page_number * B_PAGE_SIZE,
 			sourcePage->physical_page_number * B_PAGE_SIZE);

+		context.cacheChainLocker.RelockCaches(true);
+		sourcePage->state = sourcePageState;
+		sourcePage->cache->NotifyPageEvents(sourcePage, PAGE_EVENT_NOT_BUSY);
+
 		// insert the new page into our cache
 		context.topCache->InsertPage(page, context.cacheOffset);
 	} else