Patch by Zhao Shuai with minor changes by myself:

* Moved the static functions to the beginning of the file.
* Renamed several variables for clarity, particularly
  offset/cacheOffset to index/pageIndex in cases where the unit is pages.
* _SwapBlock{Build,Free}() can deal with a range of pages now, which
  should be more efficient.
* Additional checks/panics in swap_page_alloc().
* swap_file_add(): Wrong byte count was used when allocating and
  clearing the bitmap.
* swap_page_alloc(): Fixed off-by-one check. The last page of the swap
  file could not be used.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26975 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2008-08-14 20:46:12 +00:00
parent c4cbe945fe
commit 4431edd453
2 changed files with 284 additions and 266 deletions

VMAnonymousCache.cpp

@@ -49,8 +49,9 @@
#define SWAP_PAGE_NONE (~(swap_addr_t)0)
// bitmap allocation macros
#define MAP_SHIFT 5
#define NUM_BITS_PER_WORD 32 // number of bits per word
#define NUM_BYTES_PER_WORD 4
#define NUM_BITS_PER_WORD (8 * NUM_BYTES_PER_WORD)
#define MAP_SHIFT 5 // 1 << MAP_SHIFT == NUM_BITS_PER_WORD
#define TESTBIT(map, i) \
(((map)[(i) >> MAP_SHIFT] & (1 << (i) % NUM_BITS_PER_WORD)))
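
For context, a self-contained sketch of the word-based bitmap these macros implement. TESTBIT is copied from the hunk above; the SETBIT and CLEARBIT bodies are my assumptions modeled on it, since their definitions fall outside this hunk (both are used further down in swap_page_alloc() and swap_page_dealloc()):

#include <stdint.h>
#include <assert.h>

#define NUM_BYTES_PER_WORD 4
#define NUM_BITS_PER_WORD (8 * NUM_BYTES_PER_WORD)
#define MAP_SHIFT 5 // 1 << MAP_SHIFT == NUM_BITS_PER_WORD

// bit i lives in word i >> MAP_SHIFT, at position i % NUM_BITS_PER_WORD
#define TESTBIT(map, i) \
    (((map)[(i) >> MAP_SHIFT] & (1 << (i) % NUM_BITS_PER_WORD)))
#define SETBIT(map, i) \
    ((map)[(i) >> MAP_SHIFT] |= (1 << (i) % NUM_BITS_PER_WORD))
#define CLEARBIT(map, i) \
    ((map)[(i) >> MAP_SHIFT] &= ~(1 << (i) % NUM_BITS_PER_WORD))

int main()
{
    uint32_t map[2] = { 0, 0 }; // tracks 64 pages
    SETBIT(map, 37);            // word 1, bit 5
    assert(TESTBIT(map, 37));
    CLEARBIT(map, 37);
    assert(!TESTBIT(map, 37));
    return 0;
}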
@@ -75,7 +76,7 @@ struct swap_file : DoublyLinkedListLinkImpl<swap_file> {
struct swap_hash_key {
VMAnonymousCache *cache;
off_t cache_offset;
off_t page_index; // page index in the cache
};
// Each swap block contains SWAP_BLOCK_PAGES pages
@@ -85,14 +86,6 @@ struct swap_block : HashTableLink<swap_block> {
swap_addr_t swap_pages[SWAP_BLOCK_PAGES];
};
static swap_addr_t swap_page_alloc(uint32 npages);
static void swap_page_dealloc(swap_addr_t pageIndex, uint32 npages);
static swap_file *find_swap_file(swap_addr_t pageIndex);
static off_t swap_space_reserve(off_t amount);
static void swap_space_unreserve(off_t amount);
static object_cache *sSwapBlockCache;
struct SwapHashTableDefinition {
typedef swap_hash_key KeyType;
typedef swap_block ValueType;
@@ -101,9 +94,9 @@ struct SwapHashTableDefinition {
size_t HashKey(const swap_hash_key& key) const
{
off_t cacheOffset = key.cache_offset >> SWAP_BLOCK_SHIFT;
off_t blockIndex = key.page_index >> SWAP_BLOCK_SHIFT;
VMAnonymousCache *cache = key.cache;
return cacheOffset ^ (int)(int *)cache;
return blockIndex ^ (int)(int *)cache;
}
size_t Hash(const swap_block *value) const
@@ -113,8 +106,8 @@ struct SwapHashTableDefinition {
bool Compare(const swap_hash_key& key, const swap_block *value) const
{
return (key.cache_offset & ~(off_t)SWAP_BLOCK_MASK)
== (value->key.cache_offset & ~(off_t)SWAP_BLOCK_MASK)
return (key.page_index & ~(off_t)SWAP_BLOCK_MASK)
== (value->key.page_index & ~(off_t)SWAP_BLOCK_MASK)
&& key.cache == value->key.cache;
}
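
Pages are grouped SWAP_BLOCK_PAGES to a swap_block, so HashKey() hashes the block index while Compare() masks both keys down to the block base. A minimal sketch of that key arithmetic; SWAP_BLOCK_SHIFT is assumed to be 5 here (32 pages per block), since the actual defines fall outside this hunk:

#include <stdint.h>
#include <stdio.h>

static const int kSwapBlockShift = 5;                             // assumed
static const int64_t kSwapBlockMask = (1 << kSwapBlockShift) - 1; // 31

int main()
{
    int64_t pageIndex = 1000;
    int64_t blockBase = pageIndex & ~kSwapBlockMask;   // 992, key.page_index
    int64_t blockIndex = pageIndex >> kSwapBlockShift; // 31, fed to the hash
    int64_t slot = pageIndex & kSwapBlockMask;         // 8, index into swap_pages[]
    printf("%lld %lld %lld\n", (long long)blockBase,
        (long long)blockIndex, (long long)slot);
    return 0;
}

Under these assumptions all pages 992..1023 land in the same swap_block and compare equal, which is what lets one block cover a 32-page window of the cache.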
@@ -138,18 +131,162 @@ static uint32 sSwapFileCount = 0;
static off_t sAvailSwapSpace = 0;
static mutex sAvailSwapSpaceLock;
static object_cache *sSwapBlockCache;
static swap_addr_t
swap_page_alloc(uint32 npages)
{
mutex_lock(&sSwapFileListLock);
if (sSwapFileList.IsEmpty()) {
mutex_unlock(&sSwapFileListLock);
panic("swap_page_alloc(): no swap file in the system\n");
return SWAP_PAGE_NONE;
}
// compute how many pages are free in all swap files
uint32 freeSwapPages = 0;
for (SwapFileList::Iterator it = sSwapFileList.GetIterator();
swap_file *file = it.Next();)
freeSwapPages += file->last_page - file->first_page - file->used;
if (freeSwapPages < npages) {
mutex_unlock(&sSwapFileListLock);
panic("swap_page_alloc(): swap space exhausted!\n");
return SWAP_PAGE_NONE;
}
swap_addr_t hint = 0;
swap_addr_t j;
for (j = 0; j < sSwapFileCount; j++) {
if (sSwapFileAlloc == NULL)
sSwapFileAlloc = sSwapFileList.First();
hint = sSwapFileAlloc->hint;
swap_addr_t pageCount = sSwapFileAlloc->last_page
- sSwapFileAlloc->first_page;
swap_addr_t i = 0;
while (i < npages && (hint + npages) <= pageCount) {
if (TESTBIT(sSwapFileAlloc->maps, hint + i)) {
hint++;
i = 0;
} else
i++;
}
if (i == npages)
break;
// this swap_file is full, find another
sSwapFileAlloc = sSwapFileList.GetNext(sSwapFileAlloc);
}
// no swap file can allocate that many pages; return SWAP_PAGE_NONE
// so that VMAnonymousCache::Write() can adjust the allocation amount
if (j == sSwapFileCount) {
mutex_unlock(&sSwapFileListLock);
return SWAP_PAGE_NONE;
}
swap_addr_t swapAddress = sSwapFileAlloc->first_page + hint;
for (uint32 i = 0; i < npages; i++)
SETBIT(sSwapFileAlloc->maps, hint + i);
if (hint == sSwapFileAlloc->hint)
sSwapFileAlloc->hint += npages;
sSwapFileAlloc->used += npages;
// if this swap file has used more than 90% of its pages
// switch to another
if (sSwapFileAlloc->used
> 9 * (sSwapFileAlloc->last_page - sSwapFileAlloc->first_page) / 10)
sSwapFileAlloc = sSwapFileList.GetNext(sSwapFileAlloc);
mutex_unlock(&sSwapFileListLock);
return swapAddress;
}
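
The scan is a first-fit search for npages consecutive clear bits: hitting a set bit restarts the run one page further on. Note the boundary test (hint + npages) <= pageCount; the older copy of this function further down still uses <, which is the off-by-one from the commit message that left the last page of a swap file unusable. A standalone sketch of the same search over a bare bitmap (TESTBIT as above):

#include <stdint.h>
#include <stdio.h>

#define MAP_SHIFT 5
#define NUM_BITS_PER_WORD 32
#define TESTBIT(map, i) \
    (((map)[(i) >> MAP_SHIFT] & (1 << (i) % NUM_BITS_PER_WORD)))

// first-fit search for npages clear bits; returns the start index or -1
static int32_t
find_clear_run(const uint32_t *map, uint32_t pageCount, uint32_t npages)
{
    uint32_t hint = 0;
    uint32_t i = 0;
    // "<=" lets a run end exactly at the last page; "<" would not
    while (i < npages && hint + npages <= pageCount) {
        if (TESTBIT(map, hint + i)) {
            hint++; // set bit: restart the run one page further on
            i = 0;
        } else
            i++;
    }
    return i == npages ? (int32_t)hint : -1;
}

int main()
{
    uint32_t map[1] = { 0x0000000f }; // pages 0..3 used, 4..31 free
    printf("%d\n", find_clear_run(map, 32, 4));  // 4
    printf("%d\n", find_clear_run(map, 32, 28)); // 4 (run ends at page 31)
    printf("%d\n", find_clear_run(map, 32, 29)); // -1
    return 0;
}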
static swap_file *
find_swap_file(swap_addr_t swapAddress)
{
for (SwapFileList::Iterator it = sSwapFileList.GetIterator();
swap_file *swapFile = it.Next();) {
if (swapAddress >= swapFile->first_page
&& swapAddress < swapFile->last_page)
return swapFile;
}
panic("find_swap_file(): can't find swap file for %ld\n", swapAddress);
return NULL;
}
static void
swap_page_dealloc(swap_addr_t swapAddress, uint32 npages)
{
if (swapAddress == SWAP_PAGE_NONE)
return;
mutex_lock(&sSwapFileListLock);
swap_file *swapFile = find_swap_file(swapAddress);
swapAddress -= swapFile->first_page;
for (uint32 i = 0; i < npages; i++)
CLEARBIT(swapFile->maps, swapAddress + i);
if (swapFile->hint > swapAddress)
swapFile->hint = swapAddress;
swapFile->used -= npages;
mutex_unlock(&sSwapFileListLock);
}
static off_t
swap_space_reserve(off_t amount)
{
mutex_lock(&sAvailSwapSpaceLock);
if (sAvailSwapSpace >= amount)
sAvailSwapSpace -= amount;
else {
sAvailSwapSpace = 0;
amount = sAvailSwapSpace;
}
mutex_unlock(&sAvailSwapSpaceLock);
return amount;
}
static void
swap_space_unreserve(off_t amount)
{
mutex_lock(&sAvailSwapSpaceLock);
sAvailSwapSpace += amount;
mutex_unlock(&sAvailSwapSpaceLock);
}
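
One thing worth noting in the shortfall branch above: sAvailSwapSpace is zeroed before it is copied into amount, so a caller asking for more than is available is told that 0 bytes were reserved even when some space remained. Presumably the two statements were meant in the opposite order; a sketch of that reading, with a plain int64_t standing in for the locked global (locking omitted for brevity):

#include <stdint.h>
#include <stdio.h>

static int64_t sAvail = 1024; // stand-in for sAvailSwapSpace, in bytes

// reserve up to 'amount' bytes; returns how much was actually reserved
static int64_t
reserve(int64_t amount)
{
    if (sAvail >= amount)
        sAvail -= amount;
    else {
        amount = sAvail; // read the remainder first...
        sAvail = 0;      // ...then mark the pool empty
    }
    return amount;
}

int main()
{
    printf("%lld\n", (long long)reserve(1000)); // 1000
    printf("%lld\n", (long long)reserve(1000)); // 24 (not 0)
    return 0;
}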
// #pragma mark -
VMAnonymousCache::~VMAnonymousCache()
{
// free allocated swap space and swap block
for (off_t offset = virtual_base, toFree = fAllocatedSwapSize;
offset < virtual_end && toFree > 0; offset += B_PAGE_SIZE) {
swap_addr_t pageIndex = _SwapBlockGetAddress(offset >> PAGE_SHIFT);
if (pageIndex == SWAP_PAGE_NONE)
swap_addr_t swapAddr = _SwapBlockGetAddress(offset >> PAGE_SHIFT);
if (swapAddr == SWAP_PAGE_NONE)
continue;
swap_page_dealloc(pageIndex, 1);
_SwapBlockFree(offset >> PAGE_SHIFT);
swap_page_dealloc(swapAddr, 1);
_SwapBlockFree(offset >> PAGE_SHIFT, 1);
toFree -= B_PAGE_SIZE;
}
@@ -202,10 +339,9 @@ VMAnonymousCache::Commit(off_t size)
bool
VMAnonymousCache::HasPage(off_t offset)
{
offset >>= PAGE_SHIFT;
swap_addr_t pageIndex = _SwapBlockGetAddress(offset);
if (pageIndex != SWAP_PAGE_NONE)
if (_SwapBlockGetAddress(offset >> PAGE_SHIFT) != SWAP_PAGE_NONE)
return true;
return false;
}
@@ -214,23 +350,19 @@ status_t
VMAnonymousCache::Read(off_t offset, const iovec *vecs, size_t count,
size_t *_numBytes)
{
off_t cacheOffset = offset >> PAGE_SHIFT;
off_t pageIndex = offset >> PAGE_SHIFT;
for (uint32 i = 0, j = 0; i < count; i = j) {
swap_addr_t startPageIndex = _SwapBlockGetAddress(cacheOffset + i);
swap_addr_t startSwapAddr = _SwapBlockGetAddress(pageIndex + i);
for (j = i + 1; j < count; j++) {
swap_addr_t pageIndex = _SwapBlockGetAddress(cacheOffset + j);
if (pageIndex != startPageIndex + j - i)
swap_addr_t swapAddr = _SwapBlockGetAddress(pageIndex + j);
if (swapAddr != startSwapAddr + j - i)
break;
}
swap_file *swapFile = find_swap_file(startPageIndex);
if (swapFile == NULL) {
panic("can't find swap file for page index %ld\n", startPageIndex);
return B_ERROR;
}
swap_file *swapFile = find_swap_file(startSwapAddr);
off_t pos = (startPageIndex - swapFile->first_page) * PAGE_SIZE;
off_t pos = (startSwapAddr - swapFile->first_page) * PAGE_SIZE;
status_t status = vfs_read_pages(swapFile->vnode, NULL, pos, vecs + i,
j - i, 0, _numBytes);
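
The nested loop batches I/O: page i's swap address is looked up, j then advances while the following pages map to consecutive swap addresses, and the whole run goes to vfs_read_pages() as a single call. A sketch of just that run detection, with uint32_t standing in for swap_addr_t:

#include <stdint.h>
#include <stdio.h>

// report maximal runs of consecutive swap addresses, the same grouping
// Read() uses to batch pages into one vfs_read_pages() call
static void
print_runs(const uint32_t *swapAddrs, uint32_t count)
{
    for (uint32_t i = 0, j = 0; i < count; i = j) {
        uint32_t start = swapAddrs[i];
        for (j = i + 1; j < count; j++) {
            if (swapAddrs[j] != start + (j - i))
                break;
        }
        printf("pages %u..%u -> swap %u..%u\n", i, j - 1,
            start, start + (j - i) - 1);
    }
}

int main()
{
    uint32_t addrs[] = { 100, 101, 102, 7, 8, 50 };
    print_runs(addrs, 6); // three runs: 100..102, 7..8, 50..50
    return 0;
}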
@@ -246,14 +378,14 @@ status_t
VMAnonymousCache::Write(off_t offset, const iovec *vecs, size_t count,
size_t *_numBytes)
{
offset >>= PAGE_SHIFT;
off_t pageIndex = offset >> PAGE_SHIFT;
Lock();
for (uint32 i = 0; i < count; i++) {
swap_addr_t pageIndex = _SwapBlockGetAddress(offset + i);
if (pageIndex != SWAP_PAGE_NONE) {
swap_page_dealloc(pageIndex, 1);
_SwapBlockFree(offset + i);
swap_addr_t swapAddr = _SwapBlockGetAddress(pageIndex + i);
if (swapAddr != SWAP_PAGE_NONE) {
swap_page_dealloc(swapAddr, 1);
_SwapBlockFree(pageIndex + i, 1);
fAllocatedSwapSize -= B_PAGE_SIZE;
}
}
@@ -266,21 +398,17 @@ VMAnonymousCache::Write(off_t offset, const iovec *vecs, size_t count,
uint32 n = count;
for (uint32 i = 0; i < count; i += n) {
swap_addr_t pageIndex;
swap_addr_t swapAddr;
// try to allocate n pages; if that fails, try n/2
while ((pageIndex = swap_page_alloc(n)) == SWAP_PAGE_NONE
&& n >= 2)
while ((swapAddr = swap_page_alloc(n)) == SWAP_PAGE_NONE && n >= 2)
n >>= 1;
if (pageIndex == SWAP_PAGE_NONE)
panic("can't allocate swap space\n");
if (swapAddr == SWAP_PAGE_NONE)
panic("VMAnonymousCache::Write(): can't allocate swap space\n");
swap_file *swapFile = find_swap_file(pageIndex);
if (swapFile == NULL) {
panic("can't find swap file for page index %ld\n", pageIndex);
return B_ERROR;
}
swap_file *swapFile = find_swap_file(swapAddr);
off_t pos = (swapAddr - swapFile->first_page) * B_PAGE_SIZE;
off_t pos = (pageIndex - swapFile->first_page) * B_PAGE_SIZE;
status_t status = vfs_write_pages(swapFile->vnode, NULL, pos, vecs + i,
n, 0, _numBytes);
if (status != B_OK) {
@@ -288,12 +416,11 @@ VMAnonymousCache::Write(off_t offset, const iovec *vecs, size_t count,
fAllocatedSwapSize -= n * B_PAGE_SIZE;
Unlock();
swap_page_dealloc(pageIndex, n);
swap_page_dealloc(swapAddr, n);
return status;
}
for (uint32 j = 0; j < n; j++)
_SwapBlockBuild(offset + i + j, pageIndex + j);
_SwapBlockBuild(pageIndex + i, swapAddr, n);
}
return B_OK;
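
On the allocation side, Write() requests all count pages as one contiguous run and halves the request until swap_page_alloc() succeeds, so fragmentation degrades batching instead of failing the write. A minimal sketch of that back-off loop with a stand-in allocator; the clamp of n to the pages still left is my addition, since the hunk does not show how Write() avoids overshooting on the last iteration:

#include <stdint.h>
#include <stdio.h>

#define SWAP_PAGE_NONE (~(uint32_t)0)

// stand-in allocator: pretend no free run is longer than 4 pages
static uint32_t
fake_swap_page_alloc(uint32_t npages)
{
    static uint32_t next = 0;
    if (npages > 4)
        return SWAP_PAGE_NONE;
    uint32_t result = next;
    next += npages;
    return result;
}

int main()
{
    uint32_t count = 11; // pages to place
    uint32_t n = count;
    for (uint32_t i = 0; i < count; i += n) {
        if (n > count - i)
            n = count - i; // clamp to the remainder (my assumption)
        uint32_t swapAddr;
        // try n pages; on failure retry with n/2, as in Write() above
        while ((swapAddr = fake_swap_page_alloc(n)) == SWAP_PAGE_NONE
            && n >= 2)
            n >>= 1;
        if (swapAddr == SWAP_PAGE_NONE)
            return 1; // out of swap; the kernel panics here instead
        printf("pages %u..%u at swap %u\n", i, i + n - 1, swapAddr);
    }
    return 0;
}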
@@ -359,11 +486,10 @@ VMAnonymousCache::MergeStore(VMCache* _source)
if (committed_size > actualSize)
_Commit(actualSize);
// TODO: iterate over swap blocks
for (off_t offset = source->virtual_base; offset < source->virtual_end;
offset += PAGE_SIZE) {
off_t cacheOffset = offset >> PAGE_SHIFT;
swap_addr_t sourceSwapIndex = source->_SwapBlockGetAddress(cacheOffset);
off_t pageIndex = offset >> PAGE_SHIFT;
swap_addr_t sourceSwapIndex = source->_SwapBlockGetAddress(pageIndex);
if (sourceSwapIndex == SWAP_PAGE_NONE)
// this page is not swapped out
@@ -374,12 +500,12 @@ VMAnonymousCache::MergeStore(VMCache* _source)
// free the swap space
swap_page_dealloc(sourceSwapIndex, 1);
} else {
swap_addr_t swapIndex = _SwapBlockGetAddress(cacheOffset);
swap_addr_t swapIndex = _SwapBlockGetAddress(pageIndex);
if (swapIndex == SWAP_PAGE_NONE) {
// the page is not shadowed,
// assign the swap address to the new cache
_SwapBlockBuild(cacheOffset, sourceSwapIndex);
_SwapBlockBuild(pageIndex, sourceSwapIndex, 1);
fAllocatedSwapSize += B_PAGE_SIZE;
} else {
// the page is shadowed and is also swapped out
@@ -387,61 +513,70 @@ VMAnonymousCache::MergeStore(VMCache* _source)
}
}
source->fAllocatedSwapSize -= B_PAGE_SIZE;
source->_SwapBlockFree(cacheOffset);
source->_SwapBlockFree(pageIndex, 1);
}
}
void
VMAnonymousCache::_SwapBlockBuild(off_t cacheOffset, swap_addr_t pageIndex)
VMAnonymousCache::_SwapBlockBuild(off_t startPageIndex,
swap_addr_t startSwapAddress, uint32 count)
{
mutex_lock(&sSwapHashLock);
swap_hash_key key = { this, cacheOffset };
for (uint32 i = 0; i < count; i++) {
off_t pageIndex = startPageIndex + i;
swap_addr_t swapAddress = startSwapAddress + i;
swap_block *swap = sSwapHashTable.Lookup(key);
if (swap == NULL) {
swap = (swap_block *)object_cache_alloc(sSwapBlockCache,
CACHE_DONT_SLEEP);
swap_hash_key key = { this, pageIndex };
swap_block *swap = sSwapHashTable.Lookup(key);
if (swap == NULL) {
// TODO: wait until memory can be allocated
mutex_unlock(&sSwapHashLock);
return;
swap = (swap_block *)object_cache_alloc(sSwapBlockCache,
CACHE_DONT_SLEEP);
if (swap == NULL) {
// TODO: wait until memory can be allocated
mutex_unlock(&sSwapHashLock);
return;
}
swap->key.cache = this;
swap->key.page_index = pageIndex & ~(off_t)SWAP_BLOCK_MASK;
swap->used = 0;
for (uint32 i = 0; i < SWAP_BLOCK_PAGES; i++)
swap->swap_pages[i] = SWAP_PAGE_NONE;
sSwapHashTable.Insert(swap);
}
swap->key.cache = this;
swap->key.cache_offset = cacheOffset & ~(off_t)SWAP_BLOCK_MASK;
swap->used = 0;
for (uint32 i = 0; i < SWAP_BLOCK_PAGES; i++)
swap->swap_pages[i] = SWAP_PAGE_NONE;
swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
swap->swap_pages[blockIndex] = swapAddress;
sSwapHashTable.Insert(swap);
swap->used++;
}
swap_addr_t blockIndex = cacheOffset & SWAP_BLOCK_MASK;
swap->swap_pages[blockIndex] = pageIndex;
swap->used++;
mutex_unlock(&sSwapHashLock);
}
void
VMAnonymousCache::_SwapBlockFree(off_t cacheOffset)
VMAnonymousCache::_SwapBlockFree(off_t startPageIndex, uint32 count)
{
mutex_lock(&sSwapHashLock);
swap_hash_key key = { this, cacheOffset };
swap_block *swap = sSwapHashTable.Lookup(key);
if (swap != NULL) {
swap_addr_t pageIndex = swap->swap_pages[cacheOffset & SWAP_BLOCK_MASK];
if (pageIndex != SWAP_PAGE_NONE) {
swap->swap_pages[cacheOffset & SWAP_BLOCK_MASK] = SWAP_PAGE_NONE;
swap->used--;
if (swap->used == 0) {
sSwapHashTable.Remove(swap);
object_cache_free(sSwapBlockCache, swap);
for (uint32 i = 0; i < count; i++) {
off_t pageIndex = startPageIndex + i;
swap_hash_key key = { this, pageIndex };
swap_block *swap = sSwapHashTable.Lookup(key);
if (swap != NULL) {
swap_addr_t swapAddr = swap->swap_pages[pageIndex & SWAP_BLOCK_MASK];
if (swapAddr != SWAP_PAGE_NONE) {
swap->swap_pages[pageIndex & SWAP_BLOCK_MASK] = SWAP_PAGE_NONE;
swap->used--;
if (swap->used == 0) {
sSwapHashTable.Remove(swap);
object_cache_free(sSwapBlockCache, swap);
}
}
}
}
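
Taken together, _SwapBlockBuild() and _SwapBlockFree() maintain a sparse page-to-swap-address map per cache: a swap_block is created on first use, used counts its live entries, and the block goes back to the object cache once the count drops to zero. A toy model of that lifecycle, with std::map standing in for the kernel's hash table and object cache (the 32-entry block size is assumed):

#include <stdint.h>
#include <stdio.h>
#include <map>

static const uint32_t kNone = ~(uint32_t)0;

struct Block {
    uint32_t pages[32];
    uint32_t used;
    Block() : used(0) { for (int i = 0; i < 32; i++) pages[i] = kNone; }
};

static std::map<int64_t, Block> sBlocks; // keyed by pageIndex & ~31

static void
build(int64_t pageIndex, uint32_t swapAddress)
{
    Block &block = sBlocks[pageIndex & ~(int64_t)31]; // created on demand
    block.pages[pageIndex & 31] = swapAddress;
    block.used++;
}

static void
free_entry(int64_t pageIndex)
{
    std::map<int64_t, Block>::iterator it
        = sBlocks.find(pageIndex & ~(int64_t)31);
    if (it == sBlocks.end() || it->second.pages[pageIndex & 31] == kNone)
        return;
    it->second.pages[pageIndex & 31] = kNone;
    if (--it->second.used == 0)
        sBlocks.erase(it); // last entry gone: release the block
}

int main()
{
    build(1000, 42);
    build(1001, 43);
    free_entry(1000);
    printf("%zu\n", sBlocks.size()); // 1: block kept alive by page 1001
    free_entry(1001);
    printf("%zu\n", sBlocks.size()); // 0: block released
    return 0;
}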
@@ -451,22 +586,22 @@ VMAnonymousCache::_SwapBlockFree(off_t cacheOffset)
swap_addr_t
VMAnonymousCache::_SwapBlockGetAddress(off_t cacheOffset)
VMAnonymousCache::_SwapBlockGetAddress(off_t pageIndex)
{
mutex_lock(&sSwapHashLock);
swap_hash_key key = { this, cacheOffset };
swap_hash_key key = { this, pageIndex };
swap_block *swap = sSwapHashTable.Lookup(key);
swap_addr_t pageIndex = SWAP_PAGE_NONE;
swap_addr_t swapAddress = SWAP_PAGE_NONE;
if (swap != NULL) {
swap_addr_t blockIndex = cacheOffset & SWAP_BLOCK_MASK;
pageIndex = swap->swap_pages[blockIndex];
swap_addr_t blockIndex = pageIndex & SWAP_BLOCK_MASK;
swapAddress = swap->swap_pages[blockIndex];
}
mutex_unlock(&sSwapHashLock);
return pageIndex;
return swapAddress;
}
@@ -522,171 +657,7 @@ VMAnonymousCache::_Commit(off_t size)
}
static swap_file *
find_swap_file(swap_addr_t pageIndex)
{
for (SwapFileList::Iterator it = sSwapFileList.GetIterator();
swap_file *swapFile = it.Next();) {
if (pageIndex >= swapFile->first_page
&& pageIndex < swapFile->last_page)
return swapFile;
}
return NULL;
}
// used by page daemon to free swap space
bool
swap_free_page_swap_space(vm_page *page)
{
VMAnonymousCache *cache = dynamic_cast<VMAnonymousCache *>(page->cache);
if (cache == NULL)
return false;
swap_addr_t pageIndex = cache->_SwapBlockGetAddress(page->cache_offset);
if (pageIndex == SWAP_PAGE_NONE)
return false;
swap_page_dealloc(pageIndex, 1);
cache->fAllocatedSwapSize -= B_PAGE_SIZE;
cache->_SwapBlockFree(page->cache_offset);
return true;
}
uint32
swap_available_pages()
{
mutex_lock(&sAvailSwapSpaceLock);
uint32 avail = sAvailSwapSpace >> PAGE_SHIFT;
mutex_unlock(&sAvailSwapSpaceLock);
return avail;
}
uint32
swap_total_swap_pages()
{
mutex_lock(&sSwapFileListLock);
uint32 totalSwapPages = 0;
for (SwapFileList::Iterator it = sSwapFileList.GetIterator();
swap_file *swapFile = it.Next();)
totalSwapPages += swapFile->last_page - swapFile->first_page;
mutex_unlock(&sSwapFileListLock);
return totalSwapPages;
}
static swap_addr_t
swap_page_alloc(uint32 npages)
{
swap_addr_t hint = 0;
swap_addr_t j;
if (sSwapFileList.IsEmpty())
return SWAP_PAGE_NONE;
mutex_lock(&sSwapFileListLock);
for (j = 0; j < sSwapFileCount; j++) {
if (sSwapFileAlloc == NULL)
sSwapFileAlloc = sSwapFileList.First();
hint = sSwapFileAlloc->hint;
swap_addr_t pageCount = sSwapFileAlloc->last_page
- sSwapFileAlloc->first_page;
swap_addr_t i = 0;
while (i < npages && (hint + npages) < pageCount) {
if (TESTBIT(sSwapFileAlloc->maps, hint + i)) {
hint++;
i = 0;
} else
i++;
}
if (i == npages)
break;
// this swap_file is full, find another
sSwapFileAlloc = sSwapFileList.GetNext(sSwapFileAlloc);
}
if (j == sSwapFileCount) {
panic("swap space exhausted\n");
mutex_unlock(&sSwapFileListLock);
return SWAP_PAGE_NONE;
}
swap_addr_t pageIndex = sSwapFileAlloc->first_page + hint;
for (uint32 i = 0; i < npages; i++)
SETBIT(sSwapFileAlloc->maps, hint + i);
if (hint == sSwapFileAlloc->hint)
sSwapFileAlloc->hint += npages;
sSwapFileAlloc->used += npages;
// if this swap file has used more than 90% of its pages
// switch to another
if (sSwapFileAlloc->used
> 9 * (sSwapFileAlloc->last_page - sSwapFileAlloc->first_page) / 10)
sSwapFileAlloc = sSwapFileList.GetNext(sSwapFileAlloc);
mutex_unlock(&sSwapFileListLock);
return pageIndex;
}
static void
swap_page_dealloc(swap_addr_t pageIndex, uint32 npages)
{
if (pageIndex == SWAP_PAGE_NONE)
return;
mutex_lock(&sSwapFileListLock);
swap_file *swapFile = find_swap_file(pageIndex);
pageIndex -= swapFile->first_page;
for (uint32 i = 0; i < npages; i++)
CLEARBIT(swapFile->maps, pageIndex + i);
if (swapFile->hint > pageIndex)
swapFile->hint = pageIndex;
swapFile->used -= npages;
mutex_unlock(&sSwapFileListLock);
}
static off_t
swap_space_reserve(off_t amount)
{
mutex_lock(&sAvailSwapSpaceLock);
if (sAvailSwapSpace >= amount)
sAvailSwapSpace -= amount;
else {
sAvailSwapSpace = 0;
amount = sAvailSwapSpace;
}
mutex_unlock(&sAvailSwapSpaceLock);
return amount;
}
static void
swap_space_unreserve(off_t amount)
{
mutex_lock(&sAvailSwapSpaceLock);
sAvailSwapSpace += amount;
mutex_unlock(&sAvailSwapSpaceLock);
}
// #pragma mark -
status_t
@@ -722,14 +693,13 @@ swap_file_add(char *path)
int32 pageCount = st.st_size >> PAGE_SHIFT;
swap->used = 0;
swap->maps = (uint32 *)
malloc((pageCount + NUM_BITS_PER_WORD - 1) / NUM_BITS_PER_WORD);
swap->maps = (uint32 *)malloc((pageCount + 7) / 8);
if (swap->maps == NULL) {
free(swap);
vfs_put_vnode(node);
return B_NO_MEMORY;
}
memset(swap->maps, 0, pageCount % 8 + 1);
memset(swap->maps, 0, (pageCount + 7) / 8);
swap->hint = 0;
// set start page index and add this file to swap file list
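
The fix sizes both the malloc() and the memset() as (pageCount + 7) / 8 bytes, one bit per page rounded up to whole bytes. The removed lines instead computed a word count but passed it to malloc() as a byte count, and the old memset() cleared at most eight bytes. Quick arithmetic for a hypothetical 1000-page swap file (since maps is indexed as uint32 words, rounding up to a multiple of NUM_BYTES_PER_WORD would arguably be safer still):

#include <stdint.h>
#include <stdio.h>

int main()
{
    int32_t pageCount = 1000;
    size_t fixedSize = (pageCount + 7) / 8;  // 125 bytes: one bit per page
    size_t oldAlloc = (pageCount + 31) / 32; // 32: a word count passed as bytes
    size_t oldCleared = pageCount % 8 + 1;   // 1: the old memset's byte count
    printf("%zu %zu %zu\n", fixedSize, oldAlloc, oldCleared);
    return 0;
}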
@@ -808,7 +778,7 @@ swap_init(void)
sSwapBlockCache = create_object_cache("swapblock",
sizeof(swap_block), sizeof(void*), NULL, NULL, NULL);
if (sSwapBlockCache == NULL)
panic("can't create object cache for swap blocks\n");
panic("swap_init(): can't create object cache for swap blocks\n");
// init swap hash table
sSwapHashTable.Init();
@@ -824,6 +794,7 @@ swap_init(void)
sAvailSwapSpace = 0;
}
void
swap_init_post_modules()
{
@@ -867,4 +838,50 @@ swap_init_post_modules()
swap_file_add("/var/swap");
}
// used by page daemon to free swap space
bool
swap_free_page_swap_space(vm_page *page)
{
VMAnonymousCache *cache = dynamic_cast<VMAnonymousCache *>(page->cache);
if (cache == NULL)
return false;
swap_addr_t swapAddress = cache->_SwapBlockGetAddress(page->cache_offset);
if (swapAddress == SWAP_PAGE_NONE)
return false;
swap_page_dealloc(swapAddress, 1);
cache->fAllocatedSwapSize -= B_PAGE_SIZE;
cache->_SwapBlockFree(page->cache_offset, 1);
return true;
}
uint32
swap_available_pages()
{
mutex_lock(&sAvailSwapSpaceLock);
uint32 avail = sAvailSwapSpace >> PAGE_SHIFT;
mutex_unlock(&sAvailSwapSpaceLock);
return avail;
}
uint32
swap_total_swap_pages()
{
mutex_lock(&sSwapFileListLock);
uint32 totalSwapPages = 0;
for (SwapFileList::Iterator it = sSwapFileList.GetIterator();
swap_file *swapFile = it.Next();)
totalSwapPages += swapFile->last_page - swapFile->first_page;
mutex_unlock(&sSwapFileListLock);
return totalSwapPages;
}
#endif // ENABLE_SWAP_SUPPORT

VMAnonymousCache.h

@@ -46,9 +46,10 @@ public:
virtual void MergeStore(VMCache* source);
private:
void _SwapBlockBuild(off_t cacheOffset, swap_addr_t pageIndex);
void _SwapBlockFree(off_t cacheOffset);
swap_addr_t _SwapBlockGetAddress(off_t cacheOffset);
void _SwapBlockBuild(off_t pageIndex,
swap_addr_t swapAddress, uint32 count);
void _SwapBlockFree(off_t pageIndex, uint32 count);
swap_addr_t _SwapBlockGetAddress(off_t pageIndex);
status_t _Commit(off_t size);
private: