commit 1021fd2826
parent e30dd2c076

* agp_gart(): Use vm_page_[un]reserve_pages().
* Removed unused vm_page_allocate_pages().
* Removed now unused (always true) "reserved" parameter from vm_page_allocate_page().
* Removed unused (always false) "stealActive" parameter from steal_page().

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34836 a95241bf-73f2-0310-859d-f6bbb57e9c96
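In effect, callers move from a fallible allocate-with-NULL-fallback call to a reserve-then-allocate pattern. A minimal sketch of the two calling conventions against the kernel-internal API (illustrative only, not code from this diff):

    // Old convention: allocation may fail, every caller handles NULL.
    vm_page* page = vm_page_allocate_page(PAGE_STATE_CLEAR, false);
    if (page == NULL)
        return B_NO_MEMORY;

    // New convention: reserve first; allocating a reserved page cannot
    // fail, so no error path is needed.
    vm_page_reserve_pages(1);
    vm_page* reservedPage = vm_page_allocate_page(PAGE_STATE_CLEAR);
    vm_page_unreserve_pages(1);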
@@ -49,9 +49,7 @@ void vm_page_unreserve_pages(uint32 count);
 void vm_page_reserve_pages(uint32 count);
 bool vm_page_try_reserve_pages(uint32 count);
 
-struct vm_page *vm_page_allocate_page(int pageState, bool reserved);
-status_t vm_page_allocate_pages(int pageState, struct vm_page **pages,
-    uint32 numPages);
+struct vm_page *vm_page_allocate_page(int pageState);
 struct vm_page *vm_page_allocate_page_run(int state, addr_t base,
     addr_t length);
 struct vm_page *vm_page_allocate_page_run_no_base(int state, addr_t count);
@@ -545,18 +545,10 @@ Aperture::AllocateMemory(aperture_memory *memory, uint32 flags)
     if (memory->pages == NULL)
         return B_NO_MEMORY;
 
-    for (uint32 i = 0; i < count; i++) {
-        memory->pages[i] = vm_page_allocate_page(PAGE_STATE_CLEAR, false);
-        if (memory->pages[i] == NULL) {
-            // Free pages we already allocated
-            while (i-- > 0) {
-                vm_page_set_state(memory->pages[i], PAGE_STATE_CLEAR);
-            }
-            free(memory->pages);
-            memory->pages = NULL;
-            return B_NO_MEMORY;
-        }
-    }
+    vm_page_reserve_pages(count);
+    for (uint32 i = 0; i < count; i++)
+        memory->pages[i] = vm_page_allocate_page(PAGE_STATE_CLEAR);
+    vm_page_unreserve_pages(count);
 }
 #else
     void *address;
@@ -540,7 +540,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
         unsigned int i;
 
         // we need to allocate a pgtable
-        page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
+        page = vm_page_allocate_page(PAGE_STATE_CLEAR);
 
         // mark the page WIRED
         vm_page_set_state(page, PAGE_STATE_WIRED);
@@ -586,7 +586,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
         unsigned int i;
 
         // we need to allocate a pgtable
-        page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
+        page = vm_page_allocate_page(PAGE_STATE_CLEAR);
 
         // mark the page WIRED
         vm_page_set_state(page, PAGE_STATE_WIRED);
@@ -1,5 +1,5 @@
 /*
- * Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Copyright 2008-2009, Ingo Weinhold, ingo_weinhold@gmx.de.
  * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
  * Distributed under the terms of the MIT License.
  *
@@ -364,7 +364,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
     vm_page *page;
 
     // we need to allocate a pgtable
-    page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
+    page = vm_page_allocate_page(PAGE_STATE_CLEAR);
 
     // mark the page WIRED
     vm_page_set_state(page, PAGE_STATE_WIRED);
src/system/kernel/cache/file_cache.cpp:
@@ -149,7 +149,7 @@ PrecacheIO::Prepare()
     // allocate pages for the cache and mark them busy
     uint32 i = 0;
     for (size_t pos = 0; pos < fSize; pos += B_PAGE_SIZE) {
-        vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE, true);
+        vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
         if (page == NULL)
             break;
 
@@ -381,7 +381,7 @@ read_into_cache(file_cache_ref* ref, void* cookie, off_t offset,
     // allocate pages for the cache and mark them busy
     for (size_t pos = 0; pos < numBytes; pos += B_PAGE_SIZE) {
         vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
-            PAGE_STATE_FREE, true);
+            PAGE_STATE_FREE);
         if (page == NULL)
             panic("no more pages!");
 
@@ -506,7 +506,7 @@ write_to_cache(file_cache_ref* ref, void* cookie, off_t offset,
         // TODO: the pages we allocate here should have been reserved upfront
         // in cache_io()
         vm_page* page = pages[pageIndex++] = vm_page_allocate_page(
-            PAGE_STATE_FREE, true);
+            PAGE_STATE_FREE);
 
         ref->cache->InsertPage(page, offset + pos);
 
@@ -515,9 +515,6 @@ vm_cache_acquire_locked_page_cache(vm_page* page, bool dontWait)
         return NULL;
     }
 
-    // TODO: this is problematic, as it requires the caller not to have
-    // a lock on this cache (it might be called via
-    // vm_page_allocate_page(..., false)).
     if (!cache->SwitchLock(&sCacheListLock)) {
         // cache has been deleted
         mutex_lock(&sCacheListLock);
@@ -871,7 +871,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
 #   endif
                 continue;
 #endif
-            vm_page* page = vm_page_allocate_page(newPageState, true);
+            vm_page* page = vm_page_allocate_page(newPageState);
             cache->InsertPage(page, offset);
             vm_map_page(area, page, address, protection);
 
@@ -3671,7 +3671,7 @@ fault_get_page(PageFaultContext& context)
         // see if the backing store has it
         if (cache->HasPage(context.cacheOffset)) {
             // insert a fresh page and mark it busy -- we're going to read it in
-            page = vm_page_allocate_page(PAGE_STATE_FREE, true);
+            page = vm_page_allocate_page(PAGE_STATE_FREE);
             cache->InsertPage(page, context.cacheOffset);
 
             // We need to unlock all caches and the address space while reading
@@ -3724,7 +3724,7 @@ fault_get_page(PageFaultContext& context)
     cache = context.isWrite ? context.topCache : lastCache;
 
     // allocate a clean page
-    page = vm_page_allocate_page(PAGE_STATE_CLEAR, true);
+    page = vm_page_allocate_page(PAGE_STATE_CLEAR);
     FTRACE(("vm_soft_fault: just allocated page 0x%lx\n",
         page->physical_page_number));
 
@@ -3739,7 +3739,7 @@ fault_get_page(PageFaultContext& context)
         // TODO: If memory is low, it might be a good idea to steal the page
         // from our source cache -- if possible, that is.
         FTRACE(("get new page, copy it, and put it into the topmost cache\n"));
-        page = vm_page_allocate_page(PAGE_STATE_FREE, true);
+        page = vm_page_allocate_page(PAGE_STATE_FREE);
 
         // copy the page
         vm_memcpy_physical_page(page->physical_page_number * B_PAGE_SIZE,
@@ -127,9 +127,7 @@ class UnreservePages : public AbstractTraceEntry {
 
 class AllocatePage : public AbstractTraceEntry {
     public:
-        AllocatePage(bool reserved)
-            :
-            fReserved(reserved)
+        AllocatePage()
         {
             Initialized();
         }
@@ -137,12 +135,7 @@ class AllocatePage : public AbstractTraceEntry {
         virtual void AddDump(TraceOutput& out)
         {
             out.Print("page alloc");
-            if (fReserved)
-                out.Print(" reserved");
         }
-
-    private:
-        bool fReserved;
 };
 
 
@@ -1511,7 +1504,7 @@ page_writer(void* /*unused*/)
 
 
 static vm_page *
-find_page_candidate(struct vm_page &marker, bool stealActive)
+find_page_candidate(struct vm_page &marker)
 {
     MutexLocker locker(sPageLock);
     page_queue *queue;
@@ -1521,10 +1514,6 @@ find_page_candidate(struct vm_page &marker, bool stealActive)
         // Get the first free pages of the (in)active queue
         queue = &sInactivePageQueue;
         page = sInactivePageQueue.head;
-        if (page == NULL && stealActive) {
-            queue = &sActivePageQueue;
-            page = sActivePageQueue.head;
-        }
     } else {
         // Get the next page of the current queue
         if (marker.state == PAGE_STATE_INACTIVE)
@@ -1542,10 +1531,7 @@ find_page_candidate(struct vm_page &marker, bool stealActive)
     }
 
     while (page != NULL) {
-        if (!is_marker_page(page)
-            && (page->state == PAGE_STATE_INACTIVE
-                || (stealActive && page->state == PAGE_STATE_ACTIVE
-                    && page->wired_count == 0))) {
+        if (!is_marker_page(page) && page->state == PAGE_STATE_INACTIVE) {
             // we found a candidate, insert marker
             marker.state = queue == &sActivePageQueue
                 ? PAGE_STATE_ACTIVE : PAGE_STATE_INACTIVE;
@@ -1554,10 +1540,6 @@ find_page_candidate(struct vm_page &marker, bool stealActive)
         }
 
         page = page->queue_next;
-        if (page == NULL && stealActive && queue != &sActivePageQueue) {
-            queue = &sActivePageQueue;
-            page = sActivePageQueue.head;
-        }
     }
 
     return NULL;
@@ -1565,7 +1547,7 @@ find_page_candidate(struct vm_page &marker, bool stealActive)
 
 
 static bool
-steal_page(vm_page *page, bool stealActive)
+steal_page(vm_page *page)
 {
     // try to lock the page's cache
     if (vm_cache_acquire_locked_page_cache(page, false) == NULL)
@@ -1575,9 +1557,7 @@ steal_page(vm_page *page, bool stealActive)
     MethodDeleter<VMCache> _2(page->cache, &VMCache::ReleaseRefLocked);
 
     // check again if that page is still a candidate
-    if (page->state != PAGE_STATE_INACTIVE
-        && (!stealActive || page->state != PAGE_STATE_ACTIVE
-            || page->wired_count != 0))
+    if (page->state != PAGE_STATE_INACTIVE)
         return false;
 
     // recheck eventual last minute changes
@@ -1608,10 +1588,8 @@ steal_page(vm_page *page, bool stealActive)
 
 
 static size_t
-steal_pages(vm_page **pages, size_t count, bool reserve)
+steal_pages(vm_page **pages, size_t count)
 {
-    size_t maxCount = count;
-
     while (true) {
         vm_page marker;
         marker.type = PAGE_TYPE_DUMMY;
@@ -1622,19 +1600,17 @@ steal_pages(vm_page **pages, size_t count, bool reserve)
         size_t stolen = 0;
 
         while (count > 0) {
-            vm_page *page = find_page_candidate(marker, false);
+            vm_page *page = find_page_candidate(marker);
             if (page == NULL)
                 break;
 
-            if (steal_page(page, false)) {
-                if (reserve || stolen >= maxCount) {
-                    MutexLocker _(sPageLock);
-                    enqueue_page(&sFreePageQueue, page);
-                    page->state = PAGE_STATE_FREE;
+            if (steal_page(page)) {
+                MutexLocker locker(sPageLock);
+                enqueue_page(&sFreePageQueue, page);
+                page->state = PAGE_STATE_FREE;
+                locker.Unlock();
 
-                    T(StolenPage());
-                } else if (stolen < maxCount)
-                    pages[stolen] = page;
+                T(StolenPage());
 
                 stolen++;
                 count--;
@@ -1646,11 +1622,11 @@ steal_pages(vm_page **pages, size_t count, bool reserve)
 
         MutexLocker locker(sPageLock);
 
-        if ((reserve && sReservedPages <= free_page_queue_count())
+        if (sReservedPages <= free_page_queue_count()
             || count == 0
-            || ((!reserve && (sInactivePageQueue.count > 0))
-                || free_page_queue_count() > sReservedPages))
+            || free_page_queue_count() > sReservedPages) {
             return stolen;
+        }
 
         if (stolen && !tried && sInactivePageQueue.count > 0) {
             count++;
@@ -1681,7 +1657,7 @@ steal_pages(vm_page **pages, size_t count, bool reserve)
             locker.Lock();
             sPageDeficit--;
 
-            if (reserve && sReservedPages <= free_page_queue_count())
+            if (sReservedPages <= free_page_queue_count())
                 return stolen;
         }
     }
@@ -2046,6 +2022,7 @@ vm_page_unreserve_pages(uint32 count)
     They will only be handed out to someone who has actually reserved them.
     This call returns as soon as the number of requested pages has been
     reached.
+    The caller must not hold any cache lock or the function might deadlock.
 */
 void
 vm_page_reserve_pages(uint32 count)
@@ -2065,7 +2042,7 @@ vm_page_reserve_pages(uint32 count)
         count = sReservedPages - freePages;
         locker.Unlock();
 
-        steal_pages(NULL, count + 1, true);
+        steal_pages(NULL, count + 1);
             // we get one more, just in case we can do something someone
             // else can't
     }
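The sentence added to the doc comment above encodes a real ordering constraint: reserving may have to steal pages, which in turn locks page caches (cf. the TODO removed from vm_cache_acquire_locked_page_cache() earlier in this diff). A minimal sketch of the safe call order, where cache->Lock()/Unlock() stand in for whatever cache lock the caller holds (an assumption for illustration, not code from this commit):

    // Safe: reserve while holding no cache lock...
    vm_page_reserve_pages(count);

    // ...then take the cache lock and allocate; the reserved page
    // is guaranteed to be available.
    cache->Lock();              // stand-in for the caller's cache lock
    vm_page* page = vm_page_allocate_page(PAGE_STATE_FREE);
    cache->InsertPage(page, offset);
    cache->Unlock();

    vm_page_unreserve_pages(count);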
@@ -2090,10 +2067,8 @@ vm_page_try_reserve_pages(uint32 count)
 }
 
 
-// TODO: you must not have locked a cache when calling this function with
-// reserved == false. See vm_cache_acquire_locked_page_cache().
 vm_page *
-vm_page_allocate_page(int pageState, bool reserved)
+vm_page_allocate_page(int pageState)
 {
     page_queue *queue;
     page_queue *otherQueue;
@@ -2113,37 +2088,21 @@ vm_page_allocate_page(int pageState, bool reserved)
 
     MutexLocker locker(sPageLock);
 
-    T(AllocatePage(reserved));
+    T(AllocatePage());
 
-    vm_page *page = NULL;
-    while (true) {
-        if (reserved || sReservedPages < free_page_queue_count()) {
-            page = dequeue_page(queue);
-            if (page == NULL) {
+    vm_page *page = dequeue_page(queue);
+    if (page == NULL) {
 #if DEBUG_PAGE_QUEUE
-                if (queue->count != 0)
-                    panic("queue %p corrupted, count = %d\n", queue, queue->count);
+        if (queue->count != 0)
+            panic("queue %p corrupted, count = %d\n", queue, queue->count);
 #endif
 
-                // if the primary queue was empty, grab the page from the
-                // secondary queue
-                page = dequeue_page(otherQueue);
-            }
-        }
+        // if the primary queue was empty, grab the page from the
+        // secondary queue
+        page = dequeue_page(otherQueue);
 
-        if (page != NULL)
-            break;
-
-        if (reserved)
+        if (page == NULL)
             panic("Had reserved page, but there is none!");
-
-        // steal one from the inactive list
-        locker.Unlock();
-        size_t stolen = steal_pages(&page, 1, false);
-        locker.Lock();
-
-        if (stolen > 0)
-            break;
-    }
+    }
 
     if (page->cache != NULL)
@@ -2165,31 +2124,6 @@ vm_page_allocate_page(int pageState, bool reserved)
 }
 
 
-/*! Allocates a number of pages and puts their pointers into the provided
-    array. All pages are marked busy.
-    Returns B_OK on success, and B_NO_MEMORY when there aren't any free
-    pages left to allocate.
-*/
-status_t
-vm_page_allocate_pages(int pageState, vm_page **pages, uint32 numPages)
-{
-    uint32 i;
-
-    for (i = 0; i < numPages; i++) {
-        pages[i] = vm_page_allocate_page(pageState, false);
-        if (pages[i] == NULL) {
-            // allocation failed, we need to free what we already have
-            while (i-- > 0)
-                vm_page_set_state(pages[i], pageState);
-
-            return B_NO_MEMORY;
-        }
-    }
-
-    return B_OK;
-}
-
-
 vm_page *
 vm_page_allocate_page_run(int pageState, addr_t base, addr_t length)
 {
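With vm_page_allocate_pages() gone, a multi-page caller expresses the same thing with a reservation and a plain loop; unlike the removed helper, this has no failure path to unwind. A sketch of the equivalent replacement, mirroring the agp_gart change above:

    // Equivalent of the removed vm_page_allocate_pages(pageState, pages,
    // numPages): reserve up front, then each allocation is guaranteed.
    vm_page_reserve_pages(numPages);
    for (uint32 i = 0; i < numPages; i++)
        pages[i] = vm_page_allocate_page(pageState);
    vm_page_unreserve_pages(numPages);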