* vm_page::offset is now called cache_offset and is a uint32 instead of an off_t;
  this saves 4 bytes per page. To compensate for the smaller type, the offset is
  now stored in page size units, which is enough to address 2^44 bytes, or 16 TB
  (which is now the maximum supported file size!).
* Renamed vm_page::ppn to physical_page_number.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15637 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 3ca8cdfc07
commit 79f73dbc56
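For context, a minimal standalone sketch (not part of the commit) of the conversion the new code performs between byte offsets and page-size units, assuming PAGE_SHIFT is 12 as defined in the arch headers below; the helper names and stdint types are illustrative only. With a 32-bit page-unit offset, the addressable range is 2^32 pages * 2^12 bytes = 2^44 bytes = 16 TB:

	#include <stdint.h>

	#define PAGE_SHIFT 12	/* 4096-byte pages, matching the arch headers below */

	/* byte offset in a cache -> page-unit offset (hypothetical helper) */
	static uint32_t
	byte_to_cache_offset(int64_t offset)
	{
		return (uint32_t)(offset >> PAGE_SHIFT);
	}

	/* page-unit offset -> byte offset (hypothetical helper) */
	static int64_t
	cache_to_byte_offset(uint32_t cacheOffset)
	{
		return (int64_t)cacheOffset << PAGE_SHIFT;
	}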
@@ -10,4 +10,6 @@
 #define NUM_IO_PAGES 4
 	/* 16 kB */
 
+#define PAGE_SHIFT 12
+
 #endif	/* ARCH_PPC_VM_H */
@@ -10,4 +10,6 @@
 #define NUM_IO_PAGES 4
 	/* 16 kB */
 
+#define PAGE_SHIFT 12
+
 #endif	/* ARCH_x86_VM_H */
@@ -9,8 +9,9 @@
 #define _KERNEL_VM_H
 
 
-#include <kernel.h>
+//#include <kernel.h>
+#include <vm_types.h>
 #include <arch/vm.h>
 #include <arch/vm_translation_map.h>
 
 struct kernel_args;
@@ -22,10 +22,11 @@ typedef struct vm_page {
 
 	struct vm_page *hash_next;
 
-	addr_t ppn; // physical page number
-	off_t offset;
+	addr_t physical_page_number;
 
 	struct vm_cache *cache;
+	uint32 cache_offset;
+		// in page size units
 
 	struct vm_page *cache_prev;
 	struct vm_page *cache_next;
@@ -359,7 +359,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 		// mark the page WIRED
 		vm_page_set_state(page, PAGE_STATE_WIRED);
 
-		pgtable = page->ppn * B_PAGE_SIZE;
+		pgtable = page->physical_page_number * B_PAGE_SIZE;
 
 		TRACE(("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable));
 
src/system/kernel/cache/block_allocator.cpp | 2 +-
@@ -360,7 +360,7 @@ block_range::Allocate(block_cache *cache, block_chunk **_chunk)
 
 	for (uint32 i = 0; i < numPages; i++) {
 		map->ops->map(map, base + chunk * cache->chunk_size + i * B_PAGE_SIZE,
-			pages[pageBaseIndex + i]->ppn * B_PAGE_SIZE,
+			pages[pageBaseIndex + i]->physical_page_number * B_PAGE_SIZE,
 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 	}
 
src/system/kernel/cache/file_cache.cpp | 6 +++---
@@ -445,7 +445,7 @@ read_chunk_into_cache(file_cache_ref *ref, off_t offset, size_t size,
 		vm_cache_insert_page(cache, page, offset + pos);
 
 		addr_t virtualAddress;
-		if (vm_get_physical_page(page->ppn * B_PAGE_SIZE, &virtualAddress, PHYSICAL_PAGE_CAN_WAIT) < B_OK)
+		if (vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE, &virtualAddress, PHYSICAL_PAGE_CAN_WAIT) < B_OK)
 			panic("could not get physical page");
 
 		add_to_iovec(vecs, vecCount, MAX_IO_VECS, virtualAddress, B_PAGE_SIZE);
@@ -578,7 +578,7 @@ write_chunk_to_cache(file_cache_ref *ref, off_t offset, size_t size,
 		vm_cache_insert_page(ref->cache, page, offset + pos);
 
 		addr_t virtualAddress;
-		vm_get_physical_page(page->ppn * B_PAGE_SIZE, &virtualAddress,
+		vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE, &virtualAddress,
 			PHYSICAL_PAGE_CAN_WAIT);
 
 		add_to_iovec(vecs, vecCount, MAX_IO_VECS, virtualAddress, B_PAGE_SIZE);
@@ -773,7 +773,7 @@ cache_io(void *_cacheRef, off_t offset, addr_t buffer, size_t *_size, bool doWri
 
 		TRACE(("lookup page from offset %Ld: %p, size = %lu, pageOffset = %lu\n", offset, page, bytesLeft, pageOffset));
 		if (page != NULL
-			&& vm_get_physical_page(page->ppn * B_PAGE_SIZE,
+			&& vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
 				&virtualAddress, PHYSICAL_PAGE_CAN_WAIT) == B_OK) {
 			// it is, so let's satisfy the first part of the request, if we have to
 			if (lastBuffer != buffer) {
@@ -819,7 +819,7 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 
 			if (wiring == B_CONTIGUOUS) {
 				// we had reserved the area space upfront...
-				addr_t pageNumber = page->ppn;
+				addr_t pageNumber = page->physical_page_number;
 				int32 i;
 				for (i = size / B_PAGE_SIZE; i-- > 0; pageNumber++) {
 					page = vm_lookup_page(pageNumber);
@@ -902,7 +902,7 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
 			// We have already allocated our continuous pages run, so we can now just
 			// map them in the address space
 			vm_translation_map *map = &addressSpace->translation_map;
-			addr_t physicalAddress = page->ppn * B_PAGE_SIZE;
+			addr_t physicalAddress = page->physical_page_number * B_PAGE_SIZE;
 			addr_t virtualAddress;
 			off_t offset = 0;
 
@@ -1449,8 +1449,9 @@ vm_copy_on_write_area(vm_area *area)
 	map->ops->unmap(map, area->base, area->base - 1 + area->size);
 
 	for (page = lowerCache->page_list; page; page = page->cache_next) {
-		map->ops->map(map, area->base + (page->offset - area->cache_offset),
-			page->ppn * B_PAGE_SIZE, protection);
+		map->ops->map(map, area->base + (page->cache_offset << PAGE_SHIFT)
+			- area->cache_offset, page->physical_page_number << PAGE_SHIFT,
+			protection);
 	}
 
 	map->ops->unlock(map);
@@ -1838,8 +1839,8 @@ dump_cache(int argc, char **argv)
 	kprintf("page_list:\n");
 	for (page = cache->page_list; page != NULL; page = page->cache_next) {
 		if (page->type == PAGE_TYPE_PHYSICAL) {
-			kprintf(" %p ppn 0x%lx offset 0x%Lx type %ld state %ld (%s) ref_count %ld\n",
-				page, page->ppn, page->offset, page->type, page->state,
+			kprintf(" %p ppn 0x%lx offset 0x%lx type %ld state %ld (%s) ref_count %ld\n",
+				page, page->physical_page_number, page->cache_offset, page->type, page->state,
 				page_state_to_text(page->state), page->ref_count);
 		} else if(page->type == PAGE_TYPE_DUMMY) {
 			kprintf(" %p DUMMY PAGE state %ld (%s)\n",
@@ -2419,7 +2420,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 	vm_cache_ref *cache_ref;
 	vm_cache_ref *last_cache_ref;
 	vm_cache_ref *top_cache_ref;
-	off_t cache_offset;
+	off_t cacheOffset;
 	vm_page dummy_page;
 	vm_page *page = NULL;
 	addr_t address;
@@ -2483,7 +2484,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 	// At first, the top most cache from the area is investigated
 
 	top_cache_ref = area->cache_ref;
-	cache_offset = address - area->base + area->cache_offset;
+	cacheOffset = address - area->base + area->cache_offset;
 	vm_cache_acquire_ref(top_cache_ref, true);
 	change_count = addressSpace->change_count;
 	release_sem_etc(addressSpace->sem, READ_COUNT, 0);
@@ -2493,7 +2494,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 		// Note, since the page fault is resolved with interrupts enabled, the
 		// fault handler could be called more than once for the same reason -
 		// the store must take this into account
-		status_t status = (*top_cache_ref->cache->store->ops->fault)(top_cache_ref->cache->store, addressSpace, cache_offset);
+		status_t status = (*top_cache_ref->cache->store->ops->fault)(top_cache_ref->cache->store, addressSpace, cacheOffset);
 		if (status != B_BAD_HANDLER) {
 			vm_cache_release_ref(top_cache_ref);
 			vm_put_address_space(addressSpace);
@@ -2512,7 +2513,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 		mutex_lock(&cache_ref->lock);
 
 		for (;;) {
-			page = vm_cache_lookup_page(cache_ref, cache_offset);
+			page = vm_cache_lookup_page(cache_ref, cacheOffset);
 			if (page != NULL && page->state != PAGE_STATE_BUSY) {
 				vm_page_set_state(page, PAGE_STATE_BUSY);
 				mutex_unlock(&cache_ref->lock);
@@ -2538,12 +2539,12 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 		// from faulting on the same address and chasing us up the cache chain
 		if (cache_ref == top_cache_ref) {
 			dummy_page.state = PAGE_STATE_BUSY;
-			vm_cache_insert_page(cache_ref, &dummy_page, cache_offset);
+			vm_cache_insert_page(cache_ref, &dummy_page, cacheOffset);
 		}
 
 		// see if the vm_store has it
 		if (cache_ref->cache->store->ops->has_page != NULL
-			&& cache_ref->cache->store->ops->has_page(cache_ref->cache->store, cache_offset)) {
+			&& cache_ref->cache->store->ops->has_page(cache_ref->cache->store, cacheOffset)) {
 			size_t bytesRead;
 			iovec vec;
 
@@ -2552,9 +2553,9 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 			mutex_unlock(&cache_ref->lock);
 
 			page = vm_page_allocate_page(PAGE_STATE_FREE);
-			addressSpace->translation_map.ops->get_physical_page(page->ppn * B_PAGE_SIZE, (addr_t *)&vec.iov_base, PHYSICAL_PAGE_CAN_WAIT);
+			addressSpace->translation_map.ops->get_physical_page(page->physical_page_number * B_PAGE_SIZE, (addr_t *)&vec.iov_base, PHYSICAL_PAGE_CAN_WAIT);
 			// ToDo: handle errors here
-			err = cache_ref->cache->store->ops->read(cache_ref->cache->store, cache_offset, &vec, 1, &bytesRead);
+			err = cache_ref->cache->store->ops->read(cache_ref->cache->store, cacheOffset, &vec, 1, &bytesRead);
 			addressSpace->translation_map.ops->put_physical_page((addr_t)vec.iov_base);
 
 			mutex_lock(&cache_ref->lock);
@@ -2563,7 +2564,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 				vm_cache_remove_page(cache_ref, &dummy_page);
 				dummy_page.state = PAGE_STATE_INACTIVE;
 			}
-			vm_cache_insert_page(cache_ref, page, cache_offset);
+			vm_cache_insert_page(cache_ref, page, cacheOffset);
 			mutex_unlock(&cache_ref->lock);
 			break;
 		}
@@ -2583,7 +2584,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 	if (page == NULL) {
 		// we still haven't found a page, so we allocate a clean one
 		page = vm_page_allocate_page(PAGE_STATE_CLEAR);
-		FTRACE(("vm_soft_fault: just allocated page 0x%lx\n", page->ppn));
+		FTRACE(("vm_soft_fault: just allocated page 0x%lx\n", page->physical_page_number));
 
 		// Insert the new page into our cache, and replace it with the dummy page if necessary
 
@@ -2595,7 +2596,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 			dummy_page.state = PAGE_STATE_INACTIVE;
 		}
 
-		vm_cache_insert_page(cache_ref, page, cache_offset);
+		vm_cache_insert_page(cache_ref, page, cacheOffset);
 		mutex_unlock(&cache_ref->lock);
 
 		if (dummy_page.state == PAGE_STATE_BUSY) {
@@ -2624,8 +2625,8 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 
 			// try to get a mapping for the src and dest page so we can copy it
 			for (;;) {
-				(*addressSpace->translation_map.ops->get_physical_page)(src_page->ppn * B_PAGE_SIZE, (addr_t *)&src, PHYSICAL_PAGE_CAN_WAIT);
-				err = (*addressSpace->translation_map.ops->get_physical_page)(page->ppn * B_PAGE_SIZE, (addr_t *)&dest, PHYSICAL_PAGE_NO_WAIT);
+				(*addressSpace->translation_map.ops->get_physical_page)(src_page->physical_page_number * B_PAGE_SIZE, (addr_t *)&src, PHYSICAL_PAGE_CAN_WAIT);
+				err = (*addressSpace->translation_map.ops->get_physical_page)(page->physical_page_number * B_PAGE_SIZE, (addr_t *)&dest, PHYSICAL_PAGE_NO_WAIT);
 				if (err == B_NO_ERROR)
 					break;
 
@@ -2651,7 +2652,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 			dummy_page.state = PAGE_STATE_INACTIVE;
 		}
 
-		vm_cache_insert_page(top_cache_ref, page, cache_offset);
+		vm_cache_insert_page(top_cache_ref, page, cacheOffset);
 		mutex_unlock(&top_cache_ref->lock);
 
 		if (dummy_page.state == PAGE_STATE_BUSY) {
@@ -2671,7 +2672,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 	area = vm_area_lookup(addressSpace, address);
 	if (area == NULL
 		|| area->cache_ref != top_cache_ref
-		|| (address - area->base + area->cache_offset) != cache_offset) {
+		|| (address - area->base + area->cache_offset) != cacheOffset) {
 		dprintf("vm_soft_fault: address space layout changed effecting ongoing soft fault\n");
 		err = B_BAD_ADDRESS;
 	}
@@ -2689,7 +2690,7 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
 		atomic_add(&page->ref_count, 1);
 		(*addressSpace->translation_map.ops->lock)(&addressSpace->translation_map);
 		(*addressSpace->translation_map.ops->map)(&addressSpace->translation_map, address,
-			page->ppn * B_PAGE_SIZE, newProtection);
+			page->physical_page_number * B_PAGE_SIZE, newProtection);
 		(*addressSpace->translation_map.ops->unlock)(&addressSpace->translation_map);
 	}
 
@@ -37,7 +37,7 @@ static void *page_cache_table;
 static spinlock page_cache_table_lock;
 
 struct page_lookup_key {
-	off_t offset;
+	uint32 offset;
 	vm_cache *cache;
 };
 
@@ -50,7 +50,7 @@ page_compare_func(void *_p, const void *_key)
 
 	TRACE(("page_compare_func: page %p, key %p\n", page, key));
 
-	if (page->cache == key->cache && page->offset == key->offset)
+	if (page->cache == key->cache && page->cache_offset == key->offset)
 		return 0;
 
 	return -1;
@@ -62,16 +62,11 @@ page_hash_func(void *_p, const void *_key, uint32 range)
 {
 	vm_page *page = _p;
 	const struct page_lookup_key *key = _key;
-#if 0
-	if(p)
-		dprintf("page_hash_func: p 0x%x, key 0x%x, HASH = 0x%x\n", p, key, HASH(p->offset, p->cache_ref) % range);
-	else
-		dprintf("page_hash_func: p 0x%x, key 0x%x, HASH = 0x%x\n", p, key, HASH(key->offset, key->ref) % range);
-#endif
-#define HASH(offset, ref) ((unsigned int)(offset >> 12) ^ ((unsigned int)(ref)>>4))
+
+#define HASH(offset, ref) ((offset) ^ ((uint32)(ref) >> 4))
 
 	if (page)
-		return HASH(page->offset, page->cache) % range;
+		return HASH(page->cache_offset, page->cache) % range;
 
 	return HASH(key->offset, key->cache) % range;
 }
@@ -212,7 +207,7 @@ vm_cache_lookup_page(vm_cache_ref *cache_ref, off_t offset)
 
 	ASSERT_LOCKED_MUTEX(&cache_ref->lock);
 
-	key.offset = offset;
+	key.offset = (uint32)(offset >> PAGE_SHIFT);
 	key.cache = cache_ref->cache;
 
 	state = disable_interrupts();
@@ -235,7 +230,7 @@ vm_cache_insert_page(vm_cache_ref *cache_ref, vm_page *page, off_t offset)
 	TRACE(("vm_cache_insert_page: cache_ref %p, page %p, offset %Ld\n", cache_ref, page, offset));
 	ASSERT_LOCKED_MUTEX(&cache_ref->lock);
 
-	page->offset = offset;
+	page->cache_offset = (uint32)(offset >> PAGE_SHIFT);
 
 	if (cache_ref->cache->page_list != NULL)
 		cache_ref->cache->page_list->cache_prev = page;
@@ -351,12 +346,13 @@ vm_cache_resize(vm_cache_ref *cacheRef, off_t newSize)
 	oldSize = cache->virtual_size;
 	if (newSize < oldSize) {
 		// we need to remove all pages in the cache outside of the new virtual size
+		uint32 lastOffset = (uint32)(newSize >> PAGE_SHIFT);
 		vm_page *page, *next;
 
 		for (page = cache->page_list; page; page = next) {
 			next = page->cache_next;
 
-			if (page->offset >= newSize) {
+			if (page->cache_offset >= lastOffset) {
 				// remove the page and put it into the free queue
 				vm_cache_remove_page(cacheRef, page);
 				vm_page_set_state(page, PAGE_STATE_FREE);
@@ -137,20 +137,24 @@ write_page(vm_page *page)
 	status_t status;
 	iovec vecs[1];
 
-	TRACE(("write_page(page = %p): offset = %Ld\n", page, page->offset));
+	TRACE(("write_page(page = %p): offset = %Ld\n", page,
+		(off_t)page->cache_offset << PAGE_SHIFT));
 
-	status = vm_get_physical_page(page->ppn * B_PAGE_SIZE,
+	status = vm_get_physical_page(page->physical_page_number * B_PAGE_SIZE,
 		(addr_t *)&vecs[0].iov_base, PHYSICAL_PAGE_CAN_WAIT);
 	if (status < B_OK)
 		panic("could not map page!");
 	vecs->iov_len = B_PAGE_SIZE;
 
-	status = store->ops->write(store, page->offset, vecs, 1, &length);
+	status = store->ops->write(store, (off_t)page->cache_offset << PAGE_SHIFT,
+		vecs, 1, &length);
 
 	vm_put_physical_page((addr_t)vecs[0].iov_base);
 
-	if (status < B_OK)
-		dprintf("write_page(page = %p): offset = %Ld, status = %ld\n", page, page->offset, status);
+	if (status < B_OK) {
+		dprintf("write_page(page = %p): offset = %lx, status = %ld\n",
+			page, page->cache_offset, status);
+	}
 
 	return status;
 }
@@ -164,6 +168,7 @@ vm_page_write_modified(vm_cache *cache)
 
 	for (; page; page = page->cache_next) {
 		bool gotPage = false;
+		off_t pageOffset;
 		status_t status;
 		vm_area *area;
 
@@ -188,9 +193,11 @@ vm_page_write_modified(vm_cache *cache)
 		// the chance to write it back, then we'll write it again later - that will
 		// probably not happen that often, though.
 
+		pageOffset = (off_t)page->cache_offset << PAGE_SHIFT;
+
 		for (area = page->cache->ref->areas; area; area = area->cache_next) {
-			if (page->offset >= area->cache_offset
-				&& page->offset < area->cache_offset + area->size) {
+			if (pageOffset >= area->cache_offset
+				&& pageOffset < area->cache_offset + area->size) {
 				vm_translation_map *map = &area->address_space->translation_map;
 				map->ops->lock(map);
 
@@ -198,14 +205,14 @@ vm_page_write_modified(vm_cache *cache)
 				// Check if the PAGE_MODIFIED bit hasn't been propagated yet
 				addr_t physicalAddress;
 				uint32 flags;
-				map->ops->query(map, page->offset - area->cache_offset + area->base,
+				map->ops->query(map, pageOffset - area->cache_offset + area->base,
 					&physicalAddress, &flags);
 				if (flags & PAGE_MODIFIED)
 					gotPage = true;
 			}
 			if (gotPage) {
 				// clear the modified flag
-				map->ops->clear_flags(map, page->offset - area->cache_offset
+				map->ops->clear_flags(map, pageOffset - area->cache_offset
 					+ area->base, PAGE_MODIFIED);
 			}
 			map->ops->unlock(map);
@@ -299,7 +306,7 @@ static int pageout_daemon()
 		/* write the page out to it's backing store */
 		vecs->num = 1;
 		vecs->total_len = PAGE_SIZE;
-		vm_get_physical_page(page->ppn * PAGE_SIZE, (addr_t *)&vecs->vec[0].iov_base, PHYSICAL_PAGE_CAN_WAIT);
+		vm_get_physical_page(page->physical_page_number * PAGE_SIZE, (addr_t *)&vecs->vec[0].iov_base, PHYSICAL_PAGE_CAN_WAIT);
 		vecs->vec[0].iov_len = PAGE_SIZE;
 
 		err = page->cache_ref->cache->store->ops->write(page->cache_ref->cache->store, page->offset, vecs);
@@ -367,7 +374,7 @@ vm_page_init(kernel_args *ka)
 
 	// initialize the free page table
 	for (i = 0; i < num_pages - 1; i++) {
-		all_pages[i].ppn = physical_page_offset + i;
+		all_pages[i].physical_page_number = physical_page_offset + i;
 		all_pages[i].type = PAGE_TYPE_PHYSICAL;
 		all_pages[i].state = PAGE_STATE_FREE;
 		all_pages[i].ref_count = 0;
@@ -466,7 +473,7 @@ page_scrubber(void *unused)
 		scrubCount = i;
 
 		for (i = 0; i < scrubCount; i++) {
-			clear_page(page[i]->ppn * B_PAGE_SIZE);
+			clear_page(page[i]->physical_page_number * B_PAGE_SIZE);
 		}
 
 		state = disable_interrupts();
@@ -601,7 +608,7 @@ out:
 
 	if (p != NULL && page_state == PAGE_STATE_CLEAR
 		&& (old_page_state == PAGE_STATE_FREE || old_page_state == PAGE_STATE_UNUSED))
-		clear_page(p->ppn * B_PAGE_SIZE);
+		clear_page(p->physical_page_number * B_PAGE_SIZE);
 
 	return p;
 }
@@ -663,7 +670,7 @@ vm_page_allocate_page(int page_state)
 
 	// if needed take the page from the free queue and zero it out
 	if (page_state == PAGE_STATE_CLEAR && old_page_state == PAGE_STATE_FREE)
-		clear_page(p->ppn * B_PAGE_SIZE);
+		clear_page(p->physical_page_number * B_PAGE_SIZE);
 
 	return p;
 }