Another work-in-progress towards having extra structures per mapping per page:
* vm_area and vm_page now have a new field "mappings" where they will store lists of vm_page_mapping structures. vm_page::ref_count is gone, as it's no longer needed (it was never updated correctly, anyway).
* vm_caches now have a type field, i.e. CACHE_TYPE_RAM for anonymous areas - this makes the stores a bit less independent, but is quite handy in several places.
* Added new vm_map_page() and vm_unmap_pages() functions to be used whenever you map pages into or unmap pages from an area (see the usage sketch below). They don't do much more than handling vm_page::wired_count correctly right now, though (i.e. B_LAZY_LOCK is now working as expected as well).
* Moved the device fault handler to vm_map_physical_memory(); it was not really used as a fault handler, anyway.
* Hadn't noticed that Ingo's changes to the I/O space region broke lock_memory(). It now checks the type of the area that contains the memory, and no longer locks when that isn't needed, which solves the problem in a platform-independent way.
* Implemented lock_memory() and unlock_memory() for real: they now change the vm_page::wired_count member to identify pages that shouldn't be paged out.
* vm_area_for() now uses vm_area_lookup() internally.
* Fixed various potential overflow conditions with areas that reach 0xffffffff.
* Creating anonymous areas with B_FULL_LOCK no longer causes vm_soft_fault() to be called; instead, the pages are allocated and mapped (via vm_map_page()) directly.
* Removed the _vm_ prefix from create_area_struct() and create_reserved_area_struct().
* Fixed a bug in vm_page_write_modified(): pages whose write-back failed were not re-enqueued to the modified queue when they should have been.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@20251 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 91f1fe44d2
commit ca954b7816
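To illustrate the new per-page wiring path, here is a rough usage sketch of vm_map_page(), modeled on the B_FULL_LOCK branch of vm_create_anonymous_area() in the diff below. The helper name wire_anonymous_area() is made up for illustration; memory reservation, error paths, and the DEBUG_KERNEL_STACKS guard pages are left out.

static void
wire_anonymous_area(vm_area *area, vm_cache_ref *cacheRef, uint32 protection)
{
	// the cache must be locked while pages are inserted
	mutex_lock(&cacheRef->lock);

	off_t offset = 0;
	for (addr_t address = area->base; address < area->base + (area->size - 1);
			address += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
		// grab a cleared page; with the memory reserved upfront this
		// is not expected to fail
		vm_page *page = vm_page_allocate_page(PAGE_STATE_CLEAR);
		if (page == NULL)
			panic("out of pages despite reservation");

		// attach the page to the cache at this offset, then map it;
		// vm_map_page() also bumps vm_page::wired_count for areas that
		// are not B_NO_LOCK and marks the page active
		vm_cache_insert_page(cacheRef, page, offset);
		vm_map_page(area, page, address, protection);
	}

	mutex_unlock(&cacheRef->lock);
}

Tearing such an area down goes through vm_unmap_pages(area, area->base, area->size), which decrements wired_count for every still-mapped page before removing the translations.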
@@ -60,10 +60,11 @@ area_id vm_clone_area(team_id team, const char *name, void **address,
status_t vm_delete_area(team_id aid, area_id id);
status_t vm_create_vnode_cache(void *vnode, vm_cache_ref **_cacheRef);
vm_area *vm_area_lookup(vm_address_space *addressSpace, addr_t address);
status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
status_t vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr);
status_t vm_unmap_pages(vm_area *area, addr_t base, size_t length);
status_t vm_map_page(vm_area *area, vm_page *page, addr_t address,
uint32 protection);
status_t vm_get_physical_page(addr_t paddr, addr_t *vaddr, uint32 flags);
status_t vm_put_physical_page(addr_t vaddr);
@@ -10,10 +10,60 @@
#include <kernel.h>
#include <sys/uio.h>
#include <arch/vm_types.h>
#include <arch/vm_translation_map.h>
#include <util/DoublyLinkedQueue.h>

#include <sys/uio.h>

#ifdef __cplusplus
struct vm_page_mapping;
typedef DoublyLinkedListLink<vm_page_mapping> vm_page_mapping_link;
#else
typedef struct { void *previous; void *next; } vm_page_mapping_link;
#endif

typedef struct vm_page_mapping {
vm_page_mapping_link page_link;
vm_page_mapping_link area_link;
struct vm_page *page;
struct vm_area *area;
} vm_page_mapping;

#ifdef __cplusplus
class DoublyLinkedPageLink {
public:
inline vm_page_mapping_link *operator()(vm_page_mapping *element) const
{
return &element->page_link;
}

inline const vm_page_mapping_link *operator()(const vm_page_mapping *element) const
{
return &element->page_link;
}
};

class DoublyLinkedAreaLink {
public:
inline vm_page_mapping_link *operator()(vm_page_mapping *element) const
{
return &element->area_link;
}

inline const vm_page_mapping_link *operator()(const vm_page_mapping *element) const
{
return &element->area_link;
}
};

typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedPageLink> vm_page_mappings;
typedef class DoublyLinkedQueue<vm_page_mapping, DoublyLinkedAreaLink> vm_area_mappings;
#else // !__cplusplus
typedef void *vm_page_mappings;
typedef void *vm_area_mappings;
#endif

// vm page
typedef struct vm_page {

@@ -31,12 +81,15 @@ typedef struct vm_page {
struct vm_page *cache_prev;
struct vm_page *cache_next;

int32 ref_count;
vm_page_mappings mappings;

uint32 type : 2;
uint32 state : 3;
uint32 busy_reading : 1;
uint32 busy_writing : 1;
uint8 type : 2;
uint8 state : 3;
uint8 busy_reading : 1;
uint8 busy_writing : 1;

uint16 wired_count;
int8 usage_count;
} vm_page;

enum {

@@ -56,6 +109,13 @@ enum {
PAGE_STATE_UNUSED
};

enum {
CACHE_TYPE_RAM = 0,
CACHE_TYPE_VNODE,
CACHE_TYPE_DEVICE,
CACHE_TYPE_NULL
};

// vm_cache_ref
typedef struct vm_cache_ref {
struct vm_cache *cache;

@@ -82,6 +142,7 @@ typedef struct vm_cache {
uint32 temporary : 1;
uint32 scan_skip : 1;
uint32 busy : 1;
uint32 type : 5;
} vm_cache;

// vm area

@@ -97,6 +158,7 @@ typedef struct vm_area {
struct vm_cache_ref *cache_ref;
off_t cache_offset;
vm_area_mappings mappings;

struct vm_address_space *address_space;
struct vm_area *address_space_next;
@@ -117,7 +117,7 @@ vm_get_area(area_id id)

static vm_area *
_vm_create_reserved_region_struct(vm_address_space *addressSpace, uint32 flags)
create_reserved_area_struct(vm_address_space *addressSpace, uint32 flags)
{
vm_area *reserved = (vm_area *)malloc(sizeof(vm_area));
if (reserved == NULL)

@@ -134,7 +134,7 @@ _vm_create_reserved_region_struct(vm_address_space *addressSpace, uint32 flags)

static vm_area *
_vm_create_area_struct(vm_address_space *addressSpace, const char *name,
create_area_struct(vm_address_space *addressSpace, const char *name,
uint32 wiring, uint32 protection)
{
vm_area *area = NULL;

@@ -234,7 +234,7 @@ find_reserved_area(vm_address_space *addressSpace, addr_t start,
} else {
// the area splits the reserved range into two separate ones
// we need a new reserved area to cover this space
vm_area *reserved = _vm_create_reserved_region_struct(addressSpace,
vm_area *reserved = create_reserved_area_struct(addressSpace,
next->protection);
if (reserved == NULL)
return B_NO_MEMORY;

@@ -258,8 +258,7 @@ find_reserved_area(vm_address_space *addressSpace, addr_t start,
}

/** must be called with this address space's sem held */

/*! Must be called with this address space's sem held */
static status_t
find_and_insert_area_slot(vm_address_space *addressSpace, addr_t start,
addr_t size, addr_t end, uint32 addressSpec, vm_area *area)

@@ -268,7 +267,7 @@ find_and_insert_area_slot(vm_address_space *addressSpace, addr_t start,
vm_area *next;
bool foundSpot = false;

TRACE(("find_and_insert_region_slot: address space %p, start 0x%lx, "
TRACE(("find_and_insert_area_slot: address space %p, start 0x%lx, "
"size %ld, end 0x%lx, addressSpec %ld, area %p\n", addressSpace, start,
size, end, addressSpec, area));

@@ -506,7 +505,7 @@ map_backing_store(vm_address_space *addressSpace, vm_cache_ref *cacheRef,
addressSpace, cacheRef, *_virtualAddress, offset, size, addressSpec,
wiring, protection, _area, areaName));

vm_area *area = _vm_create_area_struct(addressSpace, areaName, wiring, protection);
vm_area *area = create_area_struct(addressSpace, areaName, wiring, protection);
if (area == NULL)
return B_NO_MEMORY;

@@ -581,6 +580,7 @@ map_backing_store(vm_address_space *addressSpace, vm_cache_ref *cacheRef,
// attach the cache to the area
area->cache_ref = cacheRef;
area->cache_offset = offset;

// point the cache back to the area
vm_cache_insert_area_locked(cacheRef, area);
mutex_unlock(&cacheRef->lock);

@@ -682,7 +682,7 @@ vm_reserve_address_range(team_id team, void **_address, uint32 addressSpec,
if (addressSpace == NULL)
return B_BAD_TEAM_ID;

area = _vm_create_reserved_region_struct(addressSpace, flags);
area = create_reserved_area_struct(addressSpace, flags);
if (area == NULL) {
status = B_NO_MEMORY;
goto err1;
@@ -808,17 +808,17 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
goto err3;

cache->temporary = 1;
cache->type = CACHE_TYPE_RAM;
cache->virtual_size = size;

switch (wiring) {
case B_LAZY_LOCK: // for now
case B_LAZY_LOCK:
case B_FULL_LOCK:
case B_CONTIGUOUS:
case B_ALREADY_WIRED:
cache->scan_skip = 1;
break;
case B_NO_LOCK:
//case B_LAZY_LOCK:
cache->scan_skip = 0;
break;
}
@@ -840,22 +840,33 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,

case B_FULL_LOCK:
{
// Pages aren't mapped at this point, but we just simulate a fault on
// every page, which should allocate them
// ToDo: at this point, it would probably be cheaper to allocate
// and map the pages directly
addr_t va;
for (va = area->base; va < area->base + area->size; va += B_PAGE_SIZE) {
// Allocate and map all pages for this area
mutex_lock(&cacheRef->lock);

off_t offset = 0;
for (addr_t address = area->base; address < area->base + (area->size - 1);
address += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
#ifdef DEBUG_KERNEL_STACKS
# ifdef STACK_GROWS_DOWNWARDS
if (isStack && va < area->base + KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
if (isStack && address < area->base + KERNEL_STACK_GUARD_PAGES
* B_PAGE_SIZE)
# else
if (isStack && va >= area->base + area->size - KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
if (isStack && address >= area->base + area->size
- KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE)
# endif
continue;
#endif
vm_soft_fault(va, false, false);
vm_page *page = vm_page_allocate_page(PAGE_STATE_CLEAR);
if (page == NULL) {
// this shouldn't really happen, as we reserve the memory upfront
panic("couldn't fulfill B_FULL lock!");
}

vm_cache_insert_page(cacheRef, page, offset);
vm_map_page(area, page, address, protection);
}

mutex_unlock(&cacheRef->lock);
break;
}
@@ -865,33 +876,38 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
// boot time. Find the appropriate vm_page objects and stick them in
// the cache object.
vm_translation_map *map = &addressSpace->translation_map;
addr_t va;
addr_t pa;
uint32 flags;
int err;
off_t offset = 0;

if (!kernel_startup)
panic("ALREADY_WIRED flag used outside kernel startup\n");

mutex_lock(&cacheRef->lock);
(*map->ops->lock)(map);
for (va = area->base; va < area->base + area->size; va += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
err = (*map->ops->query)(map, va, &pa, &flags);
if (err < 0) {
// dprintf("vm_create_anonymous_area: error looking up mapping for va 0x%x\n", va);
continue;
map->ops->lock(map);

for (addr_t virtualAddress = area->base; virtualAddress < area->base
+ (area->size - 1); virtualAddress += B_PAGE_SIZE,
offset += B_PAGE_SIZE) {
addr_t physicalAddress;
uint32 flags;
status = map->ops->query(map, virtualAddress,
&physicalAddress, &flags);
if (status < B_OK) {
panic("looking up mapping failed for va 0x%lx\n",
virtualAddress);
}
page = vm_lookup_page(pa / B_PAGE_SIZE);
page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL) {
// dprintf("vm_create_anonymous_area: error looking up vm_page structure for pa 0x%x\n", pa);
continue;
panic("looking up page failed for pa 0x%lx\n",
physicalAddress);
}
atomic_add(&page->ref_count, 1);

page->wired_count++;
// TODO: needs to be atomic on all platforms!
vm_page_set_state(page, PAGE_STATE_WIRED);
vm_cache_insert_page(cacheRef, page, offset);
}
(*map->ops->unlock)(map);

map->ops->unlock(map);
mutex_unlock(&cacheRef->lock);
break;
}
@@ -906,25 +922,27 @@ vm_create_anonymous_area(team_id aid, const char *name, void **address,
off_t offset = 0;

mutex_lock(&cacheRef->lock);
(*map->ops->lock)(map);
map->ops->lock(map);

for (virtualAddress = area->base; virtualAddress < area->base + area->size;
virtualAddress += B_PAGE_SIZE, offset += B_PAGE_SIZE,
physicalAddress += B_PAGE_SIZE) {
for (virtualAddress = area->base; virtualAddress < area->base
+ (area->size - 1); virtualAddress += B_PAGE_SIZE,
offset += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL)
panic("couldn't lookup physical page just allocated\n");

atomic_add(&page->ref_count, 1);
status = (*map->ops->map)(map, virtualAddress, physicalAddress, protection);
if (status < 0)
status = map->ops->map(map, virtualAddress, physicalAddress,
protection);
if (status < B_OK)
panic("couldn't map physical page in page run\n");

page->wired_count++;
// TODO: needs to be atomic on all platforms!
vm_page_set_state(page, PAGE_STATE_WIRED);
vm_cache_insert_page(cacheRef, page, offset);
}

(*map->ops->unlock)(map);
map->ops->unlock(map);
mutex_unlock(&cacheRef->lock);
break;
}
@@ -1010,6 +1028,7 @@ vm_map_physical_memory(team_id aspaceID, const char *name, void **_address,

// tell the page scanner to skip over this area, it's pages are special
cache->scan_skip = 1;
cache->type = CACHE_TYPE_DEVICE;
cache->virtual_size = size;

cacheRef = cache->ref;

@@ -1029,13 +1048,18 @@ vm_map_physical_memory(team_id aspaceID, const char *name, void **_address,

if (status >= B_OK) {
mutex_lock(&cacheRef->lock);
store = cacheRef->cache->store;

// make sure our area is mapped in completely
// (even if that makes the fault routine pretty much useless)

vm_translation_map *map = &addressSpace->translation_map;
map->ops->lock(map);

for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
store->ops->fault(store, addressSpace, offset);
map->ops->map(map, area->base + offset, physicalAddress + offset,
protection);
}

map->ops->unlock(map);
mutex_unlock(&cacheRef->lock);
}

@@ -1093,6 +1117,7 @@ vm_create_null_area(team_id team, const char *name, void **address,

// tell the page scanner to skip over this area, no pages will be mapped here
cache->scan_skip = 1;
cache->type = CACHE_TYPE_NULL;
cache->virtual_size = size;

cacheRef = cache->ref;

@@ -1145,6 +1170,8 @@ vm_create_vnode_cache(void *vnode, struct vm_cache_ref **_cacheRef)
if (status < B_OK)
goto err2;

cache->type = CACHE_TYPE_VNODE;

*_cacheRef = cache->ref;
vfs_acquire_vnode(vnode);
return B_OK;

@@ -1433,10 +1460,7 @@ _vm_put_area(vm_area *area, bool aspaceLocked)

// unmap the virtual address space the area occupied. any page faults at this
// point should fail in vm_area_lookup().
vm_translation_map *map = &addressSpace->translation_map;
map->ops->lock(map);
map->ops->unmap(map, area->base, area->base + (area->size - 1));
map->ops->unlock(map);
vm_unmap_pages(area, area->base, area->size);

// ToDo: do that only for vnode stores
vm_cache_write_modified(area->cache_ref, false);
@@ -1765,6 +1789,61 @@ vm_get_page_mapping(team_id aid, addr_t vaddr, addr_t *paddr)
}

status_t
vm_unmap_pages(vm_area *area, addr_t base, size_t size)
{
vm_translation_map *map = &area->address_space->translation_map;

map->ops->lock(map);

if (area->wiring != B_NO_LOCK && area->cache_ref->cache->type != CACHE_TYPE_DEVICE) {
// iterate through all pages and decrease their wired count
for (addr_t virtualAddress = base; virtualAddress < base + (size - 1);
virtualAddress += B_PAGE_SIZE) {
addr_t physicalAddress;
uint32 flags;
status_t status = map->ops->query(map, virtualAddress,
&physicalAddress, &flags);
if (status < B_OK || (flags & PAGE_PRESENT) == 0)
continue;

vm_page *page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL) {
panic("area %p looking up page failed for pa 0x%lx\n", area,
physicalAddress);
}

page->wired_count--;
// TODO: needs to be atomic on all platforms!
}
}

map->ops->unmap(map, base, base + (size - 1));
map->ops->unlock(map);
return B_OK;
}

status_t
vm_map_page(vm_area *area, vm_page *page, addr_t address, uint32 protection)
{
vm_translation_map *map = &area->address_space->translation_map;

map->ops->lock(map);
map->ops->map(map, address, page->physical_page_number * B_PAGE_SIZE,
protection);
map->ops->unlock(map);

if (area->wiring != B_NO_LOCK) {
page->wired_count++;
// TODO: needs to be atomic on all platforms!
}

vm_page_set_state(page, PAGE_STATE_ACTIVE);
return B_OK;
}

static int
display_mem(int argc, char **argv)
{
@@ -2048,14 +2127,14 @@ dump_cache(int argc, char **argv)
continue;

if (page->type == PAGE_TYPE_PHYSICAL) {
kprintf("\t%p ppn 0x%lx offset 0x%lx type %ld state %ld (%s) ref_count %ld\n",
page, page->physical_page_number, page->cache_offset, page->type, page->state,
page_state_to_text(page->state), page->ref_count);
kprintf("\t%p ppn 0x%lx offset 0x%lx type %u state %u (%s) wired_count %u\n",
page, page->physical_page_number, page->cache_offset, page->type, page->state,
page_state_to_text(page->state), page->wired_count);
} else if(page->type == PAGE_TYPE_DUMMY) {
kprintf("\t%p DUMMY PAGE state %ld (%s)\n",
kprintf("\t%p DUMMY PAGE state %u (%s)\n",
page, page->state, page_state_to_text(page->state));
} else
kprintf("\t%p UNKNOWN PAGE type %ld\n", page, page->type);
kprintf("\t%p UNKNOWN PAGE type %u\n", page, page->type);
}

if (!showPages)
@@ -2211,7 +2290,6 @@ vm_area_for(team_id team, addr_t address)
{
vm_address_space *addressSpace;
area_id id = B_ERROR;
vm_area *area;

addressSpace = vm_get_address_space_by_id(team);
if (addressSpace == NULL)

@@ -2219,17 +2297,9 @@ vm_area_for(team_id team, addr_t address)

acquire_sem_etc(addressSpace->sem, READ_COUNT, 0, 0);

area = addressSpace->areas;
for (; area != NULL; area = area->address_space_next) {
// ignore reserved space regions
if (area->id == RESERVED_AREA_ID)
continue;

if (address >= area->base && address < area->base + area->size) {
id = area->id;
break;
}
}
vm_area *area = vm_area_lookup(addressSpace, address);
if (area != NULL)
id = area->id;

release_sem_etc(addressSpace->sem, READ_COUNT, 0);
vm_put_address_space(addressSpace);
@@ -3218,18 +3288,9 @@ vm_soft_fault(addr_t originalAddress, bool isWrite, bool isUser)
if (page->cache != topCacheRef->cache && !isWrite)
newProtection &= ~(isUser ? B_WRITE_AREA : B_KERNEL_WRITE_AREA);

atomic_add(&page->ref_count, 1);

//vm_page_map(area, page, newProtection);
map->ops->lock(map);
map->ops->map(map, address, page->physical_page_number * B_PAGE_SIZE,
newProtection);
map->ops->unlock(map);
vm_map_page(area, page, address, newProtection);
}

vm_page_set_state(page, area->wiring == B_NO_LOCK
? PAGE_STATE_ACTIVE : PAGE_STATE_WIRED);

release_sem_etc(addressSpace->sem, READ_COUNT, 0);

mutex_unlock(&pageSourceRef->lock);
@@ -3256,14 +3317,14 @@ vm_area_lookup(vm_address_space *addressSpace, addr_t address)

// check the areas list first
area = addressSpace->area_hint;
if (area && area->base <= address && (area->base + area->size) > address)
if (area && area->base <= address && area->base + (area->size - 1) >= address)
goto found;

for (area = addressSpace->areas; area != NULL; area = area->address_space_next) {
if (area->id == RESERVED_AREA_ID)
continue;

if (area->base <= address && (area->base + area->size) > address)
if (area->base <= address && area->base + (area->size - 1) >= address)
break;
}
@@ -3382,6 +3443,39 @@ fill_area_info(struct vm_area *area, area_info *info, size_t size)
}

/*!
Tests wether or not the area that contains the specified address
needs any kind of locking, and actually exists.
Used by both lock_memory() and unlock_memory().
*/
status_t
test_lock_memory(vm_address_space *addressSpace, addr_t address,
bool &needsLocking)
{
acquire_sem_etc(addressSpace->sem, READ_COUNT, 0, 0);

vm_area *area = vm_area_lookup(addressSpace, address);
if (area != NULL) {
mutex_lock(&area->cache_ref->lock);

// This determines if we need to lock the memory at all
needsLocking = area->cache_ref->cache->type != CACHE_TYPE_NULL
&& area->cache_ref->cache->type != CACHE_TYPE_DEVICE
&& area->wiring != B_FULL_LOCK
&& area->wiring != B_CONTIGUOUS;

mutex_unlock(&area->cache_ref->lock);
}

release_sem_etc(addressSpace->sem, READ_COUNT, 0);

if (area == NULL)
return B_BAD_ADDRESS;

return B_OK;
}

// #pragma mark -
@@ -3427,19 +3521,7 @@ lock_memory(void *address, ulong numBytes, ulong flags)
addr_t base = (addr_t)address;
addr_t end = base + numBytes;
bool isUser = IS_USER_ADDRESS(address);

// ToDo: Our VM currently doesn't support locking, this function
// will now at least make sure that the memory is paged in, but
// that's about it.
// Nevertheless, it must be implemented as soon as we're able to
// swap pages out of memory.

// ToDo: this is a hack, too; the iospace area is a null region and
// officially cannot be written to or read; ie. vm_soft_fault() will
// fail there. Furthermore, this is x86 specific as well.
#define IOSPACE_SIZE (256 * 1024 * 1024)
if (base >= KERNEL_BASE + IOSPACE_SIZE && base + numBytes < KERNEL_BASE + 2 * IOSPACE_SIZE)
return B_OK;
bool needsLocking = true;

if (isUser)
addressSpace = vm_get_current_user_address_space();
@@ -3448,44 +3530,125 @@ lock_memory(void *address, ulong numBytes, ulong flags)
if (addressSpace == NULL)
return B_ERROR;

// test if we're on an area that allows faults at all

map = &addressSpace->translation_map;

status_t status = test_lock_memory(addressSpace, base, needsLocking);
if (status < B_OK)
goto out;
if (!needsLocking)
goto out;

for (; base < end; base += B_PAGE_SIZE) {
addr_t physicalAddress;
uint32 protection;
status_t status;

map->ops->lock(map);
map->ops->query(map, base, &physicalAddress, &protection);
status = map->ops->query(map, base, &physicalAddress, &protection);
map->ops->unlock(map);

if (status < B_OK)
goto out;

if ((protection & PAGE_PRESENT) != 0) {
// if B_READ_DEVICE is set, the caller intents to write to the locked
// memory, so if it hasn't been mapped writable, we'll try the soft
// fault anyway
if ((flags & B_READ_DEVICE) == 0
|| (protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0)
continue;
|| (protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
// update wiring
vm_page *page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL)
panic("couldn't lookup physical page just allocated\n");

page->wired_count++;
// TODO: needs to be atomic on all platforms!
continue;
}
}

status = vm_soft_fault(base, (flags & B_READ_DEVICE) != 0, isUser);
if (status != B_OK) {
dprintf("lock_memory(address = %p, numBytes = %lu, flags = %lu) failed: %s\n",
address, numBytes, flags, strerror(status));
vm_put_address_space(addressSpace);
return status;
goto out;
}

map->ops->lock(map);
status = map->ops->query(map, base, &physicalAddress, &protection);
map->ops->unlock(map);

if (status < B_OK)
goto out;

// update wiring
vm_page *page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL)
panic("couldn't lookup physical page");

page->wired_count++;
// TODO: needs to be atomic on all platforms!
}

out:
vm_put_address_space(addressSpace);
return B_OK;
return status;
}

long
unlock_memory(void *buffer, ulong numBytes, ulong flags)
unlock_memory(void *address, ulong numBytes, ulong flags)
{
return B_OK;
vm_address_space *addressSpace = NULL;
struct vm_translation_map *map;
addr_t base = (addr_t)address;
addr_t end = base + numBytes;
bool needsLocking = true;

if (IS_USER_ADDRESS(address))
addressSpace = vm_get_current_user_address_space();
else
addressSpace = vm_get_kernel_address_space();
if (addressSpace == NULL)
return B_ERROR;

map = &addressSpace->translation_map;

status_t status = test_lock_memory(addressSpace, base, needsLocking);
if (status < B_OK)
goto out;
if (!needsLocking)
goto out;

for (; base < end; base += B_PAGE_SIZE) {
map->ops->lock(map);

addr_t physicalAddress;
uint32 protection;
status = map->ops->query(map, base, &physicalAddress,
&protection);

map->ops->unlock(map);

if (status < B_OK)
goto out;
if ((protection & PAGE_PRESENT) == 0)
panic("calling unlock_memory() on unmapped memory!");

// update wiring
vm_page *page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL)
panic("couldn't lookup physical page");

page->wired_count--;
// TODO: needs to be atomic on all platforms!
}

out:
vm_put_address_space(addressSpace);
return status;
}
@@ -3832,7 +3995,7 @@ transfer_area(area_id id, void **_address, uint32 addressSpec, team_id target)

sourceAddressSpace = area->address_space;

reserved = _vm_create_reserved_region_struct(sourceAddressSpace, 0);
reserved = create_reserved_area_struct(sourceAddressSpace, 0);
if (reserved == NULL) {
status = B_NO_MEMORY;
goto err2;
@@ -22,6 +22,7 @@ static addr_t free_memory_low_water
static addr_t free_memory_high_water;

#if 0
static void
scan_pages(vm_address_space *aspace, addr_t free_target)
{

@@ -202,6 +203,7 @@ page_daemon(void *unused)
}
}
}
#endif

status_t
@@ -182,9 +182,11 @@ dump_page(int argc, char **argv)
kprintf("cache: %p\n", page->cache);
kprintf("cache_offset: %ld\n", page->cache_offset);
kprintf("cache_next,prev: %p, %p\n", page->cache_next, page->cache_prev);
kprintf("ref_count: %ld\n", page->ref_count);
kprintf("mappings: %p\n", page->mappings);
kprintf("type: %d\n", page->type);
kprintf("state: %d\n", page->state);
kprintf("wired_count: %u\n", page->wired_count);
kprintf("usage_count: %u\n", page->usage_count);

return 0;
}

@@ -637,7 +639,7 @@ vm_page_write_modified(vm_cache *cache, bool fsReenter)
state = disable_interrupts();
acquire_spinlock(&sPageLock);

if (page->ref_count > 0)
if (page->mappings != NULL || page->wired_count)
page->state = PAGE_STATE_ACTIVE;
else
page->state = PAGE_STATE_INACTIVE;

@@ -650,6 +652,16 @@ vm_page_write_modified(vm_cache *cache, bool fsReenter)
} else {
// We don't have to put the PAGE_MODIFIED bit back, as it's still
// in the modified pages list.
if (dequeuedPage) {
state = disable_interrupts();
acquire_spinlock(&sPageLock);

page->state = PAGE_STATE_MODIFIED;
enqueue_page(&page_modified_queue, page);

release_spinlock(&sPageLock);
restore_interrupts(state);
}
}
}

@@ -713,9 +725,9 @@ vm_page_init(kernel_args *args)
sPages[i].physical_page_number = sPhysicalPageOffset + i;
sPages[i].type = PAGE_TYPE_PHYSICAL;
sPages[i].state = PAGE_STATE_FREE;
sPages[i].ref_count = 0;
//sPages[i].wired_count = 0;
//sPages[i].usage_count = 0;
sPages[i].mappings = NULL;
sPages[i].wired_count = 0;
sPages[i].usage_count = 0;
enqueue_page(&page_free_queue, &sPages[i]);
}
@@ -62,42 +62,11 @@ device_write(struct vm_store *store, off_t offset, const iovec *vecs, size_t cou
}

/** this fault handler should take over the page fault routine and map the page in
*
* setup: the cache that this store is part of has a ref being held and will be
* released after this handler is done
*/

static status_t
device_fault(struct vm_store *_store, struct vm_address_space *aspace, off_t offset)
{
struct device_store *store = (struct device_store *)_store;
vm_cache_ref *cache_ref = store->vm.cache->ref;
vm_translation_map *map = &aspace->translation_map;
vm_area *area;

// figure out which page needs to be mapped where
map->ops->lock(map);

// cycle through all of the regions that map this cache and map the page in
for (area = cache_ref->areas; area != NULL; area = area->cache_next) {
// make sure this page in the cache that was faulted on is covered in this area
if (offset >= area->cache_offset && (offset - area->cache_offset) < area->size) {
// don't map already mapped pages
addr_t physicalAddress;
uint32 flags;
map->ops->query(map, area->base + (offset - area->cache_offset),
&physicalAddress, &flags);
if (flags & PAGE_PRESENT)
continue;

map->ops->map(map, area->base + (offset - area->cache_offset),
store->base_address + offset, area->protection);
}
}

map->ops->unlock(map);
return B_OK;
// devices are mapped in completely, so we shouldn't experience faults
return B_BAD_ADDRESS;
}
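For context, the public API that this change finally wires up is the long-standing lock_memory()/unlock_memory() pair. A hedged sketch of how a driver might use it around a DMA transfer follows; the function name, buffer, and length are made up for illustration, and everything beyond the lock/unlock calls is omitted.

static status_t
read_into_buffer(void *buffer, size_t length)
{
	// pin the pages so they cannot be paged out while the device writes
	// into them; B_READ_DEVICE indicates the locked memory will be
	// written to (the caller reads from the device)
	status_t status = lock_memory(buffer, length, B_DMA_IO | B_READ_DEVICE);
	if (status != B_OK)
		return status;

	// ... set up and run the DMA transfer into 'buffer' here ...

	// drop the wiring again, passing the same flags used to lock
	unlock_memory(buffer, length, B_DMA_IO | B_READ_DEVICE);
	return B_OK;
}

With this commit, lock_memory() increments vm_page::wired_count for every page in the range (faulting pages in as needed), and unlock_memory() decrements it again, so the page scanner can tell which pages must not be paged out.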