haiku/headers/private/kernel/vm/VMArea.h


/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_AREA_H
#define _KERNEL_VM_VM_AREA_H


#include <vm_defs.h>

#include <condition_variable.h>
	// for ConditionVariable/ConditionVariableEntry used below
#include <lock.h>
#include <util/DoublyLinkedList.h>
#include <util/SinglyLinkedList.h>
#include <util/OpenHashTable.h>

#include <vm/vm_types.h>

struct VMAddressSpace;
struct VMArea;
struct VMCache;
struct VMKernelAddressSpace;
struct VMUserAddressSpace;

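// A thread waiting for a wired memory range in a VMArea to be removed. The
// waiter blocks on its condition variable until the range is unwired (cf.
// VMArea::AddWaiterIfWired()).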
struct VMAreaUnwiredWaiter
	: public DoublyLinkedListLinkImpl<VMAreaUnwiredWaiter> {
	VMArea*					area;
	addr_t					base;
	size_t					size;
	ConditionVariable		condition;
	ConditionVariableEntry	waitEntry;
};

typedef DoublyLinkedList<VMAreaUnwiredWaiter> VMAreaUnwiredWaiterList;

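// A wired (locked) memory range in a VMArea, created e.g. by
// lock_memory_etc(). As long as such a range exists, its pages must not be
// unmapped and the owning address space stays referenced.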
struct VMAreaWiredRange : SinglyLinkedListLinkImpl<VMAreaWiredRange> {
	VMArea*					area;
	addr_t					base;
	size_t					size;
	bool					writable;
	bool					implicit;	// range created automatically
	VMAreaUnwiredWaiterList	waiters;

	VMAreaWiredRange()
	{
	}
	VMAreaWiredRange(addr_t base, size_t size, bool writable, bool implicit)
		:
		area(NULL),
		base(base),
		size(size),
		writable(writable),
		implicit(implicit)
	{
	}

	void SetTo(addr_t base, size_t size, bool writable, bool implicit)
	{
		this->area = NULL;
		this->base = base;
		this->size = size;
		this->writable = writable;
		this->implicit = implicit;
	}

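	// Closed-interval overlap test between [base, base + size - 1] and this
	// range. Comparing the last addresses rather than the exclusive ends
	// avoids wraparound when a range reaches the top of the address space.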
	bool IntersectsWith(addr_t base, size_t size) const
	{
		return this->base + this->size - 1 >= base
			&& base + size - 1 >= this->base;
	}
};

typedef SinglyLinkedList<VMAreaWiredRange> VMAreaWiredRangeList;

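// Wiring state for a single page, used with vm_wire_page(): the wired range
// covering the page plus the page itself and its physical address.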
struct VMPageWiringInfo {
	VMAreaWiredRange	range;
	phys_addr_t			physicalAddress;
							// the actual physical address corresponding to
							// the virtual address passed to vm_wire_page()
							// (i.e. with in-page offset)
	vm_page*			page;
};

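// A contiguous range of virtual addresses in a VMAddressSpace, backed by a
// VMCache at cache_offset.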
struct VMArea {
	char*					name;
	area_id					id;
	uint32					protection;
	uint16					wiring;

private:
	uint16					memory_type;	// >> shifted by MEMORY_TYPE_SHIFT

public:
	VMCache*				cache;
	vint32					no_cache_change;
	off_t					cache_offset;
	uint32					cache_type;
	VMAreaMappings			mappings;
	uint8*					page_protections;

	struct VMAddressSpace*	address_space;
	struct VMArea*			cache_next;
	struct VMArea*			cache_prev;
	struct VMArea*			hash_next;

	addr_t Base() const { return fBase; }
	size_t Size() const { return fSize; }

	inline uint32 MemoryType() const;
	inline void SetMemoryType(uint32 memoryType);

	bool ContainsAddress(addr_t address) const
		{ return address >= fBase
			&& address <= fBase + (fSize - 1); }

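	// Wiring interface: Wire()/Unwire() add and remove entries from
	// fWiredRanges; AddWaiterIfWired() queues a waiter that is woken once an
	// overlapping wired range goes away.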
	bool IsWired() const
		{ return !fWiredRanges.IsEmpty(); }
	bool IsWired(addr_t base, size_t size) const;
	void Wire(VMAreaWiredRange* range);
	void Unwire(VMAreaWiredRange* range);
	VMAreaWiredRange* Unwire(addr_t base, size_t size, bool writable);

	bool AddWaiterIfWired(VMAreaUnwiredWaiter* waiter);
	bool AddWaiterIfWired(VMAreaUnwiredWaiter* waiter, addr_t base,
		size_t size, VMAreaWiredRange* ignoreRange = NULL);

protected:
	VMArea(VMAddressSpace* addressSpace, uint32 wiring, uint32 protection);
	~VMArea();

	status_t Init(const char* name, uint32 allocationFlags);

protected:
	friend class VMAddressSpace;
	friend class VMKernelAddressSpace;
	friend class VMUserAddressSpace;

protected:
	void SetBase(addr_t base) { fBase = base; }
	void SetSize(size_t size) { fSize = size; }

protected:
	addr_t					fBase;
	size_t					fSize;

	VMAreaWiredRangeList	fWiredRanges;
};

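// BOpenHashTable definition for the global area table: keyed by area_id and
// chained through VMArea::hash_next.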
struct VMAreaHashDefinition {
	typedef area_id		KeyType;
	typedef VMArea		ValueType;

	size_t HashKey(area_id key) const
	{
		return key;
	}

	size_t Hash(const VMArea* value) const
	{
		return HashKey(value->id);
	}

	bool Compare(area_id key, const VMArea* value) const
	{
		return value->id == key;
	}

	VMArea*& GetLink(VMArea* value) const
	{
		return value->hash_next;
	}
};

typedef BOpenHashTable<VMAreaHashDefinition> VMAreaHashTable;
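
// Static interface to the global area hash table and the rw_lock guarding
// it. LookupLocked() requires the caller to already hold the lock.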
struct VMAreaHash {
	static status_t Init();

	static status_t ReadLock()
		{ return rw_lock_read_lock(&sLock); }
	static void ReadUnlock()
		{ rw_lock_read_unlock(&sLock); }
	static status_t WriteLock()
		{ return rw_lock_write_lock(&sLock); }
	static void WriteUnlock()
		{ rw_lock_write_unlock(&sLock); }

	static VMArea* LookupLocked(area_id id)
		{ return sTable.Lookup(id); }
	static VMArea* Lookup(area_id id);
	static area_id Find(const char* name);
	static void Insert(VMArea* area);
	static void Remove(VMArea* area);

	static VMAreaHashTable::Iterator GetIterator()
		{ return sTable.GetIterator(); }

private:
	static rw_lock			sLock;
	static VMAreaHashTable	sTable;
};

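// memory_type is stored right-shifted by MEMORY_TYPE_SHIFT so the value fits
// into 16 bits; the accessors shift it back and forth.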
uint32
VMArea::MemoryType() const
{
	return (uint32)memory_type << MEMORY_TYPE_SHIFT;
}


void
VMArea::SetMemoryType(uint32 memoryType)
{
	memory_type = memoryType >> MEMORY_TYPE_SHIFT;
}


#endif // _KERNEL_VM_VM_AREA_H