/*
 * Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_VM_VM_ADDRESS_SPACE_H
#define _KERNEL_VM_VM_ADDRESS_SPACE_H


#include <OS.h>

#include <vm/vm_priv.h>
#include <vm/VMArea.h>
#include <vm/VMTranslationMap.h>


struct virtual_address_restrictions;
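

// Represents a team's virtual address space: a reference-counted,
// rw_lock-protected collection of VMArea objects together with the
// VMTranslationMap that maps them. The concrete implementations are
// VMKernelAddressSpace and VMUserAddressSpace.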
struct VMAddressSpace {
public:
	class AreaIterator;

public:
	VMAddressSpace(team_id id, addr_t base, size_t size, const char* name);
	virtual ~VMAddressSpace();

	static status_t Init();

	team_id ID() const { return fID; }
	addr_t Base() const { return fBase; }
	addr_t EndAddress() const { return fEndAddress; }
	size_t Size() const { return fEndAddress - fBase + 1; }
	size_t FreeSpace() const { return fFreeSpace; }
	bool IsBeingDeleted() const { return fDeleting; }

	VMTranslationMap* TranslationMap() { return fTranslationMap; }

	status_t ReadLock() { return rw_lock_read_lock(&fLock); }
	void ReadUnlock() { rw_lock_read_unlock(&fLock); }
	status_t WriteLock() { return rw_lock_write_lock(&fLock); }
	void WriteUnlock() { rw_lock_write_unlock(&fLock); }

	int32 RefCount() const { return fRefCount; }

	inline void Get() { atomic_add(&fRefCount, 1); }
	inline void Put();
	void RemoveAndPut();

	void IncrementFaultCount() { atomic_add(&fFaultCount, 1); }
	void IncrementChangeCount() { fChangeCount++; }

	inline bool IsRandomizingEnabled() const { return fRandomizingEnabled; }
	inline void SetRandomizingEnabled(bool enabled)
		{ fRandomizingEnabled = enabled; }

	inline AreaIterator GetAreaIterator();

	VMAddressSpace*& HashTableLink() { return fHashTableLink; }

	virtual status_t InitObject();
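
	// Pure virtual area-management interface, implemented by the concrete
	// address space classes (VMKernelAddressSpace and VMUserAddressSpace).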
	virtual VMArea* FirstArea() const = 0;
	virtual VMArea* NextArea(VMArea* area) const = 0;

	virtual VMArea* LookupArea(addr_t address) const = 0;
	virtual VMArea* CreateArea(const char* name, uint32 wiring,
		uint32 protection, uint32 allocationFlags) = 0;
	virtual void DeleteArea(VMArea* area, uint32 allocationFlags) = 0;
	virtual status_t InsertArea(VMArea* area, size_t size,
		const virtual_address_restrictions* addressRestrictions,
		uint32 allocationFlags, void** _address) = 0;
	virtual void RemoveArea(VMArea* area, uint32 allocationFlags) = 0;

	virtual bool CanResizeArea(VMArea* area, size_t newSize) = 0;
	virtual status_t ResizeArea(VMArea* area, size_t newSize,
		uint32 allocationFlags) = 0;
	virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize,
		uint32 allocationFlags) = 0;
	virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
		uint32 allocationFlags) = 0;

	virtual status_t ReserveAddressRange(size_t size,
		const virtual_address_restrictions* addressRestrictions,
		uint32 flags, uint32 allocationFlags, void** _address) = 0;
	virtual status_t UnreserveAddressRange(addr_t address, size_t size,
		uint32 allocationFlags) = 0;
	virtual void UnreserveAllAddressRanges(uint32 allocationFlags) = 0;

	virtual void Dump() const;

	static status_t Create(team_id teamID, addr_t base, size_t size,
		bool kernel, VMAddressSpace** _addressSpace);

	static team_id KernelID() { return sKernelAddressSpace->ID(); }
	static VMAddressSpace* Kernel() { return sKernelAddressSpace; }
	static VMAddressSpace* GetKernel();

	static team_id CurrentID();
	static VMAddressSpace* GetCurrent();

	static VMAddressSpace* Get(team_id teamID);
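
	// Accessors intended for use from the kernel debugger (see the
	// _DumpCommand()/_DumpListCommand() hooks below).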
	static VMAddressSpace* DebugFirst();
	static VMAddressSpace* DebugNext(VMAddressSpace* addressSpace);
	static VMAddressSpace* DebugGet(team_id teamID);

protected:
	static void _DeleteIfUnreferenced(team_id id);

	static int _DumpCommand(int argc, char** argv);
	static int _DumpListCommand(int argc, char** argv);

protected:
	struct HashDefinition;

protected:
	VMAddressSpace* fHashTableLink;
	addr_t fBase;
	addr_t fEndAddress;	// base + (size - 1)
	size_t fFreeSpace;
	rw_lock fLock;
	team_id fID;
	int32 fRefCount;
	int32 fFaultCount;
	int32 fChangeCount;
	VMTranslationMap* fTranslationMap;
	bool fRandomizingEnabled;
	bool fDeleting;

	static VMAddressSpace* sKernelAddressSpace;
};
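

// Releases one reference to the address space. When the last reference is
// released, the address space is deleted via _DeleteIfUnreferenced(). The ID
// is read beforehand, since the object must no longer be accessed once the
// reference has been given up.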
void
VMAddressSpace::Put()
{
	team_id id = fID;
	if (atomic_add(&fRefCount, -1) == 1)
		_DeleteIfUnreferenced(id);
}
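

// Illustrative sketch (not part of the original header): how a caller might
// look up the area containing an address in the current team's address space.
// It assumes -- as the pairing of the Get*() accessors with Put() above
// suggests -- that GetCurrent() returns the address space with a reference
// already acquired; "address" stands for an arbitrary addr_t value.
//
//	VMAddressSpace* addressSpace = VMAddressSpace::GetCurrent();
//	if (addressSpace != NULL) {
//		addressSpace->ReadLock();
//		VMArea* area = addressSpace->LookupArea(address);
//		if (area != NULL) {
//			// use "area" only while the read lock is held
//		}
//		addressSpace->ReadUnlock();
//		addressSpace->Put();
//	}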
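

// Iterator over the areas of an address space, built on the FirstArea()/
// NextArea() interface above. The caller is expected to hold the address
// space lock while iterating.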
class VMAddressSpace::AreaIterator {
public:
	AreaIterator()
	{
	}

	AreaIterator(VMAddressSpace* addressSpace)
		:
		fAddressSpace(addressSpace),
		fNext(addressSpace->FirstArea())
	{
	}

	bool HasNext() const
	{
		return fNext != NULL;
	}

	VMArea* Next()
	{
		VMArea* result = fNext;
		if (fNext != NULL)
			fNext = fAddressSpace->NextArea(fNext);
		return result;
	}

	void Rewind()
	{
		fNext = fAddressSpace->FirstArea();
	}

private:
	VMAddressSpace* fAddressSpace;
	VMArea* fNext;
};


inline VMAddressSpace::AreaIterator
VMAddressSpace::GetAreaIterator()
{
	return AreaIterator(this);
}
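

// Illustrative sketch (not part of the original header): iterating over all
// areas of an address space with the iterator defined above. Holding the
// address space lock for the duration of the iteration is assumed to be
// required.
//
//	addressSpace->ReadLock();
//	VMAddressSpace::AreaIterator it = addressSpace->GetAreaIterator();
//	while (it.HasNext()) {
//		VMArea* area = it.Next();
//		// process "area"
//	}
//	addressSpace->ReadUnlock();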


#ifdef __cplusplus
extern "C" {
#endif
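
// C interface: vm_delete_areas() deletes the areas of the given address
// space; vm_swap_address_space() maps directly to the architecture-specific
// arch_vm_aspace_swap().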
void vm_delete_areas(struct VMAddressSpace *aspace, bool deletingAddressSpace);
#define vm_swap_address_space(from, to) arch_vm_aspace_swap(from, to)

#ifdef __cplusplus
}
#endif


#endif	/* _KERNEL_VM_VM_ADDRESS_SPACE_H */