haiku/headers/private/kernel/vm/VMAddressSpace.h
Ingo Weinhold a8ad734f1c * Introduced structures {virtual,physical}_address_restrictions, which specify
restrictions for virtual/physical addresses.
* vm_page_allocate_page_run():
  - Fixed conversion of base/limit to array indexes. sPhysicalPageOffset was not
    taken into account.
  - Takes a physical_address_restrictions instead of base/limit and also
    supports alignment and boundary restrictions, now.
* map_backing_store(), VM[User,Kernel]AddressSpace::InsertArea()/
  ReserveAddressRange() take a virtual_address_restrictions parameter, now. They
  also support an alignment independent from the range size.
* create_area_etc(), vm_create_anonymous_area(): Take
  {virtual,physical}_address_restrictions parameters, now.
* Removed no longer needed B_PHYSICAL_BASE_ADDRESS.
* DMAResources:
  - Fixed potential overflows of uint32 when initializing from device node
    attributes.
  - Fixed bounce buffer creation TODOs: By using create_area_etc() with the
    new restrictions parameters, we can directly support high physical
    addresses as well as boundary and alignment restrictions.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37131 a95241bf-73f2-0310-859d-f6bbb57e9c96
2010-06-14 16:25:14 +00:00

217 lines
5.4 KiB
C++

/*
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef _KERNEL_VM_VM_ADDRESS_SPACE_H
#define _KERNEL_VM_VM_ADDRESS_SPACE_H
#include <OS.h>
#include <vm/vm_priv.h>
#include <vm/VMArea.h>
#include <vm/VMTranslationMap.h>
struct virtual_address_restrictions;
// Abstract base class representing a team's virtual address space: the
// address range [fBase, fEndAddress] (end inclusive) managed as a set of
// VMArea objects. Concrete subclasses provide the area container and the
// placement/resize policy via the pure virtual methods below.
struct VMAddressSpace {
public:
// Forward declaration; defined after the class body.
class AreaIterator;
public:
// id: owning team; base/size: the virtual range this space covers.
VMAddressSpace(team_id id, addr_t base,
size_t size, const char* name);
virtual ~VMAddressSpace();
// One-time global initialization.
// NOTE(review): presumably creates sKernelAddressSpace -- confirm in the .cpp.
static status_t Init();
team_id ID() const { return fID; }
addr_t Base() const { return fBase; }
// Last valid address of the space (inclusive -- see fEndAddress below).
addr_t EndAddress() const { return fEndAddress; }
// fEndAddress is inclusive, hence the +1.
size_t Size() const { return fEndAddress - fBase + 1; }
size_t FreeSpace() const { return fFreeSpace; }
bool IsBeingDeleted() const { return fDeleting; }
VMTranslationMap* TranslationMap() { return fTranslationMap; }
// Reader/writer locking of the address space structure (fLock).
status_t ReadLock()
{ return rw_lock_read_lock(&fLock); }
void ReadUnlock()
{ rw_lock_read_unlock(&fLock); }
status_t WriteLock()
{ return rw_lock_write_lock(&fLock); }
void WriteUnlock()
{ rw_lock_write_unlock(&fLock); }
int32 RefCount() const
{ return fRefCount; }
// Reference counting: Get() adds a reference, Put() releases one (defined
// out of line below); RemoveAndPut() additionally removes the space.
// NOTE(review): exact RemoveAndPut() semantics live in the .cpp -- confirm.
inline void Get() { atomic_add(&fRefCount, 1); }
inline void Put();
void RemoveAndPut();
void IncrementFaultCount()
{ atomic_add(&fFaultCount, 1); }
// Not atomic, unlike the fault counter -- presumably callers hold the
// write lock; confirm at the call sites.
void IncrementChangeCount()
{ fChangeCount++; }
inline AreaIterator GetAreaIterator();
// Link used by the global hash table of address spaces (see HashDefinition).
VMAddressSpace*& HashTableLink() { return fHashTableLink; }
virtual status_t InitObject();
// Area enumeration and lookup; iteration order is subclass-defined.
virtual VMArea* FirstArea() const = 0;
virtual VMArea* NextArea(VMArea* area) const = 0;
virtual VMArea* LookupArea(addr_t address) const = 0;
// Area life cycle. CreateArea() only constructs the object; InsertArea()
// places it in the space honoring the given address restrictions and
// returns the chosen address via _address.
virtual VMArea* CreateArea(const char* name, uint32 wiring,
uint32 protection,
uint32 allocationFlags) = 0;
virtual void DeleteArea(VMArea* area,
uint32 allocationFlags) = 0;
virtual status_t InsertArea(VMArea* area, size_t size,
const virtual_address_restrictions*
addressRestrictions,
uint32 allocationFlags, void** _address)
= 0;
virtual void RemoveArea(VMArea* area,
uint32 allocationFlags) = 0;
// Resizing: CanResizeArea() checks feasibility; the Shrink* variants cut
// the area at its head resp. tail.
virtual bool CanResizeArea(VMArea* area, size_t newSize) = 0;
virtual status_t ResizeArea(VMArea* area, size_t newSize,
uint32 allocationFlags) = 0;
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize,
uint32 allocationFlags) = 0;
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
uint32 allocationFlags) = 0;
// Address range reservation (no backing areas), honoring the given
// address restrictions; the reserved address is returned via _address.
virtual status_t ReserveAddressRange(size_t size,
const virtual_address_restrictions*
addressRestrictions,
uint32 flags, uint32 allocationFlags,
void** _address) = 0;
virtual status_t UnreserveAddressRange(addr_t address,
size_t size, uint32 allocationFlags) = 0;
virtual void UnreserveAllAddressRanges(
uint32 allocationFlags) = 0;
virtual void Dump() const;
// Factory: creates the kernel (kernel == true) or a team address space.
static status_t Create(team_id teamID, addr_t base, size_t size,
bool kernel,
VMAddressSpace** _addressSpace);
static team_id KernelID()
{ return sKernelAddressSpace->ID(); }
static VMAddressSpace* Kernel()
{ return sKernelAddressSpace; }
// Get*() variants return a referenced address space (caller must Put()).
// NOTE(review): referencing convention inferred from naming -- confirm.
static VMAddressSpace* GetKernel();
static team_id CurrentID();
static VMAddressSpace* GetCurrent();
static VMAddressSpace* Get(team_id teamID);
// Debug* accessors are for the kernel debugger (no locking/referencing).
static VMAddressSpace* DebugFirst();
static VMAddressSpace* DebugNext(VMAddressSpace* addressSpace);
static VMAddressSpace* DebugGet(team_id teamID);
protected:
static void _DeleteIfUnreferenced(team_id id);
// Kernel debugger command handlers.
static int _DumpCommand(int argc, char** argv);
static int _DumpListCommand(int argc, char** argv);
protected:
struct HashDefinition;
protected:
VMAddressSpace* fHashTableLink;
addr_t fBase;
addr_t fEndAddress; // base + (size - 1)
size_t fFreeSpace;
rw_lock fLock;
team_id fID;
int32 fRefCount;
int32 fFaultCount;
int32 fChangeCount;
VMTranslationMap* fTranslationMap;
bool fDeleting;
static VMAddressSpace* sKernelAddressSpace;
};
// Releases one reference. The team ID is copied out before the decrement so
// it stays available once the count may reach zero; when the caller held the
// last reference (atomic_add() returns the previous value, i.e. 1), the
// space is handed to _DeleteIfUnreferenced() for actual deletion.
void
VMAddressSpace::Put()
{
team_id id = fID;
if (atomic_add(&fRefCount, -1) == 1)
_DeleteIfUnreferenced(id);
}
// Forward iterator over the VMAreas of an address space, driven by the
// subclass-defined FirstArea()/NextArea() enumeration.
class VMAddressSpace::AreaIterator {
public:
	// An unpositioned iterator; assign a properly constructed one before use.
	AreaIterator()
	{
	}

	AreaIterator(VMAddressSpace* addressSpace)
		:
		fAddressSpace(addressSpace),
		fNext(addressSpace->FirstArea())
	{
	}

	// True while Next() has another area to hand out.
	bool HasNext() const
	{
		return fNext != NULL;
	}

	// Returns the current area and advances, or NULL when exhausted.
	VMArea* Next()
	{
		if (fNext == NULL)
			return NULL;

		VMArea* current = fNext;
		fNext = fAddressSpace->NextArea(current);
		return current;
	}

	// Restarts the iteration at the address space's first area.
	void Rewind()
	{
		fNext = fAddressSpace->FirstArea();
	}

private:
	VMAddressSpace*	fAddressSpace;
	VMArea*		fNext;
};
// Returns an iterator positioned at this address space's first area.
inline VMAddressSpace::AreaIterator
VMAddressSpace::GetAreaIterator()
{
	AreaIterator iterator(this);
	return iterator;
}
#ifdef __cplusplus
extern "C" {
#endif
// C-linkage interface; definitions live elsewhere in the VM code.
// NOTE(review): deletingAddressSpace presumably tells the callee the whole
// space is going away (allowing shortcuts) -- confirm at the definition.
void vm_delete_areas(struct VMAddressSpace *aspace, bool deletingAddressSpace);
// Forwards directly to the architecture-specific implementation.
#define vm_swap_address_space(from, to) arch_vm_aspace_swap(from, to)
#ifdef __cplusplus
}
#endif
#endif /* _KERNEL_VM_VM_ADDRESS_SPACE_H */