/* haiku/headers/private/kernel/vm/VMAddressSpace.h */
/*
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef _KERNEL_VM_VM_ADDRESS_SPACE_H
#define _KERNEL_VM_VM_ADDRESS_SPACE_H
#include <OS.h>
#include <vm/vm_priv.h>
#include <vm/VMArea.h>
#include <vm/VMTranslationMap.h>
// Kernel representation of a team's virtual address space: the virtual
// address range [fBase, fEndAddress] plus the VMAreas mapped into it and
// its hardware translation map. Storage and lookup of areas is delegated
// to subclasses via the pure virtual methods below. The object is
// reference counted (Get()/Put()) and guarded by an r/w lock.
struct VMAddressSpace {
public:
// Iterator over this address space's areas (defined after the class).
class AreaIterator;
public:
VMAddressSpace(team_id id, addr_t base,
size_t size, const char* name);
virtual ~VMAddressSpace();
// One-time global initialization; InitPostSem() presumably runs the part
// that needs semaphores -- confirm against the implementation.
static status_t Init();
static status_t InitPostSem();
// Basic accessors.
team_id ID() const { return fID; }
addr_t Base() const { return fBase; }
addr_t EndAddress() const { return fEndAddress; }
// fEndAddress is inclusive (base + size - 1), hence the +1.
size_t Size() const { return fEndAddress - fBase + 1; }
size_t FreeSpace() const { return fFreeSpace; }
bool IsBeingDeleted() const { return fDeleting; }
VMTranslationMap* TranslationMap() { return fTranslationMap; }
// R/W lock protecting the address space structures.
status_t ReadLock()
{ return rw_lock_read_lock(&fLock); }
void ReadUnlock()
{ rw_lock_read_unlock(&fLock); }
status_t WriteLock()
{ return rw_lock_write_lock(&fLock); }
void WriteUnlock()
{ rw_lock_write_unlock(&fLock); }
int32 RefCount() const
{ return fRefCount; }
// Reference counting. Put() may trigger deletion of the object (see its
// out-of-line definition below the class).
inline void Get() { atomic_add(&fRefCount, 1); }
inline void Put();
void RemoveAndPut();
// Fault counter is bumped atomically (fault paths run concurrently).
void IncrementFaultCount()
{ atomic_add(&fFaultCount, 1); }
// Plain (non-atomic) increment -- presumably always called with the
// write lock held; verify against callers.
void IncrementChangeCount()
{ fChangeCount++; }
inline AreaIterator GetAreaIterator();
// Link used by the global address space hash table (see HashDefinition).
VMAddressSpace*& HashTableLink() { return fHashTableLink; }
virtual status_t InitObject();
// --- Area management, implemented by subclasses ---
virtual VMArea* FirstArea() const = 0;
virtual VMArea* NextArea(VMArea* area) const = 0;
virtual VMArea* LookupArea(addr_t address) const = 0;
virtual VMArea* CreateArea(const char* name, uint32 wiring,
uint32 protection,
uint32 allocationFlags) = 0;
virtual void DeleteArea(VMArea* area,
uint32 allocationFlags) = 0;
virtual status_t InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* area,
uint32 allocationFlags) = 0;
virtual void RemoveArea(VMArea* area,
uint32 allocationFlags) = 0;
virtual bool CanResizeArea(VMArea* area, size_t newSize) = 0;
virtual status_t ResizeArea(VMArea* area, size_t newSize,
uint32 allocationFlags) = 0;
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize,
uint32 allocationFlags) = 0;
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize,
uint32 allocationFlags) = 0;
// --- Address range reservation, implemented by subclasses ---
virtual status_t ReserveAddressRange(void** _address,
uint32 addressSpec, size_t size,
uint32 flags, uint32 allocationFlags) = 0;
virtual status_t UnreserveAddressRange(addr_t address,
size_t size, uint32 allocationFlags) = 0;
virtual void UnreserveAllAddressRanges(
uint32 allocationFlags) = 0;
virtual void Dump() const;
// Factory: creates the address space for \a teamID (kernel or userland).
static status_t Create(team_id teamID, addr_t base, size_t size,
bool kernel,
VMAddressSpace** _addressSpace);
static team_id KernelID()
{ return sKernelAddressSpace->ID(); }
static VMAddressSpace* Kernel()
{ return sKernelAddressSpace; }
// Get*() lookups -- by naming convention these likely return a new
// reference (release with Put()); confirm against the implementation.
static VMAddressSpace* GetKernel();
static team_id CurrentID();
static VMAddressSpace* GetCurrent();
static VMAddressSpace* Get(team_id teamID);
// Debug*() helpers are for kernel debugger use (per naming); their
// locking semantics are not visible in this header.
static VMAddressSpace* DebugFirst();
static VMAddressSpace* DebugNext(VMAddressSpace* addressSpace);
static VMAddressSpace* DebugGet(team_id teamID);
protected:
static void _DeleteIfUnreferenced(team_id id);
// Kernel debugger command backends for dumping address spaces.
static int _DumpCommand(int argc, char** argv);
static int _DumpListCommand(int argc, char** argv);
protected:
struct HashDefinition;
protected:
VMAddressSpace* fHashTableLink;
addr_t fBase;
addr_t fEndAddress; // base + (size - 1)
size_t fFreeSpace;
rw_lock fLock;
team_id fID;
int32 fRefCount;
int32 fFaultCount;
int32 fChangeCount;
VMTranslationMap* fTranslationMap;
bool fDeleting;
static VMAddressSpace* sKernelAddressSpace;
};
// Drops one reference to the address space.
// fID is copied *before* the decrement on purpose: once our reference is
// released the object may be deleted (by us via _DeleteIfUnreferenced(),
// or conceptually by any concurrent last release), so no member may be
// read after atomic_add() returns. atomic_add() yields the previous
// value, so a result of 1 means we released the last reference and
// _DeleteIfUnreferenced(id) performs the actual deletion.
void
VMAddressSpace::Put()
{
team_id id = fID;
if (atomic_add(&fRefCount, -1) == 1)
_DeleteIfUnreferenced(id);
}
// Forward iterator over the areas of a VMAddressSpace. The iterator
// pre-fetches the area it will hand out next, so the area most recently
// returned by Next() is not needed to advance the iteration.
class VMAddressSpace::AreaIterator {
public:
	// Creates an unpositioned iterator; fields are left uninitialized,
	// assign from a valid iterator before use.
	AreaIterator()
	{
	}

	// Positions the iterator at the first area of \a addressSpace.
	AreaIterator(VMAddressSpace* addressSpace)
		:
		fSpace(addressSpace),
		fNextArea(addressSpace->FirstArea())
	{
	}

	// Returns whether Next() will still yield an area.
	bool HasNext() const
	{
		return fNextArea != NULL;
	}

	// Returns the next area, advancing the iterator, or NULL when the
	// iteration is exhausted.
	VMArea* Next()
	{
		VMArea* area = fNextArea;
		if (area != NULL)
			fNextArea = fSpace->NextArea(area);
		return area;
	}

	// Restarts the iteration at the address space's first area.
	void Rewind()
	{
		fNextArea = fSpace->FirstArea();
	}

private:
	VMAddressSpace*	fSpace;
	VMArea*			fNextArea;
};
// Returns an iterator positioned at this address space's first area.
inline VMAddressSpace::AreaIterator
VMAddressSpace::GetAreaIterator()
{
	AreaIterator iterator(this);
	return iterator;
}
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Historical note (r36030, 2010-04-03): vm_delete_areas() was changed to
 * return void -- its status_t result was never used. The same change
 * reworked memory locking (wiring): VMArea now keeps a list of wired
 * memory ranges and supports waiting for a range's removal;
 * vm_soft_fault() gained "wirePage"/"wiredRange" parameters so it can
 * wire-and-return a page and never unmaps wired pages; functions that
 * manipulate areas either require callers to clear wired ranges or use
 * the wait_if_*_is_wired() helpers; lock_memory_etc() handles ranges
 * spanning multiple areas and retains an address space reference while a
 * wired range exists, with unlock_memory_etc() changed symmetrically.
 * _user_map_file()/_user_unmap_memory() now verify page alignment of a
 * given address.
 */
void vm_delete_areas(struct VMAddressSpace *aspace, bool deletingAddressSpace);
#define vm_swap_address_space(from, to) arch_vm_aspace_swap(from, to)
#ifdef __cplusplus
}
#endif
#endif /* _KERNEL_VM_VM_ADDRESS_SPACE_H */