haiku/headers/private/kernel/vm/VMAddressSpace.h
Ingo Weinhold f082f7f019 * Added vm_page::accessed flag. Works analogously to vm_page::modified.
* Reorganized the code for [un]mapping pages:
  - Added new VMTranslationMap::Unmap{Area,Page[s]}() which essentially do what
    vm_unmap_page[s]() did before, just in the architecture specific code, which
    allows for specific optimizations. UnmapArea() is for the special case that
    the complete area is unmapped. Particularly in case the address space is
    deleted, some work can be saved. Several TODOs could be slain.
  - Since they are only used within vm.cpp vm_map_page() and vm_unmap_page[s]()
    are now static and have lost their prefix (and the "preserveModified"
    parameter).
* Added VMTranslationMap::Protect{Page,Area}(). They are just inline wrappers
  for Protect().
* X86VMTranslationMap::Protect(): Make sure not to accidentally clear the
  accessed/dirty flags.
* X86VMTranslationMap::Unmap()/Protect(): Make page table skipping actually
  work. It was only skipping to the next page.
* Adjusted the PPC code to at least compile.

No measurable effect for the -j8 Haiku image build time, though the kernel time
drops minimally.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35089 a95241bf-73f2-0310-859d-f6bbb57e9c96
2010-01-15 22:32:51 +00:00

205 lines
4.9 KiB
C++

/*
* Copyright 2009-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef _KERNEL_VM_VM_ADDRESS_SPACE_H
#define _KERNEL_VM_VM_ADDRESS_SPACE_H
#include <OS.h>
#include <vm/vm_priv.h>
#include <vm/VMArea.h>
#include <vm/VMTranslationMap.h>
// Base class describing a (kernel or user) virtual address space: it tracks
// the covered virtual range, the hardware translation map, locking, and
// reference counting. Storage and lookup of the areas themselves are left to
// derived classes via the pure virtual interface below.
struct VMAddressSpace {
public:
class AreaIterator;
public:
// \a base and \a size define the virtual range covered by this space.
VMAddressSpace(team_id id, addr_t base,
size_t size, const char* name);
virtual ~VMAddressSpace();
// One-time global initialization (implemented elsewhere).
static status_t Init();
static status_t InitPostSem();
// ID of the team owning this address space.
team_id ID() const { return fID; }
addr_t Base() const { return fBase; }
// Last valid address of the space, i.e. base + size - 1 (cf. fEndAddress).
addr_t EndAddress() const { return fEndAddress; }
size_t Size() const { return fEndAddress - fBase + 1; }
size_t FreeSpace() const { return fFreeSpace; }
bool IsBeingDeleted() const { return fDeleting; }
VMTranslationMap* TranslationMap() { return fTranslationMap; }
// Reader/writer locking of the address space (rw_lock based).
status_t ReadLock()
{ return rw_lock_read_lock(&fLock); }
void ReadUnlock()
{ rw_lock_read_unlock(&fLock); }
status_t WriteLock()
{ return rw_lock_write_lock(&fLock); }
void WriteUnlock()
{ rw_lock_write_unlock(&fLock); }
int32 RefCount() const
{ return fRefCount; }
// Reference counting. Put() may delete the object when the last reference
// is released (see the inline definition below the class).
inline void Get() { atomic_add(&fRefCount, 1); }
inline void Put();
void RemoveAndPut();
// Fault count is bumped atomically; the change count is a plain increment
// -- NOTE(review): presumably callers hold the write lock, verify at the
// call sites.
void IncrementFaultCount()
{ atomic_add(&fFaultCount, 1); }
void IncrementChangeCount()
{ fChangeCount++; }
inline AreaIterator GetAreaIterator();
// Link used by the address space hash table (cf. HashDefinition below).
VMAddressSpace*& HashTableLink() { return fHashTableLink; }
virtual status_t InitObject();
// --- Area management interface, implemented by derived classes ---
virtual VMArea* FirstArea() const = 0;
virtual VMArea* NextArea(VMArea* area) const = 0;
virtual VMArea* LookupArea(addr_t address) const = 0;
virtual VMArea* CreateArea(const char* name, uint32 wiring,
uint32 protection) = 0;
virtual void DeleteArea(VMArea* area) = 0;
virtual status_t InsertArea(void** _address, uint32 addressSpec,
size_t size, VMArea* area) = 0;
virtual void RemoveArea(VMArea* area) = 0;
virtual bool CanResizeArea(VMArea* area, size_t newSize) = 0;
virtual status_t ResizeArea(VMArea* area, size_t newSize) = 0;
virtual status_t ShrinkAreaHead(VMArea* area, size_t newSize)
= 0;
virtual status_t ShrinkAreaTail(VMArea* area, size_t newSize)
= 0;
// Reserve/unreserve address ranges without creating areas in them.
virtual status_t ReserveAddressRange(void** _address,
uint32 addressSpec, size_t size,
uint32 flags) = 0;
virtual status_t UnreserveAddressRange(addr_t address,
size_t size) = 0;
virtual void UnreserveAllAddressRanges() = 0;
virtual void Dump() const;
// Factory: creates a kernel or user address space for \a teamID.
static status_t Create(team_id teamID, addr_t base, size_t size,
bool kernel,
VMAddressSpace** _addressSpace);
static team_id KernelID()
{ return sKernelAddressSpace->ID(); }
static VMAddressSpace* Kernel()
{ return sKernelAddressSpace; }
// NOTE(review): the Get*() variants presumably also acquire a reference
// (in contrast to Kernel()/Get()-less accessors) -- implemented elsewhere,
// verify before relying on that.
static VMAddressSpace* GetKernel();
static team_id CurrentID();
static VMAddressSpace* GetCurrent();
static VMAddressSpace* Get(team_id teamID);
// Iteration helpers -- judging by the names, for the kernel debugger.
static VMAddressSpace* DebugFirst();
static VMAddressSpace* DebugNext(VMAddressSpace* addressSpace);
protected:
// Called by Put() when the reference count has dropped to zero.
static void _DeleteIfUnreferenced(team_id id);
// Debugger command handlers (argc/argv style).
static int _DumpCommand(int argc, char** argv);
static int _DumpListCommand(int argc, char** argv);
protected:
struct HashDefinition;
protected:
VMAddressSpace* fHashTableLink;
addr_t fBase;
addr_t fEndAddress; // base + (size - 1)
size_t fFreeSpace;
rw_lock fLock;
team_id fID;
int32 fRefCount;
int32 fFaultCount;
int32 fChangeCount;
VMTranslationMap* fTranslationMap;
bool fDeleting;
static VMAddressSpace* sKernelAddressSpace;
};
void
VMAddressSpace::Put()
{
team_id id = fID;
if (atomic_add(&fRefCount, -1) == 1)
_DeleteIfUnreferenced(id);
}
/*!	Forward iterator over the areas of a VMAddressSpace, driven by the
	space's FirstArea()/NextArea() interface. The caller is responsible for
	keeping the address space locked/alive while iterating.
*/
class VMAddressSpace::AreaIterator {
public:
	// Default-constructed iterator: iterates nothing until reassigned.
	// Members are NULL-initialized so HasNext()/Next() never read
	// indeterminate values (the previous empty constructor left them
	// uninitialized).
	AreaIterator()
		:
		fAddressSpace(NULL),
		fNext(NULL)
	{
	}

	// Iterates over all areas of \a addressSpace, starting at its first one.
	AreaIterator(VMAddressSpace* addressSpace)
		:
		fAddressSpace(addressSpace),
		fNext(addressSpace->FirstArea())
	{
	}

	bool HasNext() const
	{
		return fNext != NULL;
	}

	// Returns the current area and advances, or NULL when exhausted.
	VMArea* Next()
	{
		VMArea* result = fNext;
		if (fNext != NULL)
			fNext = fAddressSpace->NextArea(fNext);
		return result;
	}

	// Restarts the iteration at the address space's first area.
	void Rewind()
	{
		fNext = fAddressSpace->FirstArea();
	}

private:
	VMAddressSpace*	fAddressSpace;
	VMArea*			fNext;
};
/*!	Convenience factory: returns an iterator over all areas of this address
	space (cf. AreaIterator).
*/
inline VMAddressSpace::AreaIterator
VMAddressSpace::GetAreaIterator()
{
	AreaIterator iterator(this);
	return iterator;
}
#ifdef __cplusplus
extern "C" {
#endif
// Deletes all areas of the given address space. NOTE(review): semantics of
// \a deletingAddressSpace inferred from the name (whole space going away,
// allowing shortcuts) -- the implementation lives elsewhere, verify there.
status_t vm_delete_areas(struct VMAddressSpace *aspace,
bool deletingAddressSpace);
// Architecture-specific address space switch.
#define vm_swap_address_space(from, to) arch_vm_aspace_swap(from, to)
#ifdef __cplusplus
}
#endif
#endif /* _KERNEL_VM_VM_ADDRESS_SPACE_H */