Renamed vm_address_space to VMAddressSpace.
git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@34422 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent fe01182ce4
commit b0db552cd9
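The rename is purely mechanical: every declaration, parameter, cast, and sizeof that used the C struct tag vm_address_space now uses VMAddressSpace, with no change in layout or behavior. A minimal sketch of the pattern the hunks below apply (declarations taken from this diff; the old form is disabled so the snippet stays self-consistent):

/* Before r34422 (old tag): */
#if 0
struct vm_address_space;
void vm_put_address_space(struct vm_address_space *aspace);
#endif

/* After r34422: same function, same layout, renamed tag. */
struct VMAddressSpace;
void vm_put_address_space(struct VMAddressSpace *aspace);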
@@ -16,7 +16,7 @@
 
 struct kernel_args;
 struct vm_area;
-struct vm_address_space;
+struct VMAddressSpace;
 
 
 #ifdef __cplusplus
@@ -27,8 +27,8 @@ status_t arch_vm_init(struct kernel_args *args);
 status_t arch_vm_init_post_area(struct kernel_args *args);
 status_t arch_vm_init_end(struct kernel_args *args);
 status_t arch_vm_init_post_modules(struct kernel_args *args);
-void arch_vm_aspace_swap(struct vm_address_space *from,
-	struct vm_address_space *to);
+void arch_vm_aspace_swap(struct VMAddressSpace *from,
+	struct VMAddressSpace *to);
 bool arch_vm_supports_protection(uint32 protection);
 
 status_t arch_vm_set_memory_type(struct vm_area *area, addr_t physicalBase,
@@ -31,7 +31,7 @@ struct team *team_get_kernel_team(void);
 team_id team_get_kernel_team_id(void);
 team_id team_get_current_team_id(void);
 status_t team_get_address_space(team_id id,
-	struct vm_address_space **_addressSpace);
+	struct VMAddressSpace **_addressSpace);
 char **user_team_get_arguments(void);
 int user_team_get_arg_count(void);
 struct job_control_entry* team_get_death_entry(struct team *team,
@@ -187,7 +187,7 @@ struct team {
	team_job_control_children *continued_children;
	struct job_control_entry* job_control_entry;
 
-	struct vm_address_space *address_space;
+	struct VMAddressSpace *address_space;
	struct thread *main_thread;
	struct thread *thread_list;
	struct team_loading_info *loading_info;
@@ -18,10 +18,10 @@ struct iovec;
 struct kernel_args;
 struct team;
 struct system_memory_info;
 struct vm_page;
+struct VMAddressSpace;
 struct VMCache;
 struct vm_area;
-struct vm_address_space;
 struct vm_page;
 struct vnode;
 
@@ -87,7 +87,7 @@ area_id vm_clone_area(team_id team, const char *name, void **address,
	area_id sourceArea, bool kernel);
 status_t vm_delete_area(team_id teamID, area_id areaID, bool kernel);
 status_t vm_create_vnode_cache(struct vnode *vnode, struct VMCache **_cache);
-struct vm_area *vm_area_lookup(struct vm_address_space *addressSpace,
+struct vm_area *vm_area_lookup(struct VMAddressSpace *addressSpace,
	addr_t address);
 status_t vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type);
 status_t vm_get_page_mapping(team_id team, addr_t vaddr, addr_t *paddr);
@@ -13,7 +13,7 @@
 
 
 struct kernel_args;
-struct vm_address_space;
+struct VMAddressSpace;
 
 
 #ifdef __cplusplus
@@ -23,17 +23,17 @@ extern "C" {
 status_t vm_address_space_init(void);
 status_t vm_address_space_init_post_sem(void);
 
-void vm_delete_address_space(struct vm_address_space *aspace);
+void vm_delete_address_space(struct VMAddressSpace *aspace);
 status_t vm_create_address_space(team_id id, addr_t base, addr_t size,
-	bool kernel, struct vm_address_space **_aspace);
-status_t vm_delete_areas(struct vm_address_space *aspace);
-struct vm_address_space *vm_get_kernel_address_space(void);
-struct vm_address_space *vm_kernel_address_space(void);
+	bool kernel, struct VMAddressSpace **_aspace);
+status_t vm_delete_areas(struct VMAddressSpace *aspace);
+struct VMAddressSpace *vm_get_kernel_address_space(void);
+struct VMAddressSpace *vm_kernel_address_space(void);
 team_id vm_kernel_address_space_id(void);
-struct vm_address_space *vm_get_current_user_address_space(void);
+struct VMAddressSpace *vm_get_current_user_address_space(void);
 team_id vm_current_user_address_space_id(void);
-struct vm_address_space *vm_get_address_space(team_id team);
-void vm_put_address_space(struct vm_address_space *aspace);
+struct VMAddressSpace *vm_get_address_space(team_id team);
+void vm_put_address_space(struct VMAddressSpace *aspace);
 #define vm_swap_address_space(from, to) arch_vm_aspace_swap(from, to)
 
 #ifdef __cplusplus
@@ -225,7 +225,7 @@ public:
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return -1; } // no restriction
 
-	virtual	status_t			Fault(struct vm_address_space *aspace,
+	virtual	status_t			Fault(struct VMAddressSpace *aspace,
									off_t offset);
 
	virtual	void				Merge(VMCache* source);
@@ -305,7 +305,7 @@ struct vm_area {
	vm_area_mappings		mappings;
	uint8*					page_protections;
 
-	struct vm_address_space* address_space;
+	struct VMAddressSpace*	address_space;
	struct vm_area*			address_space_next;
	struct vm_area*			cache_next;
	struct vm_area*			cache_prev;
@@ -318,7 +318,7 @@ enum {
	VM_ASPACE_STATE_DELETION
 };
 
-struct vm_address_space {
+struct VMAddressSpace {
	struct vm_area*			areas;
	struct vm_area*			area_hint;
	rw_lock					lock;
@@ -330,7 +330,7 @@ struct vm_address_space {
	int32					ref_count;
	int32					fault_count;
	int32					state;
-	struct vm_address_space* hash_next;
+	struct VMAddressSpace*	hash_next;
 };
 
 
@@ -73,7 +73,7 @@ arch_vm_init_post_modules(kernel_args *args)
 
 
 void
-arch_vm_aspace_swap(struct vm_address_space *from, struct vm_address_space *to)
+arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
 {
 #warning ARM:WRITEME
//	m68k_set_pgdir(m68k_translation_map_get_pgdir(&to->translation_map));
@@ -103,7 +103,7 @@ arch_vm_init_post_modules(kernel_args *args)
 
 
 void
-arch_vm_aspace_swap(struct vm_address_space *from, struct vm_address_space *to)
+arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
 {
	m68k_set_pgdir(m68k_translation_map_get_pgdir(&to->translation_map));
 }
@@ -66,7 +66,7 @@ arch_vm_init_post_modules(kernel_args* args)
 
 
 void
-arch_vm_aspace_swap(struct vm_address_space* from, struct vm_address_space* to)
+arch_vm_aspace_swap(struct VMAddressSpace* from, struct VMAddressSpace* to)
 {
 #warning IMPLEMENT arch_vm_aspace_swap
 }
@@ -144,7 +144,7 @@ arch_vm_init_post_modules(kernel_args *args)
 
 
 void
-arch_vm_aspace_swap(struct vm_address_space *from, struct vm_address_space *to)
+arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
 {
 }
 
@@ -681,7 +681,7 @@ ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
	physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);
 
-	vm_address_space *addressSpace = vm_kernel_address_space();
+	VMAddressSpace *addressSpace = vm_kernel_address_space();
 
	// map the pages
	for (; virtualAddress < virtualEnd;
@@ -703,7 +703,7 @@ ppc_unmap_address_range(addr_t virtualAddress, size_t size)
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDDOWN(virtualAddress, B_PAGE_SIZE);
 
-	vm_address_space *addressSpace = vm_kernel_address_space();
+	VMAddressSpace *addressSpace = vm_kernel_address_space();
 
	for (0; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
		remove_page_table_entry(&addressSpace->translation_map, virtualAddress);
@@ -716,7 +716,7 @@ ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
	addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
 
-	vm_address_space *addressSpace = vm_kernel_address_space();
+	VMAddressSpace *addressSpace = vm_kernel_address_space();
 
	// reserve space in the address space
	void *newAddress = NULL;
@@ -189,7 +189,7 @@ i386_get_current_iframe(void)
 void *
 x86_next_page_directory(struct thread *from, struct thread *to)
 {
-	vm_address_space* toAddressSpace = to->team->address_space;
+	VMAddressSpace* toAddressSpace = to->team->address_space;
	if (from->team->address_space == toAddressSpace) {
		// don't change the pgdir, same address space
		return NULL;
@@ -372,7 +372,7 @@ arch_thread_context_switch(struct thread *from, struct thread *to)
	struct cpu_ent* cpuData = to->cpu;
	vm_translation_map_arch_info* activeMap
		= cpuData->arch.active_translation_map;
-	vm_address_space* toAddressSpace = to->team->address_space;
+	VMAddressSpace* toAddressSpace = to->team->address_space;
 
	addr_t newPageDirectory;
	vm_translation_map_arch_info* toMap;
@@ -617,7 +617,7 @@ arch_vm_init_post_modules(kernel_args *args)
 
 
 void
-arch_vm_aspace_swap(struct vm_address_space *from, struct vm_address_space *to)
+arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
 {
	// This functions is only invoked when a userland thread is in the process
	// of dying. It switches to the kernel team and does whatever cleanup is
src/system/kernel/cache/vnode_store.cpp
@@ -100,7 +100,7 @@ VMVnodeCache::WriteAsync(off_t offset, const iovec* vecs, size_t count,
 
 
 status_t
-VMVnodeCache::Fault(struct vm_address_space *aspace, off_t offset)
+VMVnodeCache::Fault(struct VMAddressSpace *aspace, off_t offset)
 {
	return B_BAD_HANDLER;
 }
src/system/kernel/cache/vnode_store.h
@@ -28,7 +28,7 @@ public:
									AsyncIOCallback* callback);
	virtual	bool				CanWritePage(off_t offset);
 
-	virtual	status_t			Fault(struct vm_address_space *aspace, off_t offset);
+	virtual	status_t			Fault(struct VMAddressSpace *aspace, off_t offset);
 
	virtual	status_t			AcquireUnreferencedStoreRef();
	virtual	void				AcquireStoreRef();
@@ -2522,7 +2522,7 @@ team_get_current_team_id(void)
 
 
 status_t
-team_get_address_space(team_id id, vm_address_space** _addressSpace)
+team_get_address_space(team_id id, VMAddressSpace** _addressSpace)
 {
	cpu_status state;
	struct team* team;
@@ -684,7 +684,7 @@ VMAnonymousCache::CanWritePage(off_t offset)
 
 
 status_t
-VMAnonymousCache::Fault(struct vm_address_space *aspace, off_t offset)
+VMAnonymousCache::Fault(struct VMAddressSpace* aspace, off_t offset)
 {
	if (fCanOvercommit && LookupPage(offset) == NULL && !HasPage(offset)) {
		if (fGuardedSize > 0) {
@@ -50,7 +50,7 @@ public:
	virtual	int32				MaxPagesPerAsyncWrite() const
									{ return 1; }
 
-	virtual	status_t			Fault(struct vm_address_space *aspace, off_t offset);
+	virtual	status_t			Fault(struct VMAddressSpace* aspace, off_t offset);
 
	virtual	void				Merge(VMCache* source);
 
@@ -113,7 +113,7 @@ VMAnonymousNoSwapCache::Write(off_t offset, const iovec *vecs, size_t count,
 
 
 status_t
-VMAnonymousNoSwapCache::Fault(struct vm_address_space *aspace, off_t offset)
+VMAnonymousNoSwapCache::Fault(struct VMAddressSpace *aspace, off_t offset)
 {
	if (fCanOvercommit) {
		if (fGuardedSize > 0) {
@@ -27,7 +27,7 @@ public:
	virtual	status_t			Write(off_t offset, const iovec *vecs, size_t count,
									uint32 flags, size_t *_numBytes);
 
-	virtual	status_t			Fault(struct vm_address_space *aspace, off_t offset);
+	virtual	status_t			Fault(struct VMAddressSpace *aspace, off_t offset);
 
	virtual	void				MergeStore(VMCache* source);
 
@@ -70,12 +70,12 @@
 class AddressSpaceReadLocker {
 public:
	AddressSpaceReadLocker(team_id team);
-	AddressSpaceReadLocker(vm_address_space* space, bool getNewReference);
+	AddressSpaceReadLocker(VMAddressSpace* space, bool getNewReference);
	AddressSpaceReadLocker();
	~AddressSpaceReadLocker();
 
	status_t SetTo(team_id team);
-	void SetTo(vm_address_space* space, bool getNewReference);
+	void SetTo(VMAddressSpace* space, bool getNewReference);
	status_t SetFromArea(area_id areaID, vm_area*& area);
 
	bool IsLocked() const { return fLocked; }
@@ -84,10 +84,10 @@ public:
 
	void Unset();
 
-	vm_address_space* AddressSpace() { return fSpace; }
+	VMAddressSpace* AddressSpace() { return fSpace; }
 
 private:
-	vm_address_space* fSpace;
+	VMAddressSpace* fSpace;
	bool fLocked;
 };
 
@@ -109,10 +109,10 @@ public:
	void DegradeToReadLock();
	void Unset();
 
-	vm_address_space* AddressSpace() { return fSpace; }
+	VMAddressSpace* AddressSpace() { return fSpace; }
 
 private:
-	vm_address_space* fSpace;
+	VMAddressSpace* fSpace;
	bool fLocked;
	bool fDegraded;
 };
@@ -123,9 +123,9 @@ public:
	~MultiAddressSpaceLocker();
 
	inline status_t AddTeam(team_id team, bool writeLock,
-		vm_address_space** _space = NULL);
+		VMAddressSpace** _space = NULL);
	inline status_t AddArea(area_id area, bool writeLock,
-		vm_address_space** _space = NULL);
+		VMAddressSpace** _space = NULL);
 
	status_t AddAreaCacheAndLock(area_id areaID, bool writeLockThisOne,
		bool writeLockOthers, vm_area*& _area, vm_cache** _cache = NULL);
@@ -138,14 +138,14 @@ public:
 
 private:
	struct lock_item {
-		vm_address_space*	space;
-		bool				write_lock;
+		VMAddressSpace*	space;
+		bool			write_lock;
	};
 
	bool _ResizeIfNeeded();
-	int32 _IndexOfAddressSpace(vm_address_space* space) const;
-	status_t _AddAddressSpace(vm_address_space* space, bool writeLock,
-		vm_address_space** _space);
+	int32 _IndexOfAddressSpace(VMAddressSpace* space) const;
+	status_t _AddAddressSpace(VMAddressSpace* space, bool writeLock,
+		VMAddressSpace** _space);
 
	static int _CompareItems(const void* _a, const void* _b);
 
@@ -217,11 +217,11 @@ static cache_info* sCacheInfoTable;
 
 
 // function declarations
-static void delete_area(vm_address_space* addressSpace, vm_area* area);
-static vm_address_space* get_address_space_by_area_id(area_id id);
-static status_t vm_soft_fault(vm_address_space* addressSpace, addr_t address,
+static void delete_area(VMAddressSpace* addressSpace, vm_area* area);
+static VMAddressSpace* get_address_space_by_area_id(area_id id);
+static status_t vm_soft_fault(VMAddressSpace* addressSpace, addr_t address,
	bool isWrite, bool isUser);
-static status_t map_backing_store(vm_address_space* addressSpace,
+static status_t map_backing_store(VMAddressSpace* addressSpace,
	vm_cache* cache, void** _virtualAddress, off_t offset, addr_t size,
	uint32 addressSpec, int wiring, int protection, int mapping,
	vm_area** _area, const char* areaName, bool unmapAddressRange, bool kernel);
@@ -245,7 +245,7 @@ AddressSpaceReadLocker::AddressSpaceReadLocker(team_id team)
 /*!	Takes over the reference of the address space, if \a getNewReference is
	\c false.
 */
-AddressSpaceReadLocker::AddressSpaceReadLocker(vm_address_space* space,
+AddressSpaceReadLocker::AddressSpaceReadLocker(VMAddressSpace* space,
	bool getNewReference)
	:
	fSpace(NULL),
@@ -295,7 +295,7 @@ AddressSpaceReadLocker::SetTo(team_id team)
	\c false.
 */
 void
-AddressSpaceReadLocker::SetTo(vm_address_space* space, bool getNewReference)
+AddressSpaceReadLocker::SetTo(VMAddressSpace* space, bool getNewReference)
 {
	fSpace = space;
 
@@ -545,7 +545,7 @@ MultiAddressSpaceLocker::_ResizeIfNeeded()
 
 
 int32
-MultiAddressSpaceLocker::_IndexOfAddressSpace(vm_address_space* space) const
+MultiAddressSpaceLocker::_IndexOfAddressSpace(VMAddressSpace* space) const
 {
	for (int32 i = 0; i < fCount; i++) {
		if (fItems[i].space == space)
@@ -557,8 +557,8 @@ MultiAddressSpaceLocker::_IndexOfAddressSpace(vm_address_space* space) const
 
 
 status_t
-MultiAddressSpaceLocker::_AddAddressSpace(vm_address_space* space,
-	bool writeLock, vm_address_space** _space)
+MultiAddressSpaceLocker::_AddAddressSpace(VMAddressSpace* space,
+	bool writeLock, VMAddressSpace** _space)
 {
	if (!space)
		return B_BAD_VALUE;
@@ -590,7 +590,7 @@ MultiAddressSpaceLocker::_AddAddressSpace(vm_address_space* space,
 
 inline status_t
 MultiAddressSpaceLocker::AddTeam(team_id team, bool writeLock,
-	vm_address_space** _space)
+	VMAddressSpace** _space)
 {
	return _AddAddressSpace(vm_get_address_space(team), writeLock,
		_space);
@@ -599,7 +599,7 @@ MultiAddressSpaceLocker::AddTeam(team_id team, bool writeLock,
 
 inline status_t
 MultiAddressSpaceLocker::AddArea(area_id area, bool writeLock,
-	vm_address_space** _space)
+	VMAddressSpace** _space)
 {
	return _AddAddressSpace(get_address_space_by_area_id(area), writeLock,
		_space);
@@ -918,10 +918,10 @@ area_hash(void* _area, const void* key, uint32 range)
 }
 
 
-static vm_address_space*
+static VMAddressSpace*
 get_address_space_by_area_id(area_id id)
 {
-	vm_address_space* addressSpace = NULL;
+	VMAddressSpace* addressSpace = NULL;
 
	rw_lock_read_lock(&sAreaHashLock);
 
@@ -939,7 +939,7 @@ get_address_space_by_area_id(area_id id)
 
 //!	You need to have the address space locked when calling this function
 static vm_area*
-lookup_area(vm_address_space* addressSpace, area_id id)
+lookup_area(VMAddressSpace* addressSpace, area_id id)
 {
	rw_lock_read_lock(&sAreaHashLock);
 
@@ -954,7 +954,7 @@ lookup_area(vm_address_space* addressSpace, area_id id)
 
 
 static vm_area*
-create_reserved_area_struct(vm_address_space* addressSpace, uint32 flags)
+create_reserved_area_struct(VMAddressSpace* addressSpace, uint32 flags)
 {
	vm_area* reserved = (vm_area*)malloc_nogrow(sizeof(vm_area));
	if (reserved == NULL)
@@ -971,7 +971,7 @@ create_reserved_area_struct(vm_address_space* addressSpace, uint32 flags)
 
 
 static vm_area*
-create_area_struct(vm_address_space* addressSpace, const char* name,
+create_area_struct(VMAddressSpace* addressSpace, const char* name,
	uint32 wiring, uint32 protection)
 {
	// restrict the area name to B_OS_NAME_LENGTH
@@ -1016,7 +1016,7 @@ create_area_struct(vm_address_space* addressSpace, const char* name,
	there are reserved regions for the remaining parts.
 */
 static status_t
-find_reserved_area(vm_address_space* addressSpace, addr_t start,
+find_reserved_area(VMAddressSpace* addressSpace, addr_t start,
	addr_t size, vm_area* area)
 {
	vm_area* last = NULL;
@@ -1112,7 +1112,7 @@ is_valid_spot(addr_t base, addr_t alignedBase, addr_t size, addr_t limit)
 
 /*!	Must be called with this address space's write lock held */
 static status_t
-find_and_insert_area_slot(vm_address_space* addressSpace, addr_t start,
+find_and_insert_area_slot(VMAddressSpace* addressSpace, addr_t start,
	addr_t size, addr_t end, uint32 addressSpec, vm_area* area)
 {
	vm_area* last = NULL;
@@ -1349,10 +1349,10 @@ second_chance:
 /*!	This inserts the area you pass into the specified address space.
	It will also set the "_address" argument to its base address when
	the call succeeds.
-	You need to hold the vm_address_space write lock.
+	You need to hold the VMAddressSpace write lock.
 */
 static status_t
-insert_area(vm_address_space* addressSpace, void** _address,
+insert_area(VMAddressSpace* addressSpace, void** _address,
	uint32 addressSpec, addr_t size, vm_area* area)
 {
	addr_t searchBase, searchEnd;
@@ -1388,7 +1388,7 @@ insert_area(vm_address_space* addressSpace, void** _address,
		searchEnd, addressSpec, area);
	if (status == B_OK) {
		*_address = (void*)area->base;
-
+
		if (addressSpace == vm_kernel_address_space())
			sKernelAddressSpaceLeft -= area->size;
	}
@@ -1436,7 +1436,7 @@ get_area_page_protection(vm_area* area, addr_t pageAddress)
	The address space must be write locked.
 */
 static status_t
-cut_area(vm_address_space* addressSpace, vm_area* area, addr_t address,
+cut_area(VMAddressSpace* addressSpace, vm_area* area, addr_t address,
	addr_t lastAddress, vm_area** _secondArea, bool kernel)
 {
	// Does the cut range intersect with the area at all?
@@ -1561,7 +1561,7 @@ decrement_page_wired_count(vm_page* page)
	The address space must be write-locked.
 */
 static status_t
-unmap_address_range(vm_address_space* addressSpace, addr_t address, addr_t size,
+unmap_address_range(VMAddressSpace* addressSpace, addr_t address, addr_t size,
	bool kernel)
 {
	size = PAGE_ALIGN(size);
@@ -1614,7 +1614,7 @@ unmap_address_range(vm_address_space* addressSpace, addr_t address, addr_t size,
	Note, that in case of error your cache will be temporarily unlocked.
 */
 static status_t
-map_backing_store(vm_address_space* addressSpace, vm_cache* cache,
+map_backing_store(VMAddressSpace* addressSpace, vm_cache* cache,
	void** _virtualAddress, off_t offset, addr_t size, uint32 addressSpec,
	int wiring, int protection, int mapping, vm_area** _area,
	const char* areaName, bool unmapAddressRange, bool kernel)
@@ -1736,7 +1736,7 @@ vm_block_address_range(const char* name, void* address, addr_t size)
	if (status != B_OK)
		return status;
 
-	vm_address_space* addressSpace = locker.AddressSpace();
+	VMAddressSpace* addressSpace = locker.AddressSpace();
 
	// create an anonymous cache
	vm_cache* cache;
@@ -1950,7 +1950,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
	}
 
	AddressSpaceWriteLocker locker;
-	vm_address_space* addressSpace;
+	VMAddressSpace* addressSpace;
	status_t status;
 
	// For full lock areas reserve the pages before locking the address
@@ -2634,12 +2634,12 @@ vm_clone_area(team_id team, const char* name, void** address,
	// Now lock both address spaces and actually do the cloning.
 
	MultiAddressSpaceLocker locker;
-	vm_address_space* sourceAddressSpace;
+	VMAddressSpace* sourceAddressSpace;
	status_t status = locker.AddArea(sourceID, false, &sourceAddressSpace);
	if (status != B_OK)
		return status;
 
-	vm_address_space* targetAddressSpace;
+	VMAddressSpace* targetAddressSpace;
	status = locker.AddTeam(team, true, &targetAddressSpace);
	if (status != B_OK)
		return status;
@@ -2745,7 +2745,7 @@ vm_clone_area(team_id team, const char* name, void** address,
 
 //!	The address space must be write locked at this point
 static void
-remove_area_from_address_space(vm_address_space* addressSpace, vm_area* area)
+remove_area_from_address_space(VMAddressSpace* addressSpace, vm_area* area)
 {
	vm_area* temp = addressSpace->areas;
	vm_area* last = NULL;
@@ -2775,7 +2775,7 @@ remove_area_from_address_space(vm_address_space* addressSpace, vm_area* area)
 
 
 static void
-delete_area(vm_address_space* addressSpace, vm_area* area)
+delete_area(VMAddressSpace* addressSpace, vm_area* area)
 {
	rw_lock_write_lock(&sAreaHashLock);
	hash_remove(sAreaHash, area);
@@ -2908,7 +2908,7 @@ vm_copy_area(team_id team, const char* name, void** _address,
	// Do the locking: target address space, all address spaces associated with
	// the source cache, and the cache itself.
	MultiAddressSpaceLocker locker;
-	vm_address_space* targetAddressSpace;
+	VMAddressSpace* targetAddressSpace;
	vm_cache* cache;
	vm_area* source;
	status_t status = locker.AddTeam(team, true, &targetAddressSpace);
@@ -3089,7 +3089,7 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
 status_t
 vm_get_page_mapping(team_id team, addr_t vaddr, addr_t* paddr)
 {
-	vm_address_space* addressSpace = vm_get_address_space(team);
+	VMAddressSpace* addressSpace = vm_get_address_space(team);
	if (addressSpace == NULL)
		return B_BAD_TEAM_ID;
 
@@ -4065,7 +4065,7 @@ dump_available_memory(int argc, char** argv)
 
 
 status_t
-vm_delete_areas(struct vm_address_space* addressSpace)
+vm_delete_areas(struct VMAddressSpace* addressSpace)
 {
	vm_area* area;
	vm_area* next;
@@ -4634,7 +4634,7 @@ vm_page_fault(addr_t address, addr_t faultAddress, bool isWrite, bool isUser,
	TPF(PageFaultStart(address, isWrite, isUser, faultAddress));
 
	addr_t pageAddress = ROUNDDOWN(address, B_PAGE_SIZE);
-	vm_address_space* addressSpace = NULL;
+	VMAddressSpace* addressSpace = NULL;
 
	status_t status = B_OK;
	*newIP = 0;
@@ -4854,7 +4854,7 @@ struct PageFaultContext {
	bool restart;
 
 
-	PageFaultContext(vm_address_space* addressSpace, bool isWrite)
+	PageFaultContext(VMAddressSpace* addressSpace, bool isWrite)
		:
		addressSpaceLocker(addressSpace, true),
		map(&addressSpace->translation_map),
@@ -5023,7 +5023,7 @@ fault_get_page(PageFaultContext& context)
 
 
 static status_t
-vm_soft_fault(vm_address_space* addressSpace, addr_t originalAddress,
+vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
	bool isWrite, bool isUser)
 {
	FTRACE(("vm_soft_fault: thid 0x%lx address 0x%lx, isWrite %d, isUser %d\n",
@@ -5172,7 +5172,7 @@ vm_soft_fault(vm_address_space* addressSpace, addr_t originalAddress,
 
 /*!	You must have the address space's sem held */
 vm_area*
-vm_area_lookup(vm_address_space* addressSpace, addr_t address)
+vm_area_lookup(VMAddressSpace* addressSpace, addr_t address)
 {
	vm_area* area;
 
@@ -5402,7 +5402,7 @@ fill_area_info(struct vm_area* area, area_info* info, size_t size)
	Used by both lock_memory() and unlock_memory().
 */
 static status_t
-test_lock_memory(vm_address_space* addressSpace, addr_t address,
+test_lock_memory(VMAddressSpace* addressSpace, addr_t address,
	bool& needsLocking)
 {
	rw_lock_read_lock(&addressSpace->lock);
@@ -5645,7 +5645,7 @@ user_memset(void* s, char c, size_t count)
 status_t
 lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
 {
-	vm_address_space* addressSpace = NULL;
+	VMAddressSpace* addressSpace = NULL;
	struct vm_translation_map* map;
	addr_t unalignedBase = (addr_t)address;
	addr_t end = unalignedBase + numBytes;
@@ -5751,7 +5751,7 @@ lock_memory(void* address, size_t numBytes, uint32 flags)
 status_t
 unlock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
 {
-	vm_address_space* addressSpace = NULL;
+	VMAddressSpace* addressSpace = NULL;
	struct vm_translation_map* map;
	addr_t unalignedBase = (addr_t)address;
	addr_t end = unalignedBase + numBytes;
@@ -5827,7 +5827,7 @@ get_memory_map_etc(team_id team, const void* address, size_t numBytes,
	uint32 numEntries = *_numEntries;
	*_numEntries = 0;
 
-	vm_address_space* addressSpace;
+	VMAddressSpace* addressSpace;
	addr_t virtualAddress = (addr_t)address;
	addr_t pageOffset = virtualAddress & (B_PAGE_SIZE - 1);
	addr_t physicalAddress;
@@ -27,7 +27,7 @@
 #endif
 
 
-static vm_address_space* sKernelAddressSpace;
+static VMAddressSpace* sKernelAddressSpace;
 
 #define ASPACE_HASH_TABLE_SIZE 1024
 static struct hash_table* sAddressSpaceTable;
@@ -35,7 +35,7 @@ static rw_lock sAddressSpaceTableLock;
 
 
 static void
-_dump_aspace(vm_address_space* aspace)
+_dump_aspace(VMAddressSpace* aspace)
 {
	vm_area* area;
 
@@ -62,7 +62,7 @@ _dump_aspace(vm_address_space* aspace)
 static int
 dump_aspace(int argc, char** argv)
 {
-	vm_address_space* aspace;
+	VMAddressSpace* aspace;
 
	if (argc < 2) {
		kprintf("aspace: not enough arguments\n");
@@ -74,7 +74,7 @@ dump_aspace(int argc, char** argv)
	{
		team_id id = strtoul(argv[1], NULL, 0);
 
-		aspace = (vm_address_space*)hash_lookup(sAddressSpaceTable, &id);
+		aspace = (VMAddressSpace*)hash_lookup(sAddressSpaceTable, &id);
		if (aspace == NULL) {
			kprintf("invalid aspace id\n");
		} else {
@@ -89,14 +89,14 @@ dump_aspace(int argc, char** argv)
 static int
 dump_aspace_list(int argc, char** argv)
 {
-	vm_address_space* space;
+	VMAddressSpace* space;
	struct hash_iterator iter;
 
	kprintf("   address      id         base          size    area count   "
		"  area size\n");
 
	hash_open(sAddressSpaceTable, &iter);
-	while ((space = (vm_address_space*)hash_next(sAddressSpaceTable,
+	while ((space = (VMAddressSpace*)hash_next(sAddressSpaceTable,
			&iter)) != NULL) {
		int32 areaCount = 0;
		off_t areaSize = 0;
@@ -119,7 +119,7 @@ dump_aspace_list(int argc, char** argv)
 static int
 aspace_compare(void* _a, const void* key)
 {
-	vm_address_space* aspace = (vm_address_space*)_a;
+	VMAddressSpace* aspace = (VMAddressSpace*)_a;
	const team_id* id = (const team_id*)key;
 
	if (aspace->id == *id)
@@ -132,7 +132,7 @@ aspace_compare(void* _a, const void* key)
 static uint32
 aspace_hash(void* _a, const void* key, uint32 range)
 {
-	vm_address_space* aspace = (vm_address_space*)_a;
+	VMAddressSpace* aspace = (VMAddressSpace*)_a;
	const team_id* id = (const team_id*)key;
 
	if (aspace != NULL)
@@ -146,7 +146,7 @@ aspace_hash(void* _a, const void* key, uint32 range)
	have been released, so it's safe to remove it.
 */
 static void
-delete_address_space(vm_address_space* addressSpace)
+delete_address_space(VMAddressSpace* addressSpace)
 {
	TRACE(("delete_address_space: called on aspace 0x%lx\n", addressSpace->id));
 
@@ -165,13 +165,13 @@ delete_address_space(vm_address_space* addressSpace)
 //	#pragma mark -
 
 
-vm_address_space*
+VMAddressSpace*
 vm_get_address_space(team_id id)
 {
-	vm_address_space* addressSpace;
+	VMAddressSpace* addressSpace;
 
	rw_lock_read_lock(&sAddressSpaceTableLock);
-	addressSpace = (vm_address_space*)hash_lookup(sAddressSpaceTable, &id);
+	addressSpace = (VMAddressSpace*)hash_lookup(sAddressSpaceTable, &id);
	if (addressSpace)
		atomic_add(&addressSpace->ref_count, 1);
	rw_lock_read_unlock(&sAddressSpaceTableLock);
@@ -180,7 +180,7 @@ vm_get_address_space(team_id id)
 }
 
 
-vm_address_space*
+VMAddressSpace*
 vm_get_kernel_address_space(void)
 {
	// we can treat this one a little differently since it can't be deleted
@@ -189,7 +189,7 @@ vm_get_kernel_address_space(void)
 }
 
 
-vm_address_space*
+VMAddressSpace*
 vm_kernel_address_space(void)
 {
	return sKernelAddressSpace;
@@ -203,13 +203,13 @@ vm_kernel_address_space_id(void)
 }
 
 
-vm_address_space*
+VMAddressSpace*
 vm_get_current_user_address_space(void)
 {
	struct thread* thread = thread_get_current_thread();
 
	if (thread != NULL) {
-		vm_address_space* addressSpace = thread->team->address_space;
+		VMAddressSpace* addressSpace = thread->team->address_space;
		if (addressSpace != NULL) {
			atomic_add(&addressSpace->ref_count, 1);
			return addressSpace;
@@ -233,7 +233,7 @@ vm_current_user_address_space_id(void)
 
 
 void
-vm_put_address_space(vm_address_space* addressSpace)
+vm_put_address_space(VMAddressSpace* addressSpace)
 {
	bool remove = false;
 
@@ -257,7 +257,7 @@ vm_put_address_space(vm_address_space* addressSpace)
	still be in memory until the last reference has been released.
 */
 void
-vm_delete_address_space(vm_address_space* addressSpace)
+vm_delete_address_space(VMAddressSpace* addressSpace)
 {
	rw_lock_write_lock(&addressSpace->lock);
	addressSpace->state = VM_ASPACE_STATE_DELETION;
@@ -270,12 +270,12 @@ vm_delete_address_space(vm_address_space* addressSpace)
 
 status_t
 vm_create_address_space(team_id id, addr_t base, addr_t size,
-	bool kernel, vm_address_space** _addressSpace)
+	bool kernel, VMAddressSpace** _addressSpace)
 {
-	vm_address_space* addressSpace;
+	VMAddressSpace* addressSpace;
	status_t status;
 
-	addressSpace = (vm_address_space*)malloc_nogrow(sizeof(vm_address_space));
+	addressSpace = (VMAddressSpace*)malloc_nogrow(sizeof(VMAddressSpace));
	if (addressSpace == NULL)
		return B_NO_MEMORY;
 
@@ -321,7 +321,7 @@ vm_address_space_init(void)
 
	// create the area and address space hash tables
	{
-		vm_address_space* aspace;
+		VMAddressSpace* aspace;
		sAddressSpaceTable = hash_init(ASPACE_HASH_TABLE_SIZE,
			(addr_t)&aspace->hash_next - (addr_t)aspace, &aspace_compare,
			&aspace_hash);
@@ -915,7 +915,7 @@ VMCache::CanWritePage(off_t offset)
 
 
 status_t
-VMCache::Fault(struct vm_address_space *aspace, off_t offset)
+VMCache::Fault(struct VMAddressSpace *aspace, off_t offset)
 {
	return B_BAD_ADDRESS;
 }
@@ -536,7 +536,7 @@ dump_page(int argc, char **argv)
 
	if (index == 2) {
		if (!physical) {
-			vm_address_space *addressSpace = vm_kernel_address_space();
+			VMAddressSpace *addressSpace = vm_kernel_address_space();
			uint32 flags;
 
			if (thread_get_current_thread()->team->address_space != NULL)