Refactored vm_translation_map:

* Pulled the physical page mapping functions out of vm_translation_map into
  a new interface VMPhysicalPageMapper.
* Renamed vm_translation_map to VMTranslationMap and made it a proper C++
  class. The functions in the operations vector have become methods.
* Added class GenericVMPhysicalPageMapper implementing VMPhysicalPageMapper
  as far as possible (without actually writing new code).
* Adjusted the x86 and the PPC specifics accordingly (untested for the
  latter). For the other architectures the build is, I'm afraid, seriously
  broken.

The next steps will modify and extend the VMTranslationMap interface, so that
it will be possible to fix the bugs in vm_unmap_page[s]() and employ
architecture-specific optimizations.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35066 a95241bf-73f2-0310-859d-f6bbb57e9c96
Parent: 167898484d
Commit: bcc2c157a1
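For orientation before the diff: the core of this change is the classic move from a C-style operations vector to a C++ class with virtual methods. The following sketch is illustrative only — the type names are simplified, hypothetical stand-ins, not the real Haiku declarations (those follow in the diff):

// Minimal sketch of the refactoring pattern applied by this commit.
// All names here are hypothetical; see the diff for the real interfaces.
#include <stdint.h>

typedef int32_t status_t;	// Haiku-style status code
typedef uintptr_t addr_t;
typedef uint32_t uint32;

// Before: behavior lives in a table of function pointers that every
// caller dereferences explicitly.
struct old_translation_map;
struct old_translation_map_ops {
	status_t (*map)(old_translation_map* map, addr_t va, addr_t pa,
		uint32 attributes);
	status_t (*unmap)(old_translation_map* map, addr_t start, addr_t end);
};
struct old_translation_map {
	old_translation_map_ops* ops;	// calls look like map->ops->map(map, ...)
	int32_t map_count;
};

// After: the operations become pure virtual methods; each architecture
// subclasses the interface instead of filling in an ops vector, and
// per-architecture state can live in subclass fields rather than in a
// separately allocated arch_data struct.
struct NewTranslationMap {
	virtual ~NewTranslationMap() {}

	virtual status_t Map(addr_t va, addr_t pa, uint32 attributes) = 0;
	virtual status_t Unmap(addr_t start, addr_t end) = 0;

protected:
	int32_t fMapCount;
};

Dispatch cost is comparable either way (one indirection per call); the win is in type safety and in dropping the hand-maintained ops table.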
@@ -11,7 +11,7 @@
 extern "C" {
 #endif
 
-void ppc_translation_map_change_asid(vm_translation_map *map);
+void ppc_translation_map_change_asid(VMTranslationMap *map);
 
 status_t ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
 	size_t size);
@@ -1,5 +1,5 @@
 /*
-** Copyright 2002-2004, The Haiku Team. All rights reserved.
+** Copyright 2002-2010, The Haiku Team. All rights reserved.
 ** Distributed under the terms of the Haiku License.
 **
 ** Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
@@ -9,27 +9,28 @@
 #define KERNEL_ARCH_VM_TRANSLATION_MAP_H
 
 
-#include <vm/vm_translation_map.h>
+#include <vm/VMTranslationMap.h>
 
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-status_t arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel);
-status_t arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map);
+status_t arch_vm_translation_map_create_map(bool kernel,
+	VMTranslationMap** _map);
 
-status_t arch_vm_translation_map_init(struct kernel_args *args);
+status_t arch_vm_translation_map_init(struct kernel_args *args,
+	VMPhysicalPageMapper** _physicalPageMapper);
 status_t arch_vm_translation_map_init_post_area(struct kernel_args *args);
 status_t arch_vm_translation_map_init_post_sem(struct kernel_args *args);
 
 // Quick function to map a page in regardless of map context. Used in VM
 // initialization before most vm data structures exist.
-status_t arch_vm_translation_map_early_map(struct kernel_args *args, addr_t va, addr_t pa,
-	uint8 attributes, addr_t (*get_free_page)(struct kernel_args *));
+status_t arch_vm_translation_map_early_map(struct kernel_args *args, addr_t va,
+	addr_t pa, uint8 attributes, addr_t (*get_free_page)(struct kernel_args *));
 
 bool arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
-	uint32 protection);
+	uint32 protection);
 
 #ifdef __cplusplus
 }
@@ -12,6 +12,6 @@
 #ifdef __cplusplus
 extern "C"
 #endif
-void *i386_translation_map_get_pgdir(vm_translation_map *map);
+void *i386_translation_map_get_pgdir(VMTranslationMap *map);
 
 #endif	/* _KERNEL_ARCH_x86_VM_TRANSLATION_MAP_H */
@@ -13,8 +13,8 @@
 #include <OS.h>
 
 #include <vm/vm_priv.h>
-#include <vm/vm_translation_map.h>
 #include <vm/VMArea.h>
+#include <vm/VMTranslationMap.h>
 
 
 struct VMAddressSpace {
@@ -36,7 +36,7 @@ public:
 	size_t FreeSpace() const { return fFreeSpace; }
 	bool IsBeingDeleted() const { return fDeleting; }
 
-	vm_translation_map& TranslationMap() { return fTranslationMap; }
+	VMTranslationMap* TranslationMap() { return fTranslationMap; }
 
 	status_t ReadLock()
 		{ return rw_lock_read_lock(&fLock); }
@@ -129,7 +129,7 @@ protected:
 	int32 fRefCount;
 	int32 fFaultCount;
 	int32 fChangeCount;
-	vm_translation_map fTranslationMap;
+	VMTranslationMap* fTranslationMap;
 	bool fDeleting;
 	static VMAddressSpace* sKernelAddressSpace;
 };
headers/private/kernel/vm/VMTranslationMap.h (new file, 98 lines)
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2002-2010, Haiku. All rights reserved.
+ * Distributed under the terms of the MIT License.
+ *
+ * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
+ * Distributed under the terms of the NewOS License.
+ */
+#ifndef KERNEL_VM_VM_TRANSLATION_MAP_H
+#define KERNEL_VM_VM_TRANSLATION_MAP_H
+
+
+#include <kernel.h>
+#include <lock.h>
+
+
+struct kernel_args;
+
+
+struct VMTranslationMap {
+	VMTranslationMap();
+	virtual ~VMTranslationMap();
+
+	virtual status_t InitPostSem() = 0;
+
+	virtual status_t Lock() = 0;
+	virtual status_t Unlock() = 0;
+
+	virtual addr_t MappedSize() const = 0;
+	virtual size_t MaxPagesNeededToMap(addr_t start,
+		addr_t end) const = 0;
+
+	virtual status_t Map(addr_t virtualAddress,
+		addr_t physicalAddress,
+		uint32 attributes) = 0;
+	virtual status_t Unmap(addr_t start, addr_t end) = 0;
+
+	virtual status_t Query(addr_t virtualAddress,
+		addr_t* _physicalAddress,
+		uint32* _flags) = 0;
+	virtual status_t QueryInterrupt(addr_t virtualAddress,
+		addr_t* _physicalAddress,
+		uint32* _flags) = 0;
+
+	virtual status_t Protect(addr_t base, addr_t top,
+		uint32 attributes) = 0;
+	virtual status_t ClearFlags(addr_t virtualAddress,
+		uint32 flags) = 0;
+
+	virtual void Flush() = 0;
+
+protected:
+	recursive_lock fLock;
+	int32 fMapCount;
+};
+
+
+struct VMPhysicalPageMapper {
+	VMPhysicalPageMapper();
+	virtual ~VMPhysicalPageMapper();
+
+	// get/put virtual address for physical page -- will be usable on all CPUs
+	// (usually more expensive than the *_current_cpu() versions)
+	virtual status_t GetPage(addr_t physicalAddress,
+		addr_t* _virtualAddress,
+		void** _handle) = 0;
+	virtual status_t PutPage(addr_t virtualAddress,
+		void* handle) = 0;
+
+	// get/put virtual address for physical page -- thread must be pinned the
+	// whole time
+	virtual status_t GetPageCurrentCPU(
+		addr_t physicalAddress,
+		addr_t* _virtualAddress,
+		void** _handle) = 0;
+	virtual status_t PutPageCurrentCPU(addr_t virtualAddress,
+		void* _handle) = 0;
+
+	// get/put virtual address for physical page in KDL
+	virtual status_t GetPageDebug(addr_t physicalAddress,
+		addr_t* _virtualAddress,
+		void** _handle) = 0;
+	virtual status_t PutPageDebug(addr_t virtualAddress,
+		void* handle) = 0;
+
+	// memory operations on pages
+	virtual status_t MemsetPhysical(addr_t address, int value,
+		size_t length) = 0;
+	virtual status_t MemcpyFromPhysical(void* to, addr_t from,
+		size_t length, bool user) = 0;
+	virtual status_t MemcpyToPhysical(addr_t to, const void* from,
+		size_t length, bool user) = 0;
+	virtual void MemcpyPhysicalPage(addr_t to, addr_t from) = 0;
+};
+
+
+#include <arch/vm_translation_map.h>
+
+
+#endif	/* KERNEL_VM_VM_TRANSLATION_MAP_H */
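To make the new interface concrete, here is a hedged usage sketch — my own example, not code from this commit; the 0 passed as the attributes argument is a placeholder for real protection flags:

// Hypothetical caller: map one page, query it back, and flush pending
// invalidations, all through the abstract VMTranslationMap interface.
static status_t
map_one_page(VMTranslationMap* map, addr_t va, addr_t pa)
{
	map->Lock();
	status_t error = map->Map(va, pa, 0);
	if (error == B_OK) {
		addr_t physical;
		uint32 flags;
		error = map->Query(va, &physical, &flags);
	}
	map->Flush();
	map->Unlock();
	return error;
}

The Lock()/Flush()/Unlock() sequence mirrors what the x86 implementation below does internally: Unlock() on the last recursion level triggers Flush(), which batches TLB invalidations.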
@@ -1,76 +0,0 @@
-/*
- * Copyright 2002-2007, Haiku. All rights reserved.
- * Distributed under the terms of the MIT License.
- *
- * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
- * Distributed under the terms of the NewOS License.
- */
-#ifndef KERNEL_VM_VM_TRANSLATION_MAP_H
-#define KERNEL_VM_VM_TRANSLATION_MAP_H
-
-
-#include <kernel.h>
-#include <lock.h>
-
-
-struct kernel_args;
-
-
-typedef struct vm_translation_map {
-	struct vm_translation_map *next;
-	struct vm_translation_map_ops *ops;
-	recursive_lock lock;
-	int32 map_count;
-	struct vm_translation_map_arch_info *arch_data;
-} vm_translation_map;
-
-
-// table of operations the vm may want to do to this mapping
-typedef struct vm_translation_map_ops {
-	void (*destroy)(vm_translation_map *map);
-	status_t (*lock)(vm_translation_map *map);
-	status_t (*unlock)(vm_translation_map *map);
-	size_t (*map_max_pages_need)(vm_translation_map *map, addr_t start, addr_t end);
-	status_t (*map)(vm_translation_map *map, addr_t va, addr_t pa,
-		uint32 attributes);
-	status_t (*unmap)(vm_translation_map *map, addr_t start, addr_t end);
-	status_t (*query)(vm_translation_map *map, addr_t va, addr_t *_outPhysical,
-		uint32 *_outFlags);
-	status_t (*query_interrupt)(vm_translation_map *map, addr_t va,
-		addr_t *_outPhysical, uint32 *_outFlags);
-	addr_t (*get_mapped_size)(vm_translation_map*);
-	status_t (*protect)(vm_translation_map *map, addr_t base, addr_t top,
-		uint32 attributes);
-	status_t (*clear_flags)(vm_translation_map *map, addr_t va, uint32 flags);
-	void (*flush)(vm_translation_map *map);
-
-	// get/put virtual address for physical page -- will be usuable on all CPUs
-	// (usually more expensive than the *_current_cpu() versions)
-	status_t (*get_physical_page)(addr_t physicalAddress,
-		addr_t *_virtualAddress, void **handle);
-	status_t (*put_physical_page)(addr_t virtualAddress, void *handle);
-
-	// get/put virtual address for physical page -- thread must be pinned the
-	// whole time
-	status_t (*get_physical_page_current_cpu)(addr_t physicalAddress,
-		addr_t *_virtualAddress, void **handle);
-	status_t (*put_physical_page_current_cpu)(addr_t virtualAddress,
-		void *handle);
-
-	// get/put virtual address for physical in KDL
-	status_t (*get_physical_page_debug)(addr_t physicalAddress,
-		addr_t *_virtualAddress, void **handle);
-	status_t (*put_physical_page_debug)(addr_t virtualAddress, void *handle);
-
-	// memory operations on pages
-	status_t (*memset_physical)(addr_t address, int value, size_t length);
-	status_t (*memcpy_from_physical)(void* to, addr_t from, size_t length,
-		bool user);
-	status_t (*memcpy_to_physical)(addr_t to, const void* from, size_t length,
-		bool user);
-	void (*memcpy_physical_page)(addr_t to, addr_t from);
-} vm_translation_map_ops;
-
-#include <arch/vm_translation_map.h>
-
-#endif	/* KERNEL_VM_VM_TRANSLATION_MAP_H */
src/system/kernel/arch/generic/GenericVMPhysicalPageMapper.cpp (new file, 103 lines)
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include "GenericVMPhysicalPageMapper.h"
+
+#include <Errors.h>
+
+#include "generic_vm_physical_page_mapper.h"
+#include "generic_vm_physical_page_ops.h"
+
+
+GenericVMPhysicalPageMapper::GenericVMPhysicalPageMapper()
+{
+}
+
+
+GenericVMPhysicalPageMapper::~GenericVMPhysicalPageMapper()
+{
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::GetPage(addr_t physicalAddress,
+	addr_t* _virtualAddress, void** _handle)
+{
+	return generic_get_physical_page(physicalAddress, _virtualAddress, 0);
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
+{
+	return generic_put_physical_page(virtualAddress);
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::GetPageCurrentCPU(addr_t physicalAddress,
+	addr_t* _virtualAddress, void** _handle)
+{
+	// TODO:...
+	return B_UNSUPPORTED;
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
+	void* _handle)
+{
+	// TODO:...
+	return B_UNSUPPORTED;
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::GetPageDebug(addr_t physicalAddress,
+	addr_t* _virtualAddress, void** _handle)
+{
+	// TODO:...
+	return B_UNSUPPORTED;
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
+{
+	// TODO:...
+	return B_UNSUPPORTED;
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::MemsetPhysical(addr_t address, int value,
+	size_t length)
+{
+	return generic_vm_memset_physical(address, value, length);
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::MemcpyFromPhysical(void* to, addr_t from,
+	size_t length, bool user)
+{
+	return generic_vm_memcpy_from_physical(to, from, length, user);
+}
+
+
+status_t
+GenericVMPhysicalPageMapper::MemcpyToPhysical(addr_t to, const void* from,
+	size_t length, bool user)
+{
+	return generic_vm_memcpy_to_physical(to, from, length, user);
+}
+
+
+void
+GenericVMPhysicalPageMapper::MemcpyPhysicalPage(addr_t to, addr_t from)
+{
+	generic_vm_memcpy_physical_page(to, from);
+}
src/system/kernel/arch/generic/GenericVMPhysicalPageMapper.h (new file, 45 lines)
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2010, Ingo Weinhold <ingo_weinhold@gmx.de>.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef _KERNEL_GENERIC_VM_PHYSICAL_PAGE_MAPPER_CLASS_H
+#define _KERNEL_GENERIC_VM_PHYSICAL_PAGE_MAPPER_CLASS_H
+
+
+#include <vm/VMTranslationMap.h>
+
+
+struct GenericVMPhysicalPageMapper : VMPhysicalPageMapper {
+	GenericVMPhysicalPageMapper();
+	virtual ~GenericVMPhysicalPageMapper();
+
+	virtual status_t GetPage(addr_t physicalAddress,
+		addr_t* _virtualAddress,
+		void** _handle);
+	virtual status_t PutPage(addr_t virtualAddress,
+		void* handle);
+
+	virtual status_t GetPageCurrentCPU(
+		addr_t physicalAddress,
+		addr_t* _virtualAddress,
+		void** _handle);
+	virtual status_t PutPageCurrentCPU(addr_t virtualAddress,
+		void* _handle);
+
+	virtual status_t GetPageDebug(addr_t physicalAddress,
+		addr_t* _virtualAddress,
+		void** _handle);
+	virtual status_t PutPageDebug(addr_t virtualAddress,
+		void* handle);
+
+	virtual status_t MemsetPhysical(addr_t address, int value,
+		size_t length);
+	virtual status_t MemcpyFromPhysical(void* to, addr_t from,
+		size_t length, bool user);
+	virtual status_t MemcpyToPhysical(addr_t to, const void* from,
+		size_t length, bool user);
+	virtual void MemcpyPhysicalPage(addr_t to, addr_t from);
+};
+
+
+#endif	// _KERNEL_GENERIC_VM_PHYSICAL_PAGE_MAPPER_CLASS_H
@@ -29,6 +29,7 @@ KernelMergeObject kernel_arch_ppc.o :
 
 	generic_vm_physical_page_mapper.cpp
 	generic_vm_physical_page_ops.cpp
+	GenericVMPhysicalPageMapper.cpp
 	:
 	$(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused
 	;
@@ -181,7 +181,7 @@ arch_thread_context_switch(struct thread *t_from, struct thread *t_to)
 	if (t_from->team != t_to->team) {
 		// switching to a new address space
 		ppc_translation_map_change_asid(
-			&t_to->team->address_space->TranslationMap());
+			t_to->team->address_space->TranslationMap());
 	}
 }
 
@@ -53,10 +53,10 @@
 	registers (8 - 15) map the kernel addresses, so they remain unchanged.
 
 	The range of the virtual address space a team's effective address space
-	is mapped to is defined by its vm_translation_map_arch_info::vsid_base,
+	is mapped to is defined by its PPCVMTranslationMap::fVSIDBase,
 	which is the first of the 8 successive VSID values used for the team.
 
-	Which vsid_base values are already taken is defined by the set bits in
+	Which fVSIDBase values are already taken is defined by the set bits in
 	the bitmap sVSIDBaseBitmap.
 
 
@@ -85,6 +85,7 @@
 
 #include "generic_vm_physical_page_mapper.h"
 #include "generic_vm_physical_page_ops.h"
+#include "GenericVMPhysicalPageMapper.h"
 
 
 static struct page_table_entry_group *sPageTable;
@@ -92,7 +93,6 @@ static size_t sPageTableSize;
 static uint32 sPageTableHashMask;
 static area_id sPageTableArea;
 
-
 // 64 MB of iospace
 #define IOSPACE_SIZE (64*1024*1024)
 // We only have small (4 KB) pages. The only reason for choosing greater chunk
@@ -103,6 +103,7 @@ static area_id sPageTableArea;
 
 static addr_t sIOSpaceBase;
 
+static GenericVMPhysicalPageMapper sPhysicalPageMapper;
 
 // The VSID is a 24 bit number. The lower three bits are defined by the
 // (effective) segment number, which leaves us with a 21 bit space of
@@ -112,23 +113,61 @@ static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
 static spinlock sVSIDBaseBitmapLock;
 
 #define VSID_BASE_SHIFT 3
-#define VADDR_TO_VSID(map, vaddr) \
-	((map)->arch_data->vsid_base + ((vaddr) >> 28))
+#define VADDR_TO_VSID(vsidBase, vaddr) (vsidBase + ((vaddr) >> 28))
 
-// vm_translation object stuff
-typedef struct vm_translation_map_arch_info {
-	int vsid_base;	// used VSIDs are vside_base ... vsid_base + 7
-} vm_translation_map_arch_info;
 
+struct PPCVMTranslationMap : VMTranslationMap {
+	PPCVMTranslationMap();
+	virtual ~PPCVMTranslationMap();
+
+	status_t Init(bool kernel);
+
+	inline int VSIDBase() const { return fVSIDBase; }
+
+	page_table_entry* LookupPageTableEntry(addr_t virtualAddress);
+	bool RemovePageTableEntry(addr_t virtualAddress);
+
+	virtual status_t InitPostSem();
+
+	virtual status_t Lock();
+	virtual status_t Unlock();
+
+	virtual addr_t MappedSize() const;
+	virtual size_t MaxPagesNeededToMap(addr_t start,
+		addr_t end) const;
+
+	virtual status_t Map(addr_t virtualAddress,
+		addr_t physicalAddress,
+		uint32 attributes);
+	virtual status_t Unmap(addr_t start, addr_t end);
+
+	virtual status_t Query(addr_t virtualAddress,
+		addr_t* _physicalAddress,
+		uint32* _flags);
+	virtual status_t QueryInterrupt(addr_t virtualAddress,
+		addr_t* _physicalAddress,
+		uint32* _flags);
+
+	virtual status_t Protect(addr_t base, addr_t top,
+		uint32 attributes);
+	virtual status_t ClearFlags(addr_t virtualAddress,
+		uint32 flags);
+
+	virtual void Flush();
+
+protected:
+	int fVSIDBase;
+};
+
 
 void
-ppc_translation_map_change_asid(vm_translation_map *map)
+ppc_translation_map_change_asid(VMTranslationMap *map)
 {
 // this code depends on the kernel being at 0x80000000, fix if we change that
 #if KERNEL_BASE != 0x80000000
 #error fix me
 #endif
-	int vsidBase = map->arch_data->vsid_base;
+	int vsidBase = static_cast<PPCVMTranslationMap*>(map)->VSIDBase();
 
 	isync();	// synchronize context
 	asm("mtsr	0,%0" : : "g"(vsidBase));
@@ -143,40 +182,6 @@ ppc_translation_map_change_asid(vm_translation_map *map)
 }
 
 
-static status_t
-lock_tmap(vm_translation_map *map)
-{
-	recursive_lock_lock(&map->lock);
-	return 0;
-}
-
-
-static status_t
-unlock_tmap(vm_translation_map *map)
-{
-	recursive_lock_unlock(&map->lock);
-	return 0;
-}
-
-
-static void
-destroy_tmap(vm_translation_map *map)
-{
-	if (map->map_count > 0) {
-		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
-			map, map->map_count);
-	}
-
-	// mark the vsid base not in use
-	int baseBit = map->arch_data->vsid_base >> VSID_BASE_SHIFT;
-	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
-		~(1 << (baseBit % 32)));
-
-	free(map->arch_data);
-	recursive_lock_destroy(&map->lock);
-}
-
-
 static void
 fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
 	addr_t virtualAddress, addr_t physicalAddress, uint8 protection,
@@ -207,75 +212,14 @@ fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
 }
 
 
-static size_t
-map_max_pages_need(vm_translation_map *map, addr_t start, addr_t end)
-{
-	return 0;
-}
-
-
-static status_t
-map_tmap(vm_translation_map *map, addr_t virtualAddress, addr_t physicalAddress, uint32 attributes)
+page_table_entry *
+PPCVMTranslationMap::LookupPageTableEntry(addr_t virtualAddress)
 {
 	// lookup the vsid based off the va
-	uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
-	uint32 protection = 0;
-
-	// ToDo: check this
-	// all kernel mappings are R/W to supervisor code
-	if (attributes & (B_READ_AREA | B_WRITE_AREA))
-		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
-
-	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
-
-	// Search for a free page table slot using the primary hash value
-
-	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
-	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
-
-	for (int i = 0; i < 8; i++) {
-		page_table_entry *entry = &group->entry[i];
-
-		if (entry->valid)
-			continue;
-
-		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
-			protection, false);
-		map->map_count++;
-		return B_OK;
-	}
-
-	// Didn't found one, try the secondary hash value
-
-	hash = page_table_entry::SecondaryHash(hash);
-	group = &sPageTable[hash & sPageTableHashMask];
-
-	for (int i = 0; i < 8; i++) {
-		page_table_entry *entry = &group->entry[i];
-
-		if (entry->valid)
-			continue;
-
-		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
-			protection, false);
-		map->map_count++;
-		return B_OK;
-	}
-
-	panic("vm_translation_map.map_tmap: hash table full\n");
-	return B_ERROR;
-}
-
-
-static page_table_entry *
-lookup_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
-{
-	// lookup the vsid based off the va
-	uint32 virtualSegmentID = VADDR_TO_VSID(map, virtualAddress);
+	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
 
 //	dprintf("vm_translation_map.lookup_page_table_entry: vsid %ld, va 0x%lx\n", virtualSegmentID, virtualAddress);
 
 
 	// Search for the page table entry using the primary hash value
 
 	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
@@ -308,10 +252,10 @@ lookup_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
 }
 
 
-static bool
-remove_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
+bool
+PPCVMTranslationMap::RemovePageTableEntry(addr_t virtualAddress)
 {
-	page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
+	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
 	if (entry) {
 		entry->valid = 0;
 		ppc_sync();
@@ -326,7 +270,168 @@ remove_page_table_entry(vm_translation_map *map, addr_t virtualAddress)
 
 
 static status_t
-unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
+map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
 {
+	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
+	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
+	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
+		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
+
+	// map the pages
+	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
+}
+
+
+// #pragma mark -
+
+
+PPCVMTranslationMap::PPCVMTranslationMap()
+{
+}
+
+
+PPCVMTranslationMap::~PPCVMTranslationMap()
+{
+	if (fMapCount > 0) {
+		panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
+			this, fMapCount);
+	}
+
+	// mark the vsid base not in use
+	int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
+	atomic_and((vint32 *)&sVSIDBaseBitmap[baseBit / 32],
+		~(1 << (baseBit % 32)));
+}
+
+
+status_t
+PPCVMTranslationMap::Init(bool kernel)
+{
+	cpu_status state = disable_interrupts();
+	acquire_spinlock(&sVSIDBaseBitmapLock);
+
+	// allocate a VSID base for this one
+	if (kernel) {
+		// The boot loader has set up the segment registers for identical
+		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
+		// latter one for mapping the kernel address space (0x80000000...), the
+		// former one for the lower addresses required by the Open Firmware
+		// services.
+		fVSIDBase = 0;
+		sVSIDBaseBitmap[0] |= 0x3;
+	} else {
+		int i = 0;
+
+		while (i < MAX_VSID_BASES) {
+			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
+				i += 32;
+				continue;
+			}
+			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
+				// we found it
+				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
+				break;
+			}
+			i++;
+		}
+		if (i >= MAX_VSID_BASES)
+			panic("vm_translation_map_create: out of VSID bases\n");
+		fVSIDBase = i << VSID_BASE_SHIFT;
+	}
+
+	release_spinlock(&sVSIDBaseBitmapLock);
+	restore_interrupts(state);
+
+	return B_OK;
+}
+
+
+status_t
+PPCVMTranslationMap::InitPostSem()
+{
+	return B_OK;
+}
+
+
+status_t
+PPCVMTranslationMap::Lock()
+{
+	recursive_lock_lock(&fLock);
+	return 0;
+}
+
+
+status_t
+PPCVMTranslationMap::Unlock()
+{
+	recursive_lock_unlock(&fLock);
+	return 0;
+}
+
+
+size_t
+PPCVMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
+{
+	return 0;
+}
+
+
+status_t
+PPCVMTranslationMap::Map(addr_t virtualAddress, addr_t physicalAddress,
+	uint32 attributes)
+{
+	// lookup the vsid based off the va
+	uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
+	uint32 protection = 0;
+
+	// ToDo: check this
+	// all kernel mappings are R/W to supervisor code
+	if (attributes & (B_READ_AREA | B_WRITE_AREA))
+		protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
+
+	//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
+
+	// Search for a free page table slot using the primary hash value
+
+	uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
+	page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
+
+	for (int i = 0; i < 8; i++) {
+		page_table_entry *entry = &group->entry[i];
+
+		if (entry->valid)
+			continue;
+
+		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
+			protection, false);
+		fMapCount++;
+		return B_OK;
+	}
+
+	// Didn't find one, try the secondary hash value
+
+	hash = page_table_entry::SecondaryHash(hash);
+	group = &sPageTable[hash & sPageTableHashMask];
+
+	for (int i = 0; i < 8; i++) {
+		page_table_entry *entry = &group->entry[i];
+
+		if (entry->valid)
+			continue;
+
+		fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
+			protection, false);
+		fMapCount++;
+		return B_OK;
+	}
+
+	panic("vm_translation_map.map_tmap: hash table full\n");
+	return B_ERROR;
+}
+
+
+status_t
+PPCVMTranslationMap::Unmap(addr_t start, addr_t end)
+{
 	page_table_entry *entry;
 
@@ -336,8 +441,8 @@ unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
 //	dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
 
 	while (start < end) {
-		if (remove_page_table_entry(map, start))
-			map->map_count--;
+		if (RemovePageTableEntry(start))
+			fMapCount--;
 
 		start += B_PAGE_SIZE;
 	}
@@ -346,8 +451,8 @@ unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
 }
 
 
-static status_t
-query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
+status_t
+PPCVMTranslationMap::Query(addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
 {
 	page_table_entry *entry;
 
@@ -355,7 +460,7 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_ou
 	*_outFlags = 0;
 	*_outPhysical = 0;
 
-	entry = lookup_page_table_entry(map, va);
+	entry = LookupPageTableEntry(va);
 	if (entry == NULL)
 		return B_NO_ERROR;
 
@@ -375,38 +480,25 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_outPhysical, uint32 *_ou
 }
 
 
-static status_t
-map_iospace_chunk(addr_t va, addr_t pa, uint32 flags)
+addr_t
+PPCVMTranslationMap::MappedSize() const
 {
-	pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
-	va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
-	if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
-		panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
-
-	// map the pages
-	return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
+	return fMapCount;
 }
 
 
-static addr_t
-get_mapped_size_tmap(vm_translation_map *map)
-{
-	return map->map_count;
-}
-
-
-static status_t
-protect_tmap(vm_translation_map *map, addr_t base, addr_t top, uint32 attributes)
+status_t
+PPCVMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes)
 {
 	// XXX finish
 	return B_ERROR;
 }
 
 
-static status_t
-clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags)
+status_t
+PPCVMTranslationMap::ClearFlags(addr_t virtualAddress, uint32 flags)
 {
-	page_table_entry *entry = lookup_page_table_entry(map, virtualAddress);
+	page_table_entry *entry = LookupPageTableEntry(virtualAddress);
 	if (entry == NULL)
 		return B_NO_ERROR;
 
@@ -434,8 +526,8 @@ clear_flags_tmap(vm_translation_map *map, addr_t virtualAddress, uint32 flags)
 }
 
 
-static void
-flush_tmap(vm_translation_map *map)
+void
+PPCVMTranslationMap::Flush()
 {
 	// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
 	// even cut it here. We are supposed to invalidate all TLB entries for this
@@ -460,103 +552,31 @@ put_physical_page_tmap(addr_t virtualAddress, void *handle)
 }
 
 
-static vm_translation_map_ops tmap_ops = {
-	destroy_tmap,
-	lock_tmap,
-	unlock_tmap,
-	map_max_pages_need,
-	map_tmap,
-	unmap_tmap,
-	query_tmap,
-	query_tmap,
-	get_mapped_size_tmap,
-	protect_tmap,
-	clear_flags_tmap,
-	flush_tmap,
-	get_physical_page_tmap,
-	put_physical_page_tmap,
-	get_physical_page_tmap,	// *_current_cpu()
-	put_physical_page_tmap,	// *_current_cpu()
-	get_physical_page_tmap,	// *_debug()
-	put_physical_page_tmap,	// *_debug()
-		// TODO: Replace the *_current_cpu() and *_debug() versions!
-
-	generic_vm_memset_physical,
-	generic_vm_memcpy_from_physical,
-	generic_vm_memcpy_to_physical,
-	generic_vm_memcpy_physical_page
-		// TODO: Verify that this is safe to use!
-};
-
-
 // #pragma mark -
 // VM API
 
 
 status_t
-arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
+arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
 {
-	// initialize the new object
-	map->ops = &tmap_ops;
-	map->map_count = 0;
-
-	recursive_lock_init(&map->lock, "translation map");
-
-	map->arch_data = (vm_translation_map_arch_info *)malloc(sizeof(vm_translation_map_arch_info));
-	if (map->arch_data == NULL) {
-		if (!kernel)
-			recursive_lock_destroy(&map->lock);
+	PPCVMTranslationMap* map = new(std::nothrow) PPCVMTranslationMap;
+	if (map == NULL)
 		return B_NO_MEMORY;
+
+	status_t error = map->Init(kernel);
+	if (error != B_OK) {
+		delete map;
+		return error;
 	}
 
-	cpu_status state = disable_interrupts();
-	acquire_spinlock(&sVSIDBaseBitmapLock);
-
-	// allocate a VSID base for this one
-	if (kernel) {
-		// The boot loader has set up the segment registers for identical
-		// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
-		// latter one for mapping the kernel address space (0x80000000...), the
-		// former one for the lower addresses required by the Open Firmware
-		// services.
-		map->arch_data->vsid_base = 0;
-		sVSIDBaseBitmap[0] |= 0x3;
-	} else {
-		int i = 0;
-
-		while (i < MAX_VSID_BASES) {
-			if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
-				i += 32;
-				continue;
-			}
-			if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
-				// we found it
-				sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
-				break;
-			}
-			i++;
-		}
-		if (i >= MAX_VSID_BASES)
-			panic("vm_translation_map_create: out of VSID bases\n");
-		map->arch_data->vsid_base = i << VSID_BASE_SHIFT;
-	}
-
-	release_spinlock(&sVSIDBaseBitmapLock);
-	restore_interrupts(state);
-
+	*_map = map;
 	return B_OK;
 }
 
 
-status_t
-arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
-{
-	return B_OK;
-}
-
-
 status_t
-arch_vm_translation_map_init(kernel_args *args)
+arch_vm_translation_map_init(kernel_args *args,
+	VMPhysicalPageMapper** _physicalPageMapper)
 {
 	sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
 	sPageTableSize = args->arch_args.page_table.size;
@@ -568,6 +588,9 @@ arch_vm_translation_map_init(kernel_args *args)
 	if (error != B_OK)
 		return error;
 
+	new(&sPhysicalPageMapper) GenericVMPhysicalPageMapper;
+
+	*_physicalPageMapper = &sPhysicalPageMapper;
 	return B_OK;
 }
 
@@ -682,12 +705,13 @@ ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
 	physicalAddress = ROUNDDOWN(physicalAddress, B_PAGE_SIZE);
 
 	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
+	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
+		addressSpace->TranslationMap());
 
 	// map the pages
 	for (; virtualAddress < virtualEnd;
 			virtualAddress += B_PAGE_SIZE, physicalAddress += B_PAGE_SIZE) {
-		status_t error = map_tmap(&addressSpace->TranslationMap(),
-			virtualAddress, physicalAddress,
+		status_t error = map->Map(virtualAddress, physicalAddress,
 			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 		if (error != B_OK)
 			return error;
@@ -705,9 +729,10 @@ ppc_unmap_address_range(addr_t virtualAddress, size_t size)
 
 	VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
 
+	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
+		addressSpace->TranslationMap());
 	for (0; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
-		remove_page_table_entry(&addressSpace->TranslationMap(),
-			virtualAddress);
+		map->RemovePageTableEntry(virtualAddress);
 }
 
@@ -727,8 +752,9 @@ ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
 		return error;
 
 	// get the area's first physical page
-	page_table_entry *entry = lookup_page_table_entry(
-		&addressSpace->TranslationMap(), virtualAddress);
+	PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
+		addressSpace->TranslationMap());
+	page_table_entry *entry = map->LookupPageTableEntry(virtualAddress);
 	if (!entry)
 		return B_ERROR;
 	addr_t physicalBase = entry->physical_page_number << 12;
src/system/kernel/arch/x86/X86VMTranslationMap.h (new file, 56 lines)
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+#ifndef KERNEL_ARCH_X86_X86_VM_TRANSLATION_MAP_H
+#define KERNEL_ARCH_X86_X86_VM_TRANSLATION_MAP_H
+
+
+#include <vm/VMTranslationMap.h>
+
+
+struct X86VMTranslationMap : VMTranslationMap {
+	X86VMTranslationMap();
+	virtual ~X86VMTranslationMap();
+
+	status_t Init(bool kernel);
+
+	inline vm_translation_map_arch_info* ArchData() const
+		{ return fArchData; }
+	inline void* PhysicalPageDir() const
+		{ return fArchData->pgdir_phys; }
+
+	virtual status_t InitPostSem();
+
+	virtual status_t Lock();
+	virtual status_t Unlock();
+
+	virtual addr_t MappedSize() const;
+	virtual size_t MaxPagesNeededToMap(addr_t start,
+		addr_t end) const;
+
+	virtual status_t Map(addr_t virtualAddress,
+		addr_t physicalAddress,
+		uint32 attributes);
+	virtual status_t Unmap(addr_t start, addr_t end);
+
+	virtual status_t Query(addr_t virtualAddress,
+		addr_t* _physicalAddress,
+		uint32* _flags);
+	virtual status_t QueryInterrupt(addr_t virtualAddress,
+		addr_t* _physicalAddress,
+		uint32* _flags);
+
+	virtual status_t Protect(addr_t base, addr_t top,
+		uint32 attributes);
+	virtual status_t ClearFlags(addr_t virtualAddress,
+		uint32 flags);
+
+	virtual void Flush();
+
+protected:
+	vm_translation_map_arch_info* fArchData;
+};
+
+
+#endif	// KERNEL_ARCH_X86_X86_VM_TRANSLATION_MAP_H
@@ -31,6 +31,7 @@
 
 #include "interrupts.h"
 #include "x86_paging.h"
+#include "X86VMTranslationMap.h"
 
 
 #define DUMP_FEATURE_STRING 1
@@ -664,7 +665,8 @@ arch_cpu_init_post_vm(kernel_args *args)
 		B_FULL_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
 
 	vm_translation_map_arch_info* kernelArchTranslationMap
-		= VMAddressSpace::Kernel()->TranslationMap().arch_data;
+		= static_cast<X86VMTranslationMap*>(
+			VMAddressSpace::Kernel()->TranslationMap())->ArchData();
 
 	// setup task-state segments
 	for (i = 0; i < args->num_cpus; i++) {
@@ -26,6 +26,7 @@
 #include <vm/VMAddressSpace.h>
 
 #include "x86_paging.h"
+#include "X86VMTranslationMap.h"
 
 
 //#define TRACE_ARCH_THREAD
@@ -199,7 +200,7 @@ x86_next_page_directory(struct thread *from, struct thread *to)
 	if (toAddressSpace == NULL)
 		toAddressSpace = VMAddressSpace::Kernel();
 
-	return i386_translation_map_get_pgdir(&toAddressSpace->TranslationMap());
+	return i386_translation_map_get_pgdir(toAddressSpace->TranslationMap());
 }
 
 
@@ -370,7 +371,8 @@ arch_thread_context_switch(struct thread *from, struct thread *to)
 	addr_t newPageDirectory;
 	vm_translation_map_arch_info* toMap;
 	if (toAddressSpace != NULL
-		&& (toMap = toAddressSpace->TranslationMap().arch_data) != activeMap) {
+		&& (toMap = static_cast<X86VMTranslationMap*>(
+				toAddressSpace->TranslationMap())->ArchData()) != activeMap) {
 		// update on which CPUs the address space is used
 		int cpu = cpuData->cpu_num;
 		atomic_and(&activeMap->active_on_cpus, ~((uint32)1 << cpu));
@@ -7,6 +7,7 @@
  * Distributed under the terms of the NewOS License.
  */
 
 
+#include <arch/vm_translation_map.h>
 
 #include <stdlib.h>
@@ -27,6 +28,7 @@
 
 #include "x86_paging.h"
 #include "x86_physical_page_mapper.h"
+#include "X86VMTranslationMap.h"
 
 
 //#define TRACE_VM_TMAP
@@ -36,11 +38,15 @@
 #	define TRACE(x) ;
 #endif
 
 
 static page_table_entry *sPageHole = NULL;
 static page_directory_entry *sPageHolePageDir = NULL;
 static page_directory_entry *sKernelPhysicalPageDirectory = NULL;
 static page_directory_entry *sKernelVirtualPageDirectory = NULL;
+
+static X86PhysicalPageMapper* sPhysicalPageMapper;
+static TranslationMapPhysicalPageMapper* sKernelPhysicalPageMapper;
 
 
 // Accessor class to reuse the SinglyLinkedListLink of DeferredDeletable for
 // vm_translation_map_arch_info.
@@ -77,13 +83,9 @@ static spinlock sTMapListLock;
 			B_PAGE_SIZE * 1024)))
 #define FIRST_KERNEL_PGDIR_ENT (VADDR_TO_PDENT(KERNEL_BASE))
 #define NUM_KERNEL_PGDIR_ENTS (VADDR_TO_PDENT(KERNEL_SIZE))
-#define IS_KERNEL_MAP(map) (map->arch_data->pgdir_phys \
+#define IS_KERNEL_MAP(map) (fArchData->pgdir_phys \
 		== sKernelPhysicalPageDirectory)
 
 static status_t early_query(addr_t va, addr_t *out_physical);
-
-static void flush_tmap(vm_translation_map *map);
 
 
 vm_translation_map_arch_info::vm_translation_map_arch_info()
 	:
@@ -173,9 +175,9 @@ put_page_table_entry_in_pgtable(page_table_entry* entry,
 
 
 void *
-i386_translation_map_get_pgdir(vm_translation_map *map)
+i386_translation_map_get_pgdir(VMTranslationMap* map)
 {
-	return map->arch_data->pgdir_phys;
+	return static_cast<X86VMTranslationMap*>(map)->PhysicalPageDir();
 }
 
 
@@ -241,21 +243,22 @@ x86_early_prepare_page_tables(page_table_entry* pageTables, addr_t address,
 // #pragma mark - VM ops
 
 
-static void
-destroy_tmap(vm_translation_map *map)
+X86VMTranslationMap::X86VMTranslationMap()
 {
-	if (map == NULL)
-		return;
+}
+
 
-	if (map->arch_data->page_mapper != NULL)
-		map->arch_data->page_mapper->Delete();
-
-	if (map->arch_data->pgdir_virt != NULL) {
+X86VMTranslationMap::~X86VMTranslationMap()
+{
+	if (fArchData->page_mapper != NULL)
+		fArchData->page_mapper->Delete();
+
+	if (fArchData->pgdir_virt != NULL) {
 		// cycle through and free all of the user space pgtables
 		for (uint32 i = VADDR_TO_PDENT(USER_BASE);
 				i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
-			if ((map->arch_data->pgdir_virt[i] & X86_PDE_PRESENT) != 0) {
-				addr_t address = map->arch_data->pgdir_virt[i]
+			if ((fArchData->pgdir_virt[i] & X86_PDE_PRESENT) != 0) {
+				addr_t address = fArchData->pgdir_virt[i]
 					& X86_PDE_ADDRESS_MASK;
 				vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
 				if (!page)
@@ -266,25 +269,96 @@ destroy_tmap(vm_translation_map *map)
 			}
 		}
 
-		map->arch_data->RemoveReference();
+		fArchData->RemoveReference();
 	}
+}
 
-	recursive_lock_destroy(&map->lock);
-}
+
+status_t
+X86VMTranslationMap::Init(bool kernel)
+{
+	TRACE(("X86VMTranslationMap::Init()\n"));
+
+	fArchData = new(std::nothrow) vm_translation_map_arch_info;
+	if (fArchData == NULL)
+		return B_NO_MEMORY;
+
+	fArchData->active_on_cpus = 0;
+	fArchData->num_invalidate_pages = 0;
+	fArchData->page_mapper = NULL;
+
+	if (!kernel) {
+		// user
+		// allocate a physical page mapper
+		status_t error = sPhysicalPageMapper
+			->CreateTranslationMapPhysicalPageMapper(
+				&fArchData->page_mapper);
+		if (error != B_OK)
+			return error;
+
+		// allocate a pgdir
+		fArchData->pgdir_virt = (page_directory_entry *)memalign(
+			B_PAGE_SIZE, B_PAGE_SIZE);
+		if (fArchData->pgdir_virt == NULL) {
+			fArchData->page_mapper->Delete();
+			return B_NO_MEMORY;
+		}
+		vm_get_page_mapping(VMAddressSpace::KernelID(),
+			(addr_t)fArchData->pgdir_virt,
+			(addr_t*)&fArchData->pgdir_phys);
+	} else {
+		// kernel
+		// get the physical page mapper
+		fArchData->page_mapper = sKernelPhysicalPageMapper;
+
+		// we already know the kernel pgdir mapping
+		fArchData->pgdir_virt = sKernelVirtualPageDirectory;
+		fArchData->pgdir_phys = sKernelPhysicalPageDirectory;
+	}
+
+	// zero out the bottom portion of the new pgdir
+	memset(fArchData->pgdir_virt + FIRST_USER_PGDIR_ENT, 0,
+		NUM_USER_PGDIR_ENTS * sizeof(page_directory_entry));
+
+	// insert this new map into the map list
+	{
+		int state = disable_interrupts();
+		acquire_spinlock(&sTMapListLock);
+
+		// copy the top portion of the pgdir from the current one
+		memcpy(fArchData->pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
+			sKernelVirtualPageDirectory + FIRST_KERNEL_PGDIR_ENT,
+			NUM_KERNEL_PGDIR_ENTS * sizeof(page_directory_entry));
+
+		sTMapList.Add(fArchData);
+
+		release_spinlock(&sTMapListLock);
+		restore_interrupts(state);
+	}
+
+	return B_OK;
+}
+
+
+status_t
+X86VMTranslationMap::InitPostSem()
+{
+	return B_OK;
+}
 
 
 /*!	Acquires the map's recursive lock, and resets the invalidate pages counter
 	in case it's the first locking recursion.
 */
-static status_t
-lock_tmap(vm_translation_map *map)
+status_t
+X86VMTranslationMap::Lock()
 {
 	TRACE(("lock_tmap: map %p\n", map));
 
-	recursive_lock_lock(&map->lock);
-	if (recursive_lock_get_recursion(&map->lock) == 1) {
+	recursive_lock_lock(&fLock);
+	if (recursive_lock_get_recursion(&fLock) == 1) {
 		// we were the first one to grab the lock
 		TRACE(("clearing invalidated page count\n"));
-		map->arch_data->num_invalidate_pages = 0;
+		fArchData->num_invalidate_pages = 0;
 	}
 
 	return B_OK;
@@ -295,23 +369,23 @@ lock_tmap(vm_translation_map *map)
 	flush all pending changes of this map (ie. flush TLB caches as
 	needed).
 */
-static status_t
-unlock_tmap(vm_translation_map *map)
+status_t
+X86VMTranslationMap::Unlock()
 {
 	TRACE(("unlock_tmap: map %p\n", map));
 
-	if (recursive_lock_get_recursion(&map->lock) == 1) {
+	if (recursive_lock_get_recursion(&fLock) == 1) {
 		// we're about to release it for the last time
-		flush_tmap(map);
+		X86VMTranslationMap::Flush();
 	}
 
-	recursive_lock_unlock(&map->lock);
+	recursive_lock_unlock(&fLock);
 	return B_OK;
 }
 
 
-static size_t
-map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
+size_t
+X86VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
 {
 	// If start == 0, the actual base address is not yet known to the caller and
 	// we shall assume the worst case.
@@ -325,8 +399,8 @@ map_max_pages_need(vm_translation_map */*map*/, addr_t start, addr_t end)
 }
 
 
-static status_t
-map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
+status_t
+X86VMTranslationMap::Map(addr_t va, addr_t pa, uint32 attributes)
 {
 	TRACE(("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va));
 
@@ -338,7 +412,7 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 	dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
 	dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
 */
-	page_directory_entry* pd = map->arch_data->pgdir_virt;
+	page_directory_entry* pd = fArchData->pgdir_virt;
 
 	// check to see if a page table exists for this range
 	uint32 index = VADDR_TO_PDENT(va);
@@ -368,14 +442,14 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 				&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS))
 			x86_update_all_pgdirs(index, pd[index]);
 
-		map->map_count++;
+		fMapCount++;
 	}
 
 	// now, fill in the pentry
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+	page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 	index = VADDR_TO_PTENT(va);
 
@@ -389,16 +463,16 @@ map_tmap(vm_translation_map *map, addr_t va, addr_t pa, uint32 attributes)
 	// Note: We don't need to invalidate the TLB for this address, as previously
 	// the entry was not present and the TLB doesn't cache those entries.
 
-	map->map_count++;
+	fMapCount++;
 
 	return 0;
 }
 
 
-static status_t
-unmap_tmap(vm_translation_map *map, addr_t start, addr_t end)
+status_t
+X86VMTranslationMap::Unmap(addr_t start, addr_t end)
 {
-	page_directory_entry *pd = map->arch_data->pgdir_virt;
+	page_directory_entry *pd = fArchData->pgdir_virt;
 
 	start = ROUNDDOWN(start, B_PAGE_SIZE);
 	end = ROUNDUP(end, B_PAGE_SIZE);
@@ -419,7 +493,7 @@ restart:
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+	page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 
 	for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
@@ -433,19 +507,19 @@ restart:
 
 		page_table_entry oldEntry = clear_page_table_entry_flags(&pt[index],
 			X86_PTE_PRESENT);
-		map->map_count--;
+		fMapCount--;
 
 		if ((oldEntry & X86_PTE_ACCESSED) != 0) {
 			// Note, that we only need to invalidate the address, if the
 			// accessed flags was set, since only then the entry could have been
 			// in any TLB.
-			if (map->arch_data->num_invalidate_pages
+			if (fArchData->num_invalidate_pages
 					< PAGE_INVALIDATE_CACHE_SIZE) {
-				map->arch_data->pages_to_invalidate[
-					map->arch_data->num_invalidate_pages] = start;
+				fArchData->pages_to_invalidate[
+					fArchData->num_invalidate_pages] = start;
 			}
 
-			map->arch_data->num_invalidate_pages++;
+			fArchData->num_invalidate_pages++;
 		}
 	}
 
@@ -455,16 +529,15 @@ restart:
 }
 
 
-static status_t
-query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical,
-	uint32 *_flags)
+status_t
+X86VMTranslationMap::Query(addr_t va, addr_t *_physical, uint32 *_flags)
 {
 	// default the flags to not present
 	*_flags = 0;
 	*_physical = 0;
 
 	int index = VADDR_TO_PDENT(va);
-	page_directory_entry *pd = map->arch_data->pgdir_virt;
+	page_directory_entry *pd = fArchData->pgdir_virt;
 	if ((pd[index] & X86_PDE_PRESENT) == 0) {
 		// no pagetable here
 		return B_OK;
@@ -473,7 +546,7 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical,
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+	page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
 
@@ -499,22 +572,22 @@ query_tmap(vm_translation_map *map, addr_t va, addr_t *_physical,
 }
 
 
-static status_t
-query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
+status_t
+X86VMTranslationMap::QueryInterrupt(addr_t va, addr_t *_physical,
 	uint32 *_flags)
 {
 	*_flags = 0;
 	*_physical = 0;
 
 	int index = VADDR_TO_PDENT(va);
-	page_directory_entry* pd = map->arch_data->pgdir_virt;
+	page_directory_entry* pd = fArchData->pgdir_virt;
 	if ((pd[index] & X86_PDE_PRESENT) == 0) {
 		// no pagetable here
 		return B_OK;
 	}
 
 	// map page table entry
-	page_table_entry* pt = gPhysicalPageMapper->InterruptGetPageTableAt(
+	page_table_entry* pt = sPhysicalPageMapper->InterruptGetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
 
@@ -536,18 +609,17 @@ query_tmap_interrupt(vm_translation_map *map, addr_t va, addr_t *_physical,
 }
 
 
-static addr_t
-get_mapped_size_tmap(vm_translation_map *map)
+addr_t
+X86VMTranslationMap::MappedSize() const
 {
-	return map->map_count;
+	return fMapCount;
 }
 
 
-static status_t
-protect_tmap(vm_translation_map *map, addr_t start, addr_t end,
-	uint32 attributes)
+status_t
+X86VMTranslationMap::Protect(addr_t start, addr_t end, uint32 attributes)
 {
-	page_directory_entry *pd = map->arch_data->pgdir_virt;
+	page_directory_entry *pd = fArchData->pgdir_virt;
 
 	start = ROUNDDOWN(start, B_PAGE_SIZE);
 	end = ROUNDUP(end, B_PAGE_SIZE);
@@ -569,7 +641,7 @@ restart:
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
+	page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 
 	for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
@@ -597,13 +669,13 @@ restart:
 			// Note, that we only need to invalidate the address, if the
 			// accessed flags was set, since only then the entry could have been
 			// in any TLB.
-			if (map->arch_data->num_invalidate_pages
+			if (fArchData->num_invalidate_pages
 					< PAGE_INVALIDATE_CACHE_SIZE) {
-				map->arch_data->pages_to_invalidate[
-					map->arch_data->num_invalidate_pages] = start;
+				fArchData->pages_to_invalidate[
+					fArchData->num_invalidate_pages] = start;
 			}
 
-			map->arch_data->num_invalidate_pages++;
+			fArchData->num_invalidate_pages++;
 		}
 	}
 
@@ -613,11 +685,11 @@ restart:
 }
 
 
-static status_t
-clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
+status_t
+X86VMTranslationMap::ClearFlags(addr_t va, uint32 flags)
 {
 	int index = VADDR_TO_PDENT(va);
-	page_directory_entry* pd = map->arch_data->pgdir_virt;
+	page_directory_entry* pd = fArchData->pgdir_virt;
 	if ((pd[index] & X86_PDE_PRESENT) == 0) {
 		// no pagetable here
 		return B_OK;
@ -629,7 +701,7 @@ clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
|
||||
struct thread* thread = thread_get_current_thread();
|
||||
ThreadCPUPinner pinner(thread);
|
||||
|
||||
page_table_entry* pt = map->arch_data->page_mapper->GetPageTableAt(
|
||||
page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
|
||||
pd[index] & X86_PDE_ADDRESS_MASK);
|
||||
index = VADDR_TO_PTENT(va);
|
||||
|
||||
@ -640,31 +712,31 @@ clear_flags_tmap(vm_translation_map *map, addr_t va, uint32 flags)
|
||||
pinner.Unlock();
|
||||
|
||||
if ((oldEntry & flagsToClear) != 0) {
|
||||
if (map->arch_data->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
|
||||
map->arch_data->pages_to_invalidate[
|
||||
map->arch_data->num_invalidate_pages] = va;
|
||||
if (fArchData->num_invalidate_pages < PAGE_INVALIDATE_CACHE_SIZE) {
|
||||
fArchData->pages_to_invalidate[
|
||||
fArchData->num_invalidate_pages] = va;
|
||||
}
|
||||
|
||||
map->arch_data->num_invalidate_pages++;
|
||||
fArchData->num_invalidate_pages++;
|
||||
}
|
||||
|
||||
return B_OK;
|
||||
}


-static void
-flush_tmap(vm_translation_map *map)
+void
+X86VMTranslationMap::Flush()
{
-	if (map->arch_data->num_invalidate_pages <= 0)
+	if (fArchData->num_invalidate_pages <= 0)
		return;

	struct thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

-	if (map->arch_data->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
+	if (fArchData->num_invalidate_pages > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE(("flush_tmap: %d pages to invalidate, invalidate all\n",
-			map->arch_data->num_invalidate_pages));
+			fArchData->num_invalidate_pages));

		if (IS_KERNEL_MAP(map)) {
			arch_cpu_global_TLB_invalidate();

@@ -676,7 +748,7 @@ flush_tmap(vm_translation_map *map)
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
-			uint32 cpuMask = map->arch_data->active_on_cpus
+			uint32 cpuMask = fArchData->active_on_cpus
				& ~((uint32)1 << cpu);
			if (cpuMask != 0) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,

@@ -685,146 +757,58 @@ flush_tmap(vm_translation_map *map)
		}
	} else {
		TRACE(("flush_tmap: %d pages to invalidate, invalidate list\n",
-			map->arch_data->num_invalidate_pages));
+			fArchData->num_invalidate_pages));

-		arch_cpu_invalidate_TLB_list(map->arch_data->pages_to_invalidate,
-			map->arch_data->num_invalidate_pages);
+		arch_cpu_invalidate_TLB_list(fArchData->pages_to_invalidate,
+			fArchData->num_invalidate_pages);

		if (IS_KERNEL_MAP(map)) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
-				(uint32)map->arch_data->pages_to_invalidate,
-				map->arch_data->num_invalidate_pages, 0, NULL,
+				(uint32)fArchData->pages_to_invalidate,
+				fArchData->num_invalidate_pages, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
-			uint32 cpuMask = map->arch_data->active_on_cpus
+			uint32 cpuMask = fArchData->active_on_cpus
				& ~((uint32)1 << cpu);
			if (cpuMask != 0) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
-					(uint32)map->arch_data->pages_to_invalidate,
-					map->arch_data->num_invalidate_pages, 0, NULL,
+					(uint32)fArchData->pages_to_invalidate,
+					fArchData->num_invalidate_pages, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
-	map->arch_data->num_invalidate_pages = 0;
+	fArchData->num_invalidate_pages = 0;

	thread_unpin_from_current_cpu(thread);
}
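
The invalidation cache that drives Flush() is easier to see in isolation: Protect() and ClearFlags() record addresses until Flush() either invalidates the recorded list or, if the cache overflowed, everything. A minimal, self-contained model of the scheme (illustrative size constant; the real code additionally forwards the invalidations to other CPUs via ICIs):

#include <cstddef>
#include <cstdio>

const size_t kInvalidateCacheSize = 32;   // stands in for PAGE_INVALIDATE_CACHE_SIZE

struct InvalidationCache {
	unsigned long pages[kInvalidateCacheSize];
	size_t count = 0;

	void Add(unsigned long va)
	{
		// Record the address if there is room; a count past the end means
		// "overflowed -- invalidate everything on Flush()".
		if (count < kInvalidateCacheSize)
			pages[count] = va;
		count++;
	}

	void Flush()
	{
		if (count == 0)
			return;
		if (count > kInvalidateCacheSize)
			printf("invalidate all (%zu pages, global flush)\n", count);
		else {
			for (size_t i = 0; i < count; i++)
				printf("invalidate page %#lx\n", pages[i]);
		}
		count = 0;
	}
};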


-static vm_translation_map_ops tmap_ops = {
-	destroy_tmap,
-	lock_tmap,
-	unlock_tmap,
-	map_max_pages_need,
-	map_tmap,
-	unmap_tmap,
-	query_tmap,
-	query_tmap_interrupt,
-	get_mapped_size_tmap,
-	protect_tmap,
-	clear_flags_tmap,
-	flush_tmap
-
-	// The physical page ops are initialized by the respective physical page
-	// mapper.
-};


// #pragma mark - VM API


status_t
-arch_vm_translation_map_init_map(vm_translation_map *map, bool kernel)
+arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
+	X86VMTranslationMap* map = new(std::nothrow) X86VMTranslationMap;
+	if (map == NULL)
+		return B_NO_MEMORY;
+
-	TRACE(("vm_translation_map_create\n"));
-
-	// initialize the new object
-	map->ops = &tmap_ops;
-	map->map_count = 0;
-
-	recursive_lock_init(&map->lock, "translation map");
-	CObjectDeleter<recursive_lock> lockDeleter(&map->lock,
-		&recursive_lock_destroy);
-
-	map->arch_data = new(std::nothrow) vm_translation_map_arch_info;
-	if (map->arch_data == NULL)
-		return B_NO_MEMORY;
-	ObjectDeleter<vm_translation_map_arch_info> archInfoDeleter(map->arch_data);
-
-	map->arch_data->active_on_cpus = 0;
-	map->arch_data->num_invalidate_pages = 0;
-	map->arch_data->page_mapper = NULL;
-
-	if (!kernel) {
-		// user
-		// allocate a physical page mapper
-		status_t error = gPhysicalPageMapper
-			->CreateTranslationMapPhysicalPageMapper(
-				&map->arch_data->page_mapper);
-		if (error != B_OK)
-			return error;
-
-		// allocate a pgdir
-		map->arch_data->pgdir_virt = (page_directory_entry *)memalign(
-			B_PAGE_SIZE, B_PAGE_SIZE);
-		if (map->arch_data->pgdir_virt == NULL) {
-			map->arch_data->page_mapper->Delete();
-			return B_NO_MEMORY;
-		}
-		vm_get_page_mapping(VMAddressSpace::KernelID(),
-			(addr_t)map->arch_data->pgdir_virt,
-			(addr_t*)&map->arch_data->pgdir_phys);
-	} else {
-		// kernel
-		// get the physical page mapper
-		map->arch_data->page_mapper = gKernelPhysicalPageMapper;
-
-		// we already know the kernel pgdir mapping
-		map->arch_data->pgdir_virt = sKernelVirtualPageDirectory;
-		map->arch_data->pgdir_phys = sKernelPhysicalPageDirectory;
+	status_t error = map->Init(kernel);
+	if (error != B_OK) {
+		delete map;
+		return error;
	}

-	// zero out the bottom portion of the new pgdir
-	memset(map->arch_data->pgdir_virt + FIRST_USER_PGDIR_ENT, 0,
-		NUM_USER_PGDIR_ENTS * sizeof(page_directory_entry));
-
-	// insert this new map into the map list
-	{
-		int state = disable_interrupts();
-		acquire_spinlock(&sTMapListLock);
-
-		// copy the top portion of the pgdir from the current one
-		memcpy(map->arch_data->pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
-			sKernelVirtualPageDirectory + FIRST_KERNEL_PGDIR_ENT,
-			NUM_KERNEL_PGDIR_ENTS * sizeof(page_directory_entry));
-
-		sTMapList.Add(map->arch_data);
-
-		release_spinlock(&sTMapListLock);
-		restore_interrupts(state);
-	}
-
-	archInfoDeleter.Detach();
-	lockDeleter.Detach();
-
+	*_map = map;
	return B_OK;
}
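
The shape of the change, reduced to its essentials: the per-architecture operations vector with its explicit map argument becomes virtual dispatch on a class. Illustrative declarations only, assuming nothing beyond what the diff shows:

#include <cstdint>

typedef int32_t status_t;
typedef uintptr_t addr_t;
typedef uint32_t uint32;

// old style: a struct of function pointers plus an untyped map argument
struct vm_translation_map;
struct vm_translation_map_ops {
	status_t (*map)(vm_translation_map* map, addr_t va, addr_t pa,
		uint32 attributes);
};
struct vm_translation_map {
	vm_translation_map_ops* ops;
};
// call site: map->ops->map(map, va, pa, attributes);

// new style: the vector entries are virtual methods on the map itself
class TranslationMapSketch {
public:
	virtual ~TranslationMapSketch() {}
	virtual status_t Map(addr_t va, addr_t pa, uint32 attributes) = 0;
};
// call site: map->Map(va, pa, attributes);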


-status_t
-arch_vm_translation_map_init_kernel_map_post_sem(vm_translation_map *map)
-{
-	return B_OK;
-}
-
-
status_t
-arch_vm_translation_map_init(kernel_args *args)
+arch_vm_translation_map_init(kernel_args *args,
+	VMPhysicalPageMapper** _physicalPageMapper)
{
	TRACE(("vm_translation_map_init: entry\n"));

@@ -846,8 +830,9 @@ arch_vm_translation_map_init(kernel_args *args)
	B_INITIALIZE_SPINLOCK(&sTMapListLock);
	new (&sTMapList) ArchTMapList;

	// TODO: Select the best page mapper!
-	large_memory_physical_page_ops_init(args, &tmap_ops);
+	large_memory_physical_page_ops_init(args, sPhysicalPageMapper,
+		sKernelPhysicalPageMapper);

	// enable global page feature if available
	if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {

@@ -858,6 +843,7 @@ arch_vm_translation_map_init(kernel_args *args)

	TRACE(("vm_translation_map_init: done\n"));

+	*_physicalPageMapper = sPhysicalPageMapper;
	return B_OK;
}

@@ -891,7 +877,7 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
	if (area < B_OK)
		return area;

-	error = gPhysicalPageMapper->InitPostArea(args);
+	error = sPhysicalPageMapper->InitPostArea(args);
	if (error != B_OK)
		return error;

@@ -979,12 +965,12 @@ arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	// map the original page directory and get the entry
	void* handle;
	addr_t virtualPageDirectory;
-	status_t error = gPhysicalPageMapper->GetPageDebug(
+	status_t error = sPhysicalPageMapper->GetPageDebug(
		physicalPageDirectory, &virtualPageDirectory, &handle);
	if (error == B_OK) {
		pageDirectoryEntry
			= ((page_directory_entry*)virtualPageDirectory)[index];
-		gPhysicalPageMapper->PutPageDebug(virtualPageDirectory,
+		sPhysicalPageMapper->PutPageDebug(virtualPageDirectory,
			handle);
	} else
		pageDirectoryEntry = 0;

@@ -997,12 +983,12 @@ arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
	if ((pageDirectoryEntry & X86_PDE_PRESENT) != 0) {
		void* handle;
		addr_t virtualPageTable;
-		status_t error = gPhysicalPageMapper->GetPageDebug(
+		status_t error = sPhysicalPageMapper->GetPageDebug(
			pageDirectoryEntry & X86_PDE_ADDRESS_MASK, &virtualPageTable,
			&handle);
		if (error == B_OK) {
			pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
-			gPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
+			sPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
		} else
			pageTableEntry = 0;
	} else
@@ -6,15 +6,11 @@
#include "x86_physical_page_mapper.h"


-PhysicalPageMapper* gPhysicalPageMapper;
-TranslationMapPhysicalPageMapper* gKernelPhysicalPageMapper;
-
-
TranslationMapPhysicalPageMapper::~TranslationMapPhysicalPageMapper()
{
}


-PhysicalPageMapper::~PhysicalPageMapper()
+X86PhysicalPageMapper::~X86PhysicalPageMapper()
{
}

@@ -6,6 +6,8 @@
#define _KERNEL_ARCH_X86_PHYSICAL_PAGE_MAPPER_H


+#include <vm/VMTranslationMap.h>
+
#include "x86_paging.h"


@@ -24,9 +26,9 @@ public:
};


-class PhysicalPageMapper {
+class X86PhysicalPageMapper : public VMPhysicalPageMapper {
public:
-	virtual ~PhysicalPageMapper();
+	virtual ~X86PhysicalPageMapper();

	virtual status_t InitPostArea(kernel_args* args) = 0;

@@ -36,20 +38,12 @@ public:

	virtual page_table_entry* InterruptGetPageTableAt(
		addr_t physicalAddress) = 0;

	virtual status_t GetPageDebug(addr_t physicalAddress,
		addr_t* _virtualAddress,
		void** _handle) = 0;
	virtual status_t PutPageDebug(addr_t virtualAddress,
		void* _handle) = 0;
};

-extern PhysicalPageMapper* gPhysicalPageMapper;
-extern TranslationMapPhysicalPageMapper* gKernelPhysicalPageMapper;
-

status_t large_memory_physical_page_ops_init(kernel_args* args,
-	vm_translation_map_ops* ops);
+	X86PhysicalPageMapper*& _pageMapper,
+	TranslationMapPhysicalPageMapper*& _kernelPageMapper);


#endif	// _KERNEL_ARCH_X86_PHYSICAL_PAGE_MAPPER_H
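
Seen from above, the header changes leave this hierarchy in place. A sketch with trimmed-down, assumed signatures (the real classes declare many more methods):

//   VMPhysicalPageMapper              generic, machine-independent interface
//   └── X86PhysicalPageMapper         adds x86 page-table access hooks
//       └── LargeMemoryPhysicalPageMapper   concrete slot-pool strategy

struct VMPhysicalPageMapperSketch {
	virtual ~VMPhysicalPageMapperSketch() {}
	// Map one physical page somewhere addressable; *handle is opaque.
	virtual int GetPage(unsigned long pa, unsigned long* va,
		void** handle) = 0;
	virtual int PutPage(unsigned long va, void* handle) = 0;
};

struct X86PhysicalPageMapperSketch : VMPhysicalPageMapperSketch {
	// x86-only hook: map a page table frame for interrupt-context walks.
	virtual unsigned* InterruptGetPageTableAt(unsigned long pa) = 0;
};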
@@ -21,6 +21,7 @@
	address space region).
*/


#include "x86_physical_page_mapper.h"

#include <new>

@@ -33,11 +34,11 @@
#include <util/AutoLock.h>
#include <util/DoublyLinkedList.h>
#include <vm/vm.h>
-#include <vm/vm_translation_map.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>

#include "x86_paging.h"
+#include "X86VMTranslationMap.h"


// The number of slots we reserve per translation map from mapping page tables.

@@ -134,8 +135,8 @@ public:
private:
	struct page_slot {
		PhysicalPageSlot* slot;
-		addr_t physicalAddress;
-		cpu_mask_t valid;
+		addr_t physicalAddress;
+		cpu_mask_t valid;
	};

	page_slot fSlots[SLOTS_PER_TRANSLATION_MAP];

@@ -144,11 +145,13 @@ private:
};


-class LargeMemoryPhysicalPageMapper : public PhysicalPageMapper {
+class LargeMemoryPhysicalPageMapper : public X86PhysicalPageMapper {
public:
	LargeMemoryPhysicalPageMapper();

-	status_t Init(kernel_args* args);
+	status_t Init(kernel_args* args,
+		TranslationMapPhysicalPageMapper*&
+			_kernelPageMapper);
	virtual status_t InitPostArea(kernel_args* args);

	virtual status_t CreateTranslationMapPhysicalPageMapper(

@@ -157,13 +160,13 @@ public:
	virtual page_table_entry* InterruptGetPageTableAt(
		addr_t physicalAddress);

-	inline status_t GetPage(addr_t physicalAddress,
+	virtual status_t GetPage(addr_t physicalAddress,
		addr_t* virtualAddress, void** handle);
-	inline status_t PutPage(addr_t virtualAddress, void* handle);
+	virtual status_t PutPage(addr_t virtualAddress, void* handle);

-	inline status_t GetPageCurrentCPU(addr_t physicalAddress,
+	virtual status_t GetPageCurrentCPU(addr_t physicalAddress,
		addr_t* virtualAddress, void** handle);
-	inline status_t PutPageCurrentCPU(addr_t virtualAddress,
+	virtual status_t PutPageCurrentCPU(addr_t virtualAddress,
		void* handle);

	virtual status_t GetPageDebug(addr_t physicalAddress,

@@ -171,6 +174,14 @@ public:
	virtual status_t PutPageDebug(addr_t virtualAddress,
		void* handle);

+	virtual status_t MemsetPhysical(addr_t address, int value,
+		size_t length);
+	virtual status_t MemcpyFromPhysical(void* to, addr_t from,
+		size_t length, bool user);
+	virtual status_t MemcpyToPhysical(addr_t to, const void* from,
+		size_t length, bool user);
+	virtual void MemcpyPhysicalPage(addr_t to, addr_t from);
+
	status_t GetSlot(bool canWait,
		PhysicalPageSlot*& slot);
	void PutSlot(PhysicalPageSlot* slot);
@@ -466,7 +477,8 @@ LargeMemoryPhysicalPageMapper::LargeMemoryPhysicalPageMapper()


status_t
-LargeMemoryPhysicalPageMapper::Init(kernel_args* args)
+LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
+	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
	// We reserve more, so that we can guarantee to align the base address
	// to page table ranges.

@@ -503,7 +515,7 @@ LargeMemoryPhysicalPageMapper::Init(kernel_args* args)
			"kernel translation map physical page mapper!");
		return error;
	}
-	gKernelPhysicalPageMapper = &fKernelMapper;
+	_kernelPageMapper = &fKernelMapper;

	// init the per-CPU data
	int32 cpuCount = smp_get_num_cpus();

@@ -654,6 +666,137 @@ LargeMemoryPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
}


+status_t
+LargeMemoryPhysicalPageMapper::MemsetPhysical(addr_t address, int value,
+	size_t length)
+{
+	addr_t pageOffset = address % B_PAGE_SIZE;
+
+	struct thread* thread = thread_get_current_thread();
+	ThreadCPUPinner _(thread);
+
+	PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num,
+		false);
+	PhysicalPageSlot* slot = slotQueue->GetSlot();
+
+	while (length > 0) {
+		slot->Map(address - pageOffset);
+
+		size_t toSet = min_c(length, B_PAGE_SIZE - pageOffset);
+		memset((void*)(slot->address + pageOffset), value, toSet);
+
+		length -= toSet;
+		address += toSet;
+		pageOffset = 0;
+	}
+
+	slotQueue->PutSlot(slot);
+
+	return B_OK;
+}
+
+
+status_t
+LargeMemoryPhysicalPageMapper::MemcpyFromPhysical(void* _to, addr_t from,
+	size_t length, bool user)
+{
+	uint8* to = (uint8*)_to;
+	addr_t pageOffset = from % B_PAGE_SIZE;
+
+	struct thread* thread = thread_get_current_thread();
+	ThreadCPUPinner _(thread);
+
+	PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num, user);
+	PhysicalPageSlot* slot = slotQueue->GetSlot();
+
+	status_t error = B_OK;
+
+	while (length > 0) {
+		size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);
+
+		slot->Map(from - pageOffset);
+
+		if (user) {
+			error = user_memcpy(to, (void*)(slot->address + pageOffset),
+				toCopy);
+			if (error != B_OK)
+				break;
+		} else
+			memcpy(to, (void*)(slot->address + pageOffset), toCopy);
+
+		to += toCopy;
+		from += toCopy;
+		length -= toCopy;
+		pageOffset = 0;
+	}
+
+	slotQueue->PutSlot(slot);
+
+	return error;
+}
+
+
+status_t
+LargeMemoryPhysicalPageMapper::MemcpyToPhysical(addr_t to, const void* _from,
+	size_t length, bool user)
+{
+	const uint8* from = (const uint8*)_from;
+	addr_t pageOffset = to % B_PAGE_SIZE;
+
+	struct thread* thread = thread_get_current_thread();
+	ThreadCPUPinner _(thread);
+
+	PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num, user);
+	PhysicalPageSlot* slot = slotQueue->GetSlot();
+
+	status_t error = B_OK;
+
+	while (length > 0) {
+		size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);
+
+		slot->Map(to - pageOffset);
+
+		if (user) {
+			error = user_memcpy((void*)(slot->address + pageOffset), from,
+				toCopy);
+			if (error != B_OK)
+				break;
+		} else
+			memcpy((void*)(slot->address + pageOffset), from, toCopy);
+
+		to += toCopy;
+		from += toCopy;
+		length -= toCopy;
+		pageOffset = 0;
+	}
+
+	slotQueue->PutSlot(slot);
+
+	return error;
+}
+
+
+void
+LargeMemoryPhysicalPageMapper::MemcpyPhysicalPage(addr_t to, addr_t from)
+{
+	struct thread* thread = thread_get_current_thread();
+	ThreadCPUPinner _(thread);
+
+	PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num,
+		false);
+	PhysicalPageSlot* fromSlot;
+	PhysicalPageSlot* toSlot;
+	slotQueue->GetSlots(fromSlot, toSlot);
+
+	fromSlot->Map(from);
+	toSlot->Map(to);
+
+	memcpy((void*)toSlot->address, (void*)fromSlot->address, B_PAGE_SIZE);
+
+	slotQueue->PutSlots(fromSlot, toSlot);
+}
+
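
All four new methods share one loop shape: remap a single slot window page by page and handle at most the remainder of the current physical page per iteration. A standalone model of that loop, with a flat buffer standing in for physical memory and an identity "mapping":

#include <algorithm>
#include <cstdint>
#include <cstring>

const size_t kPageSize = 4096;            // model of B_PAGE_SIZE
static uint8_t gPhysicalMemory[16 * kPageSize];

// Model of PhysicalPageSlot::Map(): returns a window onto one page.
static uint8_t* MapPage(uint64_t physicalAddress)
{
	return gPhysicalMemory + physicalAddress;   // identity "mapping"
}

void MemsetPhysical(uint64_t address, int value, size_t length)
{
	size_t pageOffset = address % kPageSize;
	while (length > 0) {
		uint8_t* window = MapPage(address - pageOffset);
		size_t toSet = std::min(length, kPageSize - pageOffset);
		memset(window + pageOffset, value, toSet);
		length -= toSet;
		address += toSet;
		pageOffset = 0;     // only the first page can start unaligned
	}
}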

status_t
LargeMemoryPhysicalPageMapper::GetSlot(bool canWait,
	PhysicalPageSlot*& slot)
@@ -744,16 +887,16 @@ LargeMemoryPhysicalPageMapper::_AllocatePool(PhysicalPageSlotPool*& _pool)

	// get the page table's physical address
	addr_t physicalTable;
-	vm_translation_map* map = &VMAddressSpace::Kernel()->TranslationMap();
+	X86VMTranslationMap* map = static_cast<X86VMTranslationMap*>(
+		VMAddressSpace::Kernel()->TranslationMap());
	uint32 dummyFlags;
	cpu_status state = disable_interrupts();
-	map->ops->query_interrupt(map, (addr_t)data, &physicalTable,
-		&dummyFlags);
+	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
	restore_interrupts(state);

	// put the page table into the page directory
	int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * 1024);
-	page_directory_entry* entry = &map->arch_data->pgdir_virt[index];
+	page_directory_entry* entry = &map->ArchData()->pgdir_virt[index];
	x86_put_pgtable_in_pgdir(entry, physicalTable,
		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	x86_update_all_pgdirs(index, *entry);

@@ -766,215 +909,16 @@ LargeMemoryPhysicalPageMapper::_AllocatePool(PhysicalPageSlotPool*& _pool)
}

-// #pragma mark - physical page operations
-
-
-static status_t
-large_memory_get_physical_page(addr_t physicalAddress, addr_t *_virtualAddress,
-	void **_handle)
-{
-	return sPhysicalPageMapper.GetPage(physicalAddress, _virtualAddress,
-		_handle);
-}
-
-
-static status_t
-large_memory_put_physical_page(addr_t virtualAddress, void *handle)
-{
-	return sPhysicalPageMapper.PutPage(virtualAddress, handle);
-}
-
-
-static status_t
-large_memory_get_physical_page_current_cpu(addr_t physicalAddress,
-	addr_t *_virtualAddress, void **_handle)
-{
-	return sPhysicalPageMapper.GetPageCurrentCPU(physicalAddress,
-		_virtualAddress, _handle);
-}
-
-
-static status_t
-large_memory_put_physical_page_current_cpu(addr_t virtualAddress, void *handle)
-{
-	return sPhysicalPageMapper.PutPageCurrentCPU(virtualAddress, handle);
-}
-
-
-static status_t
-large_memory_get_physical_page_debug(addr_t physicalAddress,
-	addr_t *_virtualAddress, void **_handle)
-{
-	return sPhysicalPageMapper.GetPageDebug(physicalAddress,
-		_virtualAddress, _handle);
-}
-
-
-static status_t
-large_memory_put_physical_page_debug(addr_t virtualAddress, void *handle)
-{
-	return sPhysicalPageMapper.PutPageDebug(virtualAddress, handle);
-}
-
-
-static status_t
-large_memory_memset_physical(addr_t address, int value,
-	size_t length)
-{
-	addr_t pageOffset = address % B_PAGE_SIZE;
-
-	struct thread* thread = thread_get_current_thread();
-	ThreadCPUPinner _(thread);
-
-	PhysicalPageSlotQueue* slotQueue = sPhysicalPageMapper.GetSlotQueue(
-		thread->cpu->cpu_num, false);
-	PhysicalPageSlot* slot = slotQueue->GetSlot();
-
-	while (length > 0) {
-		slot->Map(address - pageOffset);
-
-		size_t toSet = min_c(length, B_PAGE_SIZE - pageOffset);
-		memset((void*)(slot->address + pageOffset), value, toSet);
-
-		length -= toSet;
-		address += toSet;
-		pageOffset = 0;
-	}
-
-	slotQueue->PutSlot(slot);
-
-	return B_OK;
-}
-
-
-static status_t
-large_memory_memcpy_from_physical(void* _to, addr_t from, size_t length,
-	bool user)
-{
-	uint8* to = (uint8*)_to;
-	addr_t pageOffset = from % B_PAGE_SIZE;
-
-	struct thread* thread = thread_get_current_thread();
-	ThreadCPUPinner _(thread);
-
-	PhysicalPageSlotQueue* slotQueue = sPhysicalPageMapper.GetSlotQueue(
-		thread->cpu->cpu_num, user);
-	PhysicalPageSlot* slot = slotQueue->GetSlot();
-
-	status_t error = B_OK;
-
-	while (length > 0) {
-		size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);
-
-		slot->Map(from - pageOffset);
-
-		if (user) {
-			error = user_memcpy(to, (void*)(slot->address + pageOffset),
-				toCopy);
-			if (error != B_OK)
-				break;
-		} else
-			memcpy(to, (void*)(slot->address + pageOffset), toCopy);
-
-		to += toCopy;
-		from += toCopy;
-		length -= toCopy;
-		pageOffset = 0;
-	}
-
-	slotQueue->PutSlot(slot);
-
-	return error;
-}
-
-
-static status_t
-large_memory_memcpy_to_physical(addr_t to, const void* _from, size_t length,
-	bool user)
-{
-	const uint8* from = (const uint8*)_from;
-	addr_t pageOffset = to % B_PAGE_SIZE;
-
-	struct thread* thread = thread_get_current_thread();
-	ThreadCPUPinner _(thread);
-
-	PhysicalPageSlotQueue* slotQueue = sPhysicalPageMapper.GetSlotQueue(
-		thread->cpu->cpu_num, user);
-	PhysicalPageSlot* slot = slotQueue->GetSlot();
-
-	status_t error = B_OK;
-
-	while (length > 0) {
-		size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);
-
-		slot->Map(to - pageOffset);
-
-		if (user) {
-			error = user_memcpy((void*)(slot->address + pageOffset), from,
-				toCopy);
-			if (error != B_OK)
-				break;
-		} else
-			memcpy((void*)(slot->address + pageOffset), from, toCopy);
-
-		to += toCopy;
-		from += toCopy;
-		length -= toCopy;
-		pageOffset = 0;
-	}
-
-	slotQueue->PutSlot(slot);
-
-	return error;
-}
-
-
-static void
-large_memory_memcpy_physical_page(addr_t to, addr_t from)
-{
-	struct thread* thread = thread_get_current_thread();
-	ThreadCPUPinner _(thread);
-
-	PhysicalPageSlotQueue* slotQueue = sPhysicalPageMapper.GetSlotQueue(
-		thread->cpu->cpu_num, false);
-	PhysicalPageSlot* fromSlot;
-	PhysicalPageSlot* toSlot;
-	slotQueue->GetSlots(fromSlot, toSlot);
-
-	fromSlot->Map(from);
-	toSlot->Map(to);
-
-	memcpy((void*)toSlot->address, (void*)fromSlot->address, B_PAGE_SIZE);
-
-	slotQueue->PutSlots(fromSlot, toSlot);
-}
-
-
// #pragma mark - Initialization


status_t
large_memory_physical_page_ops_init(kernel_args* args,
-	vm_translation_map_ops* ops)
+	X86PhysicalPageMapper*& _pageMapper,
+	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
-	gPhysicalPageMapper
-		= new(&sPhysicalPageMapper) LargeMemoryPhysicalPageMapper;
-	sPhysicalPageMapper.Init(args);
-
-	// init physical ops
-	ops->get_physical_page = &large_memory_get_physical_page;
-	ops->put_physical_page = &large_memory_put_physical_page;
-	ops->get_physical_page_current_cpu
-		= &large_memory_get_physical_page_current_cpu;
-	ops->put_physical_page_current_cpu
-		= &large_memory_put_physical_page_current_cpu;
-	ops->get_physical_page_debug = &large_memory_get_physical_page_debug;
-	ops->put_physical_page_debug = &large_memory_put_physical_page_debug;
-
-	ops->memset_physical = &large_memory_memset_physical;
-	ops->memcpy_from_physical = &large_memory_memcpy_from_physical;
-	ops->memcpy_to_physical = &large_memory_memcpy_to_physical;
-	ops->memcpy_physical_page = &large_memory_memcpy_physical_page;
+	_pageMapper = new(&sPhysicalPageMapper) LargeMemoryPhysicalPageMapper;
+	sPhysicalPageMapper.Init(args, _kernelPageMapper);

	return B_OK;
}
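
Note that the concrete mapper is constructed with placement new into file-static storage (sPhysicalPageMapper), so initialization does not depend on the kernel heap being up. The idiom in isolation, with stand-in types:

#include <new>

struct LargeMapper {
	int Init(void* args) { return 0; }   // stub standing in for the real Init()
};

// Raw, suitably aligned static storage instead of a heap allocation.
alignas(LargeMapper) static char sMapperStorage[sizeof(LargeMapper)];

LargeMapper*
CreateMapper(void* args)
{
	// Construct the singleton in place; no allocator involved.
	LargeMapper* mapper = new(sMapperStorage) LargeMapper;
	mapper->Init(args);
	return mapper;
}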
@@ -29,7 +29,7 @@
#include <thread.h>
#include <tracing.h>
#include <vm/vm.h>
-#include <vm/vm_translation_map.h>
+#include <vm/VMTranslationMap.h>

#include <arch/debug_console.h>
#include <arch/debug.h>

@@ -19,6 +19,7 @@ KernelMergeObject kernel_vm.o :
	VMKernelArea.cpp
	VMNullCache.cpp
	VMPageQueue.cpp
+	VMTranslationMap.cpp
	VMUserAddressSpace.cpp
	VMUserArea.cpp

@@ -102,7 +102,7 @@ VMAddressSpace::~VMAddressSpace()

	WriteLock();

-	fTranslationMap.ops->destroy(&fTranslationMap);
+	delete fTranslationMap;

	rw_lock_destroy(&fLock);
}

@@ -139,8 +139,7 @@ VMAddressSpace::Init()
/*static*/ status_t
VMAddressSpace::InitPostSem()
{
-	status_t status = arch_vm_translation_map_init_kernel_map_post_sem(
-		&sKernelAddressSpace->fTranslationMap);
+	status_t status = sKernelAddressSpace->fTranslationMap->InitPostSem();
	if (status != B_OK)
		return status;

@@ -181,7 +180,7 @@ VMAddressSpace::Dump() const
	kprintf("id: %" B_PRId32 "\n", fID);
	kprintf("ref_count: %" B_PRId32 "\n", fRefCount);
	kprintf("fault_count: %" B_PRId32 "\n", fFaultCount);
-	kprintf("translation_map: %p\n", &fTranslationMap);
+	kprintf("translation_map: %p\n", fTranslationMap);
	kprintf("base: %#" B_PRIxADDR "\n", fBase);
	kprintf("end: %#" B_PRIxADDR "\n", fEndAddress);
	kprintf("change_count: %" B_PRId32 "\n", fChangeCount);

@@ -207,9 +206,9 @@ VMAddressSpace::Create(team_id teamID, addr_t base, size_t size, bool kernel,
	TRACE(("vm_create_aspace: team %ld (%skernel): %#lx bytes starting at "
		"%#lx => %p\n", id, kernel ? "!" : "", size, base, addressSpace));

-	// initialize the corresponding translation map
-	status = arch_vm_translation_map_init_map(
-		&addressSpace->fTranslationMap, kernel);
+	// create the corresponding translation map
+	status = arch_vm_translation_map_create_map(kernel,
+		&addressSpace->fTranslationMap);
	if (status != B_OK) {
		delete addressSpace;
		return status;
src/system/kernel/vm/VMTranslationMap.cpp (new file, 37 lines)
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */
+
+
+#include <vm/VMTranslationMap.h>
+
+
+// #pragma mark - VMTranslationMap
+
+
+VMTranslationMap::VMTranslationMap()
+	:
+	fMapCount(0)
+{
+	recursive_lock_init(&fLock, "translation map");
+}
+
+
+VMTranslationMap::~VMTranslationMap()
+{
+	recursive_lock_destroy(&fLock);
+}
+
+
+// #pragma mark - VMPhysicalPageMapper
+
+
+VMPhysicalPageMapper::VMPhysicalPageMapper()
+{
+}
+
+
+VMPhysicalPageMapper::~VMPhysicalPageMapper()
+{
+}
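
What an architecture backend derives from this new base class might look as follows — a hypothetical minimal subclass; the real VMTranslationMap declares the former ops entries (Map, Unmap, Query, Protect, ClearFlags, Flush, ...) as pure virtuals:

#include <cstdio>

struct recursive_lock { int dummy; };                       // stand-in
inline void recursive_lock_init(recursive_lock*, const char*) {}
inline void recursive_lock_destroy(recursive_lock*) {}

class TranslationMapBase {
public:
	TranslationMapBase() : fMapCount(0)
	{ recursive_lock_init(&fLock, "translation map"); }
	virtual ~TranslationMapBase() { recursive_lock_destroy(&fLock); }

	virtual int Map(unsigned long va, unsigned long pa, unsigned prot) = 0;

protected:
	recursive_lock fLock;   // shared state the base class owns, as above
	int fMapCount;
};

class MyArchTranslationMap : public TranslationMapBase {
public:
	virtual int Map(unsigned long va, unsigned long pa, unsigned prot)
	{
		fMapCount++;        // base-class state is directly accessible
		printf("map %#lx -> %#lx\n", va, pa);
		return 0;
	}
};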
@@ -182,6 +182,8 @@ static off_t sNeededMemory;
static mutex sAvailableMemoryLock = MUTEX_INITIALIZER("available memory lock");
static uint32 sPageFaults;

+static VMPhysicalPageMapper* sPhysicalPageMapper;
+
#if DEBUG_CACHE_LIST

struct cache_info {

@@ -818,8 +820,8 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
		if (status != B_OK)
			return status;

-		vm_translation_map* map = &locker.AddressSpace()->TranslationMap();
-		reservedMapPages = map->ops->map_max_pages_need(map, 0, size - 1);
+		VMTranslationMap* map = locker.AddressSpace()->TranslationMap();
+		reservedMapPages = map->MaxPagesNeededToMap(0, size - 1);
	}

	// Reserve memory before acquiring the address space lock. This reduces the

@@ -967,21 +969,20 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
			// The pages should already be mapped. This is only really useful
			// during boot time. Find the appropriate vm_page objects and stick
			// them in the cache object.
-			vm_translation_map* map = &addressSpace->TranslationMap();
+			VMTranslationMap* map = addressSpace->TranslationMap();
			off_t offset = 0;

			if (!gKernelStartup)
				panic("ALREADY_WIRED flag used outside kernel startup\n");

-			map->ops->lock(map);
+			map->Lock();

			for (addr_t virtualAddress = area->Base();
					virtualAddress < area->Base() + (area->Size() - 1);
					virtualAddress += B_PAGE_SIZE, offset += B_PAGE_SIZE) {
				addr_t physicalAddress;
				uint32 flags;
-				status = map->ops->query(map, virtualAddress,
-					&physicalAddress, &flags);
+				status = map->Query(virtualAddress, &physicalAddress, &flags);
				if (status < B_OK) {
					panic("looking up mapping failed for va 0x%lx\n",
						virtualAddress);

@@ -1001,7 +1002,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
				DEBUG_PAGE_ACCESS_END(page);
			}

-			map->ops->unlock(map);
+			map->Unlock();
			break;
		}

@@ -1009,12 +1010,12 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
		{
			// We have already allocated our continuous pages run, so we can
			// now just map them in the address space
-			vm_translation_map* map = &addressSpace->TranslationMap();
+			VMTranslationMap* map = addressSpace->TranslationMap();
			addr_t physicalAddress = page->physical_page_number * B_PAGE_SIZE;
			addr_t virtualAddress = area->Base();
			off_t offset = 0;

-			map->ops->lock(map);
+			map->Lock();

			for (virtualAddress = area->Base(); virtualAddress < area->Base()
					+ (area->Size() - 1); virtualAddress += B_PAGE_SIZE,

@@ -1023,8 +1024,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
				if (page == NULL)
					panic("couldn't lookup physical page just allocated\n");

-				status = map->ops->map(map, virtualAddress, physicalAddress,
-					protection);
+				status = map->Map(virtualAddress, physicalAddress, protection);
				if (status < B_OK)
					panic("couldn't map physical page in page run\n");

@@ -1035,7 +1035,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
				DEBUG_PAGE_ACCESS_END(page);
			}

-			map->ops->unlock(map);
+			map->Unlock();
			break;
		}

@@ -1136,19 +1136,19 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
	if (status >= B_OK && !alreadyWired) {
		// make sure our area is mapped in completely

-		vm_translation_map* map = &locker.AddressSpace()->TranslationMap();
-		size_t reservePages = map->ops->map_max_pages_need(map, area->Base(),
+		VMTranslationMap* map = locker.AddressSpace()->TranslationMap();
+		size_t reservePages = map->MaxPagesNeededToMap(area->Base(),
			area->Base() + (size - 1));

		vm_page_reserve_pages(reservePages);
-		map->ops->lock(map);
+		map->Lock();

		for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
-			map->ops->map(map, area->Base() + offset, physicalAddress + offset,
+			map->Map(area->Base() + offset, physicalAddress + offset,
				protection);
		}

-		map->ops->unlock(map);
+		map->Unlock();
		vm_page_unreserve_pages(reservePages);
	}

@@ -1222,12 +1222,12 @@ vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
	if (result != B_OK)
		return result;

-	vm_translation_map* map = &locker.AddressSpace()->TranslationMap();
-	size_t reservePages = map->ops->map_max_pages_need(map, area->Base(),
+	VMTranslationMap* map = locker.AddressSpace()->TranslationMap();
+	size_t reservePages = map->MaxPagesNeededToMap(area->Base(),
		area->Base() + (size - 1));

	vm_page_reserve_pages(reservePages);
-	map->ops->lock(map);
+	map->Lock();

	uint32 vecIndex = 0;
	size_t vecOffset = 0;

@@ -1240,13 +1240,13 @@ vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
		if (vecIndex >= vecCount)
			break;

-		map->ops->map(map, area->Base() + offset,
+		map->Map(area->Base() + offset,
			(addr_t)vecs[vecIndex].iov_base + vecOffset, protection);

		vecOffset += B_PAGE_SIZE;
	}

-	map->ops->unlock(map);
+	map->Unlock();
	vm_page_unreserve_pages(reservePages);

	if (_size != NULL)

@@ -1399,8 +1399,8 @@ _vm_map_file(team_id team, const char* name, void** _address,
	if (status != B_OK)
		return status;

-	vm_translation_map* map = &locker.AddressSpace()->TranslationMap();
-	reservedPreMapPages = map->ops->map_max_pages_need(map, 0, size - 1);
+	VMTranslationMap* map = locker.AddressSpace()->TranslationMap();
+	reservedPreMapPages = map->MaxPagesNeededToMap(0, size - 1);

	locker.Unlock();

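
The call sites above all follow the same sequence: compute the worst-case page-table need, reserve that many free pages, then map page by page under the translation-map lock. A compact model of the sequence with stand-in types (the MaxPagesNeededToMap() bound shown is illustrative, not the real formula):

#include <cstddef>

struct TranslationMap {
	void Lock() {}
	void Unlock() {}
	size_t MaxPagesNeededToMap(unsigned long base, unsigned long end)
	{ return (end - base) / (4096 * 1024) + 2; }   // illustrative bound
	int Map(unsigned long va, unsigned long pa, unsigned prot) { return 0; }
};

void reserve_pages(size_t) {}     // models vm_page_reserve_pages()
void unreserve_pages(size_t) {}   // models vm_page_unreserve_pages()

void MapRange(TranslationMap& map, unsigned long base, unsigned long physical,
	size_t size, unsigned protection)
{
	size_t reservePages = map.MaxPagesNeededToMap(base, base + (size - 1));
	reserve_pages(reservePages);    // guarantees Map() cannot fail for
	map.Lock();                     // lack of page-table pages

	for (size_t offset = 0; offset < size; offset += 4096)
		map.Map(base + offset, physical + offset, protection);

	map.Unlock();
	unreserve_pages(reservePages);
}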
@@ -1592,35 +1592,34 @@ vm_clone_area(team_id team, const char* name, void** address,
		// we need to map in everything at this point
		if (sourceArea->cache_type == CACHE_TYPE_DEVICE) {
			// we don't have actual pages to map but a physical area
-			vm_translation_map* map
-				= &sourceArea->address_space->TranslationMap();
-			map->ops->lock(map);
+			VMTranslationMap* map
+				= sourceArea->address_space->TranslationMap();
+			map->Lock();

			addr_t physicalAddress;
			uint32 oldProtection;
-			map->ops->query(map, sourceArea->Base(), &physicalAddress,
-				&oldProtection);
+			map->Query(sourceArea->Base(), &physicalAddress, &oldProtection);

-			map->ops->unlock(map);
+			map->Unlock();

-			map = &targetAddressSpace->TranslationMap();
-			size_t reservePages = map->ops->map_max_pages_need(map,
-				newArea->Base(), newArea->Base() + (newArea->Size() - 1));
+			map = targetAddressSpace->TranslationMap();
+			size_t reservePages = map->MaxPagesNeededToMap(newArea->Base(),
+				newArea->Base() + (newArea->Size() - 1));

			vm_page_reserve_pages(reservePages);
-			map->ops->lock(map);
+			map->Lock();

			for (addr_t offset = 0; offset < newArea->Size();
					offset += B_PAGE_SIZE) {
-				map->ops->map(map, newArea->Base() + offset,
-					physicalAddress + offset, protection);
+				map->Map(newArea->Base() + offset, physicalAddress + offset,
+					protection);
			}

-			map->ops->unlock(map);
+			map->Unlock();
			vm_page_unreserve_pages(reservePages);
		} else {
-			vm_translation_map* map = &targetAddressSpace->TranslationMap();
-			size_t reservePages = map->ops->map_max_pages_need(map,
+			VMTranslationMap* map = targetAddressSpace->TranslationMap();
+			size_t reservePages = map->MaxPagesNeededToMap(
				newArea->Base(), newArea->Base() + (newArea->Size() - 1));
			vm_page_reserve_pages(reservePages);

@@ -1756,11 +1755,11 @@ vm_copy_on_write_area(VMCache* lowerCache)
		if ((tempArea->protection & B_READ_AREA) != 0)
			protection |= B_READ_AREA;

-		vm_translation_map* map = &tempArea->address_space->TranslationMap();
-		map->ops->lock(map);
-		map->ops->protect(map, tempArea->Base(),
+		VMTranslationMap* map = tempArea->address_space->TranslationMap();
+		map->Lock();
+		map->Protect(tempArea->Base(),
			tempArea->Base() - 1 + tempArea->Size(), protection);
-		map->ops->unlock(map);
+		map->Unlock();
	}

	vm_area_put_locked_cache(upperCache);

@@ -1909,19 +1908,18 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
				// a lower cache.
				changePageProtection = false;

-				struct vm_translation_map* map
-					= &area->address_space->TranslationMap();
-				map->ops->lock(map);
+				VMTranslationMap* map = area->address_space->TranslationMap();
+				map->Lock();

				for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
						vm_page* page = it.Next();) {
					addr_t address = area->Base()
						+ (page->cache_offset << PAGE_SHIFT);
-					map->ops->protect(map, address, address - 1 + B_PAGE_SIZE,
+					map->Protect(address, address - 1 + B_PAGE_SIZE,
						newProtection);
				}

-				map->ops->unlock(map);
+				map->Unlock();
			}
		}
	} else {

@@ -1930,13 +1928,13 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,

	if (status == B_OK) {
		// remap existing pages in this cache
-		struct vm_translation_map* map = &area->address_space->TranslationMap();
+		VMTranslationMap* map = area->address_space->TranslationMap();

		if (changePageProtection) {
-			map->ops->lock(map);
-			map->ops->protect(map,
-				area->Base(), area->Base() - 1 + area->Size(), newProtection);
-			map->ops->unlock(map);
+			map->Lock();
+			map->Protect(area->Base(), area->Base() - 1 + area->Size(),
+				newProtection);
+			map->Unlock();
		}

		area->protection = newProtection;

@@ -1954,8 +1952,8 @@ vm_get_page_mapping(team_id team, addr_t vaddr, addr_t* paddr)
		return B_BAD_TEAM_ID;

	uint32 dummyFlags;
-	status_t status = addressSpace->TranslationMap().ops->query(
-		&addressSpace->TranslationMap(), vaddr, paddr, &dummyFlags);
+	status_t status = addressSpace->TranslationMap()->Query(vaddr, paddr,
+		&dummyFlags);

	addressSpace->Put();
	return status;

@@ -1982,14 +1980,13 @@ vm_test_map_modification(vm_page* page)
	vm_page_mapping* mapping;
	while ((mapping = iterator.Next()) != NULL) {
		VMArea* area = mapping->area;
-		vm_translation_map* map = &area->address_space->TranslationMap();
+		VMTranslationMap* map = area->address_space->TranslationMap();

		addr_t physicalAddress;
		uint32 flags;
-		map->ops->lock(map);
-		map->ops->query(map, virtual_page_address(area, page),
-			&physicalAddress, &flags);
-		map->ops->unlock(map);
+		map->Lock();
+		map->Query(virtual_page_address(area, page), &physicalAddress, &flags);
+		map->Unlock();

		if ((flags & PAGE_MODIFIED) != 0)
			return true;

@@ -2011,14 +2008,13 @@ vm_test_map_activation(vm_page* page, bool* _modified)
	vm_page_mapping* mapping;
	while ((mapping = iterator.Next()) != NULL) {
		VMArea* area = mapping->area;
-		vm_translation_map* map = &area->address_space->TranslationMap();
+		VMTranslationMap* map = area->address_space->TranslationMap();

		addr_t physicalAddress;
		uint32 flags;
-		map->ops->lock(map);
-		map->ops->query(map, virtual_page_address(area, page),
-			&physicalAddress, &flags);
-		map->ops->unlock(map);
+		map->Lock();
+		map->Query(virtual_page_address(area, page), &physicalAddress, &flags);
+		map->Unlock();

		if ((flags & PAGE_ACCESSED) != 0)
			activation++;

@@ -2045,11 +2041,11 @@ vm_clear_map_flags(vm_page* page, uint32 flags)
	vm_page_mapping* mapping;
	while ((mapping = iterator.Next()) != NULL) {
		VMArea* area = mapping->area;
-		vm_translation_map* map = &area->address_space->TranslationMap();
+		VMTranslationMap* map = area->address_space->TranslationMap();

-		map->ops->lock(map);
-		map->ops->clear_flags(map, virtual_page_address(area, page), flags);
-		map->ops->unlock(map);
+		map->Lock();
+		map->ClearFlags(virtual_page_address(area, page), flags);
+		map->Unlock();
	}
}

@@ -2073,19 +2069,19 @@ vm_remove_all_page_mappings(vm_page* page, uint32* _flags)
	vm_page_mapping* mapping;
	while ((mapping = iterator.Next()) != NULL) {
		VMArea* area = mapping->area;
-		vm_translation_map* map = &area->address_space->TranslationMap();
+		VMTranslationMap* map = area->address_space->TranslationMap();
		addr_t physicalAddress;
		uint32 flags;

-		map->ops->lock(map);
+		map->Lock();
		addr_t address = virtual_page_address(area, page);
-		map->ops->unmap(map, address, address + (B_PAGE_SIZE - 1));
-		map->ops->flush(map);
-		map->ops->query(map, address, &physicalAddress, &flags);
+		map->Unmap(address, address + (B_PAGE_SIZE - 1));
+		map->Flush();
+		map->Query(address, &physicalAddress, &flags);

		area->mappings.Remove(mapping);

-		map->ops->unlock(map);
+		map->Unlock();

		accumulatedFlags |= flags;
	}
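
The probe functions above share a small protocol: every map access happens between Lock() and Unlock(), and Query() reports the physical address plus the page's accessed/modified flags. Modeled in isolation (stubbed types; the flag value is illustrative, not the kernel's actual constant):

const unsigned kPageModified = 0x04;    // stands in for PAGE_MODIFIED

struct QueryableMap {
	void Lock() {}
	void Unlock() {}
	int Query(unsigned long va, unsigned long* pa, unsigned* flags)
	{ *pa = 0; *flags = kPageModified; return 0; }   // stubbed result
};

bool IsPageModified(QueryableMap& map, unsigned long virtualAddress)
{
	unsigned long physicalAddress;
	unsigned flags;
	map.Lock();
	map.Query(virtualAddress, &physicalAddress, &flags);
	map.Unlock();
	return (flags & kPageModified) != 0;
}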
@@ -2109,16 +2105,15 @@ vm_remove_all_page_mappings(vm_page* page, uint32* _flags)
bool
vm_unmap_page(VMArea* area, addr_t virtualAddress, bool preserveModified)
{
-	vm_translation_map* map = &area->address_space->TranslationMap();
+	VMTranslationMap* map = area->address_space->TranslationMap();

-	map->ops->lock(map);
+	map->Lock();

	addr_t physicalAddress;
	uint32 flags;
-	status_t status = map->ops->query(map, virtualAddress, &physicalAddress,
-		&flags);
+	status_t status = map->Query(virtualAddress, &physicalAddress, &flags);
	if (status < B_OK || (flags & PAGE_PRESENT) == 0) {
-		map->ops->unlock(map);
+		map->Unlock();
		return false;
	}
	vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);

@@ -2130,12 +2125,12 @@ vm_unmap_page(VMArea* area, addr_t virtualAddress, bool preserveModified)
	if (area->wiring != B_NO_LOCK && area->cache_type != CACHE_TYPE_DEVICE)
		decrement_page_wired_count(page);

-	map->ops->unmap(map, virtualAddress, virtualAddress + B_PAGE_SIZE - 1);
+	map->Unmap(virtualAddress, virtualAddress + B_PAGE_SIZE - 1);

	if (preserveModified) {
-		map->ops->flush(map);
+		map->Flush();

-		status = map->ops->query(map, virtualAddress, &physicalAddress, &flags);
+		status = map->Query(virtualAddress, &physicalAddress, &flags);
		// TODO: The x86 implementation always returns 0 flags if the entry is
		// not present, i.e. we've already lost the flag.
		if ((flags & PAGE_MODIFIED) != 0)

@@ -2157,7 +2152,7 @@ vm_unmap_page(VMArea* area, addr_t virtualAddress, bool preserveModified)
		}
	}

-	map->ops->unlock(map);
+	map->Unlock();

	if (area->wiring == B_NO_LOCK) {
		if (mapping != NULL) {

@@ -2178,10 +2173,10 @@ vm_unmap_page(VMArea* area, addr_t virtualAddress, bool preserveModified)
status_t
vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
{
-	vm_translation_map* map = &area->address_space->TranslationMap();
+	VMTranslationMap* map = area->address_space->TranslationMap();
	addr_t end = base + (size - 1);

-	map->ops->lock(map);
+	map->Lock();

	if (area->wiring != B_NO_LOCK && area->cache_type != CACHE_TYPE_DEVICE) {
		// iterate through all pages and decrease their wired count

@@ -2189,8 +2184,8 @@ vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
				virtualAddress += B_PAGE_SIZE) {
			addr_t physicalAddress;
			uint32 flags;
-			status_t status = map->ops->query(map, virtualAddress,
-				&physicalAddress, &flags);
+			status_t status = map->Query(virtualAddress, &physicalAddress,
+				&flags);
			if (status < B_OK || (flags & PAGE_PRESENT) == 0)
				continue;

@@ -2204,16 +2199,16 @@ vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
		}
	}

-	map->ops->unmap(map, base, end);
+	map->Unmap(base, end);
	if (preserveModified) {
-		map->ops->flush(map);
+		map->Flush();

		for (addr_t virtualAddress = base; virtualAddress < end;
				virtualAddress += B_PAGE_SIZE) {
			addr_t physicalAddress;
			uint32 flags;
-			status_t status = map->ops->query(map, virtualAddress,
-				&physicalAddress, &flags);
+			status_t status = map->Query(virtualAddress, &physicalAddress,
+				&flags);
			if (status < B_OK || (flags & PAGE_PRESENT) == 0)
				continue;
			// TODO: We just unmapped the pages, so the PAGE_PRESENT flag won't
			// be set for

@@ -2256,7 +2251,7 @@ vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
		}
	}

-	map->ops->unlock(map);
+	map->Unlock();

	if (area->wiring == B_NO_LOCK) {
		while (vm_page_mapping* mapping = queue.RemoveHead())

@@ -2274,7 +2269,7 @@ vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
status_t
vm_map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
{
-	vm_translation_map* map = &area->address_space->TranslationMap();
+	VMTranslationMap* map = area->address_space->TranslationMap();
	vm_page_mapping* mapping = NULL;

	DEBUG_PAGE_ACCESS_CHECK(page);

@@ -2288,10 +2283,9 @@ vm_map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
		mapping->area = area;
	}

-	map->ops->lock(map);
+	map->Lock();

-	map->ops->map(map, address, page->physical_page_number * B_PAGE_SIZE,
-		protection);
+	map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection);

	if (area->wiring == B_NO_LOCK) {
		// insert mapping into lists

@@ -2302,7 +2296,7 @@ vm_map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
		area->mappings.Add(mapping);
	}

-	map->ops->unlock(map);
+	map->Unlock();

	if (area->wiring != B_NO_LOCK)
		increment_page_wired_count(page);
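
The TODOs in vm_unmap_page[s]() mark exactly the bugs the commit message defers: once the entry is gone, a later Query() can no longer see its modified flag. A sketch of the obvious direction for the announced interface extension — an unmap that returns the old entry's flags — purely hypothetical here, not the committed API:

struct EntryModel { unsigned long address; unsigned flags; bool present; };

struct FlagPreservingMapModel {
	EntryModel entry;   // single-entry stand-in for a page table

	// Returns the flags the entry carried at the moment it was cleared,
	// so accessed/modified information cannot be lost in between.
	unsigned UnmapReturningFlags(unsigned long va)
	{
		(void)va;   // single-entry model ignores the address
		unsigned oldFlags = entry.present ? entry.flags : 0;
		entry.present = false;
		entry.flags = 0;
		return oldFlags;
	}
};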
@@ -2981,7 +2975,7 @@ vm_area_for(addr_t address, bool kernel)
	\a end is inclusive.
*/
static void
-unmap_and_free_physical_pages(vm_translation_map* map, addr_t start, addr_t end)
+unmap_and_free_physical_pages(VMTranslationMap* map, addr_t start, addr_t end)
{
	// free all physical pages in the specified range

@@ -2989,7 +2983,7 @@ unmap_and_free_physical_pages(VMTranslationMap* map, addr_t start, addr_t end)
		addr_t physicalAddress;
		uint32 flags;

-		if (map->ops->query(map, current, &physicalAddress, &flags) == B_OK
+		if (map->Query(current, &physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0) {
			vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
			if (page != NULL && page->state != PAGE_STATE_FREE

@@ -3001,14 +2995,14 @@ unmap_and_free_physical_pages(VMTranslationMap* map, addr_t start, addr_t end)
	}

	// unmap the memory
-	map->ops->unmap(map, start, end);
+	map->Unmap(start, end);
}


void
vm_free_unused_boot_loader_range(addr_t start, addr_t size)
{
-	vm_translation_map* map = &VMAddressSpace::Kernel()->TranslationMap();
+	VMTranslationMap* map = VMAddressSpace::Kernel()->TranslationMap();
	addr_t end = start + (size - 1);
	addr_t lastEnd = start;

@@ -3019,7 +3013,7 @@ vm_free_unused_boot_loader_range(addr_t start, addr_t size)
	// we just have to find the holes between them that fall
	// into the area we should dispose

-	map->ops->lock(map);
+	map->Lock();

	for (VMAddressSpace::AreaIterator it
			= VMAddressSpace::Kernel()->GetAreaIterator();

@@ -3059,7 +3053,7 @@ vm_free_unused_boot_loader_range(addr_t start, addr_t size)
		unmap_and_free_physical_pages(map, lastEnd, end);
	}

-	map->ops->unlock(map);
+	map->Unlock();
}


@@ -3302,7 +3296,7 @@ vm_init(kernel_args* args)
	uint32 i;

	TRACE(("vm_init: entry\n"));
-	err = arch_vm_translation_map_init(args);
+	err = arch_vm_translation_map_init(args, &sPhysicalPageMapper);
	err = arch_vm_init(args);

	// initialize some globals

@@ -3640,7 +3634,7 @@ struct PageFaultContext {
	AddressSpaceReadLocker addressSpaceLocker;
	VMCacheChainLocker cacheChainLocker;

-	vm_translation_map* map;
+	VMTranslationMap* map;
	VMCache* topCache;
	off_t cacheOffset;
	bool isWrite;

@@ -3653,7 +3647,7 @@ struct PageFaultContext {
	PageFaultContext(VMAddressSpace* addressSpace, bool isWrite)
		:
		addressSpaceLocker(addressSpace, true),
-		map(&addressSpace->TranslationMap()),
+		map(addressSpace->TranslationMap()),
		isWrite(isWrite)
	{
	}

@@ -3834,8 +3828,8 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
	// We may need up to 2 pages plus pages needed for mapping them -- reserving
	// the pages upfront makes sure we don't have any cache locked, so that the
	// page daemon/thief can do their job without problems.
-	size_t reservePages = 2 + context.map->ops->map_max_pages_need(context.map,
-		originalAddress, originalAddress);
+	size_t reservePages = 2 + context.map->MaxPagesNeededToMap(originalAddress,
+		originalAddress);
	context.addressSpaceLocker.Unlock();
	vm_page_reserve_pages(reservePages);

@@ -3927,28 +3921,27 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
	bool mapPage = true;

	// check whether there's already a page mapped at the address
-	context.map->ops->lock(context.map);
+	context.map->Lock();

	addr_t physicalAddress;
	uint32 flags;
	vm_page* mappedPage = NULL;
-	if (context.map->ops->query(context.map, address, &physicalAddress,
-			&flags) == B_OK
+	if (context.map->Query(address, &physicalAddress, &flags) == B_OK
		&& (flags & PAGE_PRESENT) != 0
		&& (mappedPage = vm_lookup_page(physicalAddress / B_PAGE_SIZE))
			!= NULL) {
		// Yep there's already a page. If it's ours, we can simply adjust
		// its protection. Otherwise we have to unmap it.
		if (mappedPage == context.page) {
-			context.map->ops->protect(context.map, address,
-				address + (B_PAGE_SIZE - 1), newProtection);
+			context.map->Protect(address, address + (B_PAGE_SIZE - 1),
+				newProtection);

			mapPage = false;
		} else
			unmapPage = true;
	}

-	context.map->ops->unlock(context.map);
+	context.map->Unlock();

	if (unmapPage) {
		// Note: The mapped page is a page of a lower cache. We are
@@ -3982,45 +3975,39 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
status_t
vm_get_physical_page(addr_t paddr, addr_t* _vaddr, void** _handle)
{
-	return VMAddressSpace::Kernel()->TranslationMap().ops->get_physical_page(
-		paddr, _vaddr, _handle);
+	return sPhysicalPageMapper->GetPage(paddr, _vaddr, _handle);
}


status_t
vm_put_physical_page(addr_t vaddr, void* handle)
{
-	return VMAddressSpace::Kernel()->TranslationMap().ops->put_physical_page(
-		vaddr, handle);
+	return sPhysicalPageMapper->PutPage(vaddr, handle);
}


status_t
vm_get_physical_page_current_cpu(addr_t paddr, addr_t* _vaddr, void** _handle)
{
-	return VMAddressSpace::Kernel()->TranslationMap().ops
-		->get_physical_page_current_cpu(paddr, _vaddr, _handle);
+	return sPhysicalPageMapper->GetPageCurrentCPU(paddr, _vaddr, _handle);
}


status_t
vm_put_physical_page_current_cpu(addr_t vaddr, void* handle)
{
-	return VMAddressSpace::Kernel()->TranslationMap().ops
-		->put_physical_page_current_cpu(vaddr, handle);
+	return sPhysicalPageMapper->PutPageCurrentCPU(vaddr, handle);
}


status_t
vm_get_physical_page_debug(addr_t paddr, addr_t* _vaddr, void** _handle)
{
-	return VMAddressSpace::Kernel()->TranslationMap().ops
-		->get_physical_page_debug(paddr, _vaddr, _handle);
+	return sPhysicalPageMapper->GetPageDebug(paddr, _vaddr, _handle);
}


status_t
vm_put_physical_page_debug(addr_t vaddr, void* handle)
{
-	return VMAddressSpace::Kernel()->TranslationMap().ops
-		->put_physical_page_debug(vaddr, handle);
+	return sPhysicalPageMapper->PutPageDebug(vaddr, handle);
}

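
These wrappers expose the mapper's get/put protocol: GetPage() maps one physical page somewhere addressable and hands back an opaque handle that the matching PutPage() needs to release the mapping. A toy model of the protocol (the handle is simply the slot pointer; allocation is grossly simplified):

struct Slot { unsigned long mappedAt; };

struct PageMapperModel {
	Slot slots[8];
	int next = 0;

	int GetPage(unsigned long pa, unsigned long* va, void** handle)
	{
		Slot* slot = &slots[next++ % 8];   // simplistic slot allocation
		slot->mappedAt = pa;               // identity "mapping"
		*va = slot->mappedAt;
		*handle = slot;                    // caller must pass this back
		return 0;
	}

	int PutPage(unsigned long va, void* handle)
	{
		(void)va;
		static_cast<Slot*>(handle)->mappedAt = 0;   // release the slot
		return 0;
	}
};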
@ -4302,32 +4289,28 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
status_t
vm_memset_physical(addr_t address, int value, size_t length)
{
	return VMAddressSpace::Kernel()->TranslationMap().ops->memset_physical(
		address, value, length);
	return sPhysicalPageMapper->MemsetPhysical(address, value, length);
}


status_t
vm_memcpy_from_physical(void* to, addr_t from, size_t length, bool user)
{
	return VMAddressSpace::Kernel()->TranslationMap().ops->memcpy_from_physical(
		to, from, length, user);
	return sPhysicalPageMapper->MemcpyFromPhysical(to, from, length, user);
}


status_t
vm_memcpy_to_physical(addr_t to, const void* _from, size_t length, bool user)
{
	return VMAddressSpace::Kernel()->TranslationMap().ops->memcpy_to_physical(
		to, _from, length, user);
	return sPhysicalPageMapper->MemcpyToPhysical(to, _from, length, user);
}


void
vm_memcpy_physical_page(addr_t to, addr_t from)
{
	return VMAddressSpace::Kernel()->TranslationMap().ops->memcpy_physical_page(
		to, from);
	return sPhysicalPageMapper->MemcpyPhysicalPage(to, from);
}

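The physical memory helpers follow the same delegation. Purely for illustration, a hypothetical caller might zero a freshly allocated page through the public wrapper like this; clear_page() is an invented name, and the use of vm_page's physical_page_number field is an assumption about the page structure:

// Invented example: zero a vm_page through vm_memset_physical().
static status_t
clear_page(vm_page* page)
{
	// physical_page_number is a page frame number; shift it back into a
	// byte address before handing it to the physical memset.
	return vm_memset_physical(page->physical_page_number * B_PAGE_SIZE,
		0, B_PAGE_SIZE);
}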
@ -4402,7 +4385,6 @@ status_t
lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
{
	VMAddressSpace* addressSpace = NULL;
	struct vm_translation_map* map;
	addr_t unalignedBase = (addr_t)address;
	addr_t end = unalignedBase + numBytes;
	addr_t base = ROUNDDOWN(unalignedBase, B_PAGE_SIZE);
@ -4421,7 +4403,7 @@ lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)

	// test if we're on an area that allows faults at all

	map = &addressSpace->TranslationMap();
	VMTranslationMap* map = addressSpace->TranslationMap();

	status_t status = test_lock_memory(addressSpace, base, needsLocking);
	if (status < B_OK)
@ -4434,9 +4416,9 @@ lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
		uint32 protection;
		status_t status;

		map->ops->lock(map);
		status = map->ops->query(map, base, &physicalAddress, &protection);
		map->ops->unlock(map);
		map->Lock();
		status = map->Query(base, &physicalAddress, &protection);
		map->Unlock();

		if (status < B_OK)
			goto out;
@ -4475,9 +4457,9 @@ lock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
		// we want to allow waiting for the area to become eligible for these
		// operations again.

		map->ops->lock(map);
		status = map->ops->query(map, base, &physicalAddress, &protection);
		map->ops->unlock(map);
		map->Lock();
		status = map->Query(base, &physicalAddress, &protection);
		map->Unlock();

		if (status < B_OK)
			goto out;
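
Both loops in lock_memory_etc() repeat the same three-step sequence. Purely as an illustration of the new call style, the idiom could be factored like this; query_page_locked() is an invented helper, not part of the commit:

static status_t
query_page_locked(VMTranslationMap* map, addr_t virtualAddress,
	addr_t* _physicalAddress, uint32* _flags)
{
	// Serialize against concurrent mapping changes while querying.
	map->Lock();
	status_t status = map->Query(virtualAddress, _physicalAddress, _flags);
	map->Unlock();
	return status;
}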
@ -4509,7 +4491,6 @@ status_t
unlock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
{
	VMAddressSpace* addressSpace = NULL;
	struct vm_translation_map* map;
	addr_t unalignedBase = (addr_t)address;
	addr_t end = unalignedBase + numBytes;
	addr_t base = ROUNDDOWN(unalignedBase, B_PAGE_SIZE);
@ -4525,7 +4506,7 @@ unlock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
	if (addressSpace == NULL)
		return B_ERROR;

	map = &addressSpace->TranslationMap();
	VMTranslationMap* map = addressSpace->TranslationMap();

	status_t status = test_lock_memory(addressSpace, base, needsLocking);
	if (status < B_OK)
@ -4534,12 +4515,11 @@ unlock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
		goto out;

	for (; base < end; base += B_PAGE_SIZE) {
		map->ops->lock(map);
		map->Lock();

		addr_t physicalAddress;
		uint32 protection;
		status = map->ops->query(map, base, &physicalAddress,
			&protection);
		status = map->Query(base, &physicalAddress, &protection);
			// TODO: ATM there's no mechanism that guarantees that the page
			// we've marked wired in lock_memory_etc() is the one we find here.
			// If we only locked for reading, the original page might stem from
@ -4549,7 +4529,7 @@ unlock_memory_etc(team_id team, void* address, size_t numBytes, uint32 flags)
			// read-only at any time. This would even cause a violation of the
			// lock_memory() guarantee.

		map->ops->unlock(map);
		map->Unlock();

		if (status < B_OK)
			goto out;
@ -4620,20 +4600,20 @@ get_memory_map_etc(team_id team, const void* address, size_t numBytes,
	if (addressSpace == NULL)
		return B_ERROR;

	vm_translation_map* map = &addressSpace->TranslationMap();
	VMTranslationMap* map = addressSpace->TranslationMap();

	if (interrupts)
		map->ops->lock(map);
		map->Lock();

	while (offset < numBytes) {
		addr_t bytes = min_c(numBytes - offset, B_PAGE_SIZE);
		uint32 flags;

		if (interrupts) {
			status = map->ops->query(map, (addr_t)address + offset,
				&physicalAddress, &flags);
			status = map->Query((addr_t)address + offset, &physicalAddress,
				&flags);
		} else {
			status = map->ops->query_interrupt(map, (addr_t)address + offset,
			status = map->QueryInterrupt((addr_t)address + offset,
				&physicalAddress, &flags);
		}
		if (status < B_OK)
@ -4668,7 +4648,7 @@ get_memory_map_etc(team_id team, const void* address, size_t numBytes,
	}

	if (interrupts)
		map->ops->unlock(map);
		map->Unlock();

	if (status != B_OK)
		return status;
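
get_memory_map_etc() shows why the interface keeps two query methods: with interrupts enabled it takes the map lock and uses Query(), which may block, while otherwise it falls back to QueryInterrupt(), which has to get by without the lock. Condensed into an invented helper for illustration only:

static status_t
query_map_entry(VMTranslationMap* map, addr_t virtualAddress,
	bool interruptsEnabled, addr_t* _physicalAddress, uint32* _flags)
{
	// The caller holds the map lock in the interrupts-enabled case,
	// matching get_memory_map_etc() above, where Lock()/Unlock() bracket
	// the whole loop.
	if (interruptsEnabled)
		return map->Query(virtualAddress, _physicalAddress, _flags);
	return map->QueryInterrupt(virtualAddress, _physicalAddress, _flags);
}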
@ -5258,7 +5238,7 @@ _user_set_memory_protection(void* _address, size_t size, int protection)

	// Second round: If the protections differ from that of the area, create a
	// page protection array and re-map mapped pages.
	vm_translation_map* map = &locker.AddressSpace()->TranslationMap();
	VMTranslationMap* map = locker.AddressSpace()->TranslationMap();
	currentAddress = address;
	sizeLeft = size;
	while (sizeLeft > 0) {
@ -5298,17 +5278,16 @@ _user_set_memory_protection(void* _address, size_t size, int protection)

		for (addr_t pageAddress = area->Base() + offset;
				pageAddress < currentAddress; pageAddress += B_PAGE_SIZE) {
			map->ops->lock(map);
			map->Lock();

			set_area_page_protection(area, pageAddress, protection);

			addr_t physicalAddress;
			uint32 flags;

			status_t error = map->ops->query(map, pageAddress, &physicalAddress,
				&flags);
			status_t error = map->Query(pageAddress, &physicalAddress, &flags);
			if (error != B_OK || (flags & PAGE_PRESENT) == 0) {
				map->ops->unlock(map);
				map->Unlock();
				continue;
			}

@ -5316,7 +5295,7 @@ _user_set_memory_protection(void* _address, size_t size, int protection)
			if (page == NULL) {
				panic("area %p looking up page failed for pa 0x%lx\n", area,
					physicalAddress);
				map->ops->unlock(map);
				map->Unlock();
				return B_ERROR;
			}

@ -5327,13 +5306,11 @@ _user_set_memory_protection(void* _address, size_t size, int protection)
				&& (protection & B_WRITE_AREA) != 0;

			if (!unmapPage) {
				map->ops->unmap(map, pageAddress,
					pageAddress + B_PAGE_SIZE - 1);
				map->ops->map(map, pageAddress, physicalAddress,
					actualProtection);
				map->Unmap(pageAddress, pageAddress + B_PAGE_SIZE - 1);
				map->Map(pageAddress, physicalAddress, actualProtection);
			}

			map->ops->unlock(map);
			map->Unlock();

			if (unmapPage)
				vm_unmap_page(area, pageAddress, true);
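
Note the remap pattern in the last hunk: rather than calling Protect(), the page is unmapped and mapped again with the new attributes while the map lock is held. As a standalone sketch of that pattern (remap_page() is an invented name; error handling omitted):

static void
remap_page(VMTranslationMap* map, addr_t pageAddress, addr_t physicalAddress,
	uint32 newProtection)
{
	map->Lock();
	// Drop the old mapping and rebuild it with the new attributes.
	map->Unmap(pageAddress, pageAddress + B_PAGE_SIZE - 1);
	map->Map(pageAddress, physicalAddress, newProtection);
	map->Unlock();
}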
@ -388,9 +388,8 @@ dump_page(int argc, char **argv)
		addressSpace = debug_get_debugged_thread()->team->address_space;

	uint32 flags = 0;
	if (addressSpace->TranslationMap().ops->query_interrupt(
			&addressSpace->TranslationMap(), pageAddress, &pageAddress,
			&flags) != B_OK
	if (addressSpace->TranslationMap()->QueryInterrupt(pageAddress,
			&pageAddress, &flags) != B_OK
		|| (flags & PAGE_PRESENT) == 0) {
		kprintf("Virtual address not mapped to a physical page in this "
			"address space.\n");
@ -438,8 +437,7 @@ dump_page(int argc, char **argv)
			address += B_PAGE_SIZE, pageCount--) {
		addr_t physicalAddress;
		uint32 flags = 0;
		if (addressSpace->TranslationMap().ops->query_interrupt(
				&addressSpace->TranslationMap(), address,
		if (addressSpace->TranslationMap()->QueryInterrupt(address,
				&physicalAddress, &flags) == B_OK
			&& (flags & PAGE_PRESENT) != 0
			&& physicalAddress / B_PAGE_SIZE