* Renamed vm_translation_map_arch_info to X86PagingStructures, and all
  members and local variables of that type accordingly.
* arch_thread_context_switch(): Added TODO: The still active paging structures
  can indeed be deleted before we stop using them.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37022 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-06-05 21:51:05 +00:00
parent 3329d8c055
commit 8421714089
7 changed files with 87 additions and 82 deletions
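For readers without the surrounding source: the object renamed here is a reference-counted bundle of paging data (page directory pointers plus a mask of CPUs using it). Below is a minimal sketch of that pattern and of the hazard the TODO refers to, using std::atomic in place of the kernel's own atomics; the names and the deferred-deletion stub are illustrative, not Haiku's actual API.

#include <atomic>
#include <cstdint>

struct PagingStructuresSketch;

// Stand-in for Haiku's DeferredDeletable machinery: the real kernel defers
// the delete to a point where it believes no CPU still uses the object.
static void schedule_deferred_delete(PagingStructuresSketch* structures);

struct PagingStructuresSketch {
    std::atomic<int32_t> ref_count{1};

    void AddReference() { ref_count.fetch_add(1); }

    void RemoveReference()
    {
        // Dropping the last reference only *schedules* deletion. The TODO
        // added in this commit warns that on SMP the deferred deleter could
        // run on another CPU while this CPU's MMU still has the structures
        // loaded.
        if (ref_count.fetch_sub(1) == 1)
            schedule_deferred_delete(this);
    }
};

static void schedule_deferred_delete(PagingStructuresSketch* structures)
{
    delete structures;  // deferred to a safe point in the real kernel
}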

View File

@@ -102,6 +102,9 @@
 #ifndef _ASSEMBLER
 
+struct X86PagingStructures;
+
+
 typedef struct x86_mtrr_info {
     uint64 base;
     uint64 size;
@@ -248,7 +251,7 @@ typedef struct arch_cpu_info {
     int model;
     int extended_model;
 
-    struct vm_translation_map_arch_info* active_translation_map;
+    struct X86PagingStructures* active_paging_structures;
 
     uint32 dr6;    // temporary storage for debug registers (cf.
     uint32 dr7;    //     x86_exit_user_debug_at_kernel_entry())

View File

@@ -18,10 +18,10 @@ struct X86VMTranslationMap : VMTranslationMap {
     status_t Init(bool kernel);
 
-    inline vm_translation_map_arch_info* ArchData() const
-        { return fArchData; }
+    inline X86PagingStructures* PagingStructures() const
+        { return fPagingStructures; }
     inline uint32 PhysicalPageDir() const
-        { return fArchData->pgdir_phys; }
+        { return fPagingStructures->pgdir_phys; }
 
     virtual status_t InitPostSem();
@@ -67,7 +67,7 @@ struct X86VMTranslationMap : VMTranslationMap {
     virtual void Flush();
 
 protected:
-    vm_translation_map_arch_info* fArchData;
+    X86PagingStructures* fPagingStructures;
     TranslationMapPhysicalPageMapper* fPageMapper;
     int fInvalidPagesCount;
     addr_t fInvalidPages[PAGE_INVALIDATE_CACHE_SIZE];

View File

@@ -696,9 +696,9 @@ arch_cpu_init_post_vm(kernel_args *args)
         kDoubleFaultStackSize * smp_get_num_cpus(), B_FULL_LOCK,
         B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
 
-    vm_translation_map_arch_info* kernelArchTranslationMap
+    X86PagingStructures* kernelPagingStructures
         = static_cast<X86VMTranslationMap*>(
-            VMAddressSpace::Kernel()->TranslationMap())->ArchData();
+            VMAddressSpace::Kernel()->TranslationMap())->PagingStructures();
 
     // setup task-state segments
     for (i = 0; i < args->num_cpus; i++) {
@@ -716,8 +716,8 @@ arch_cpu_init_post_vm(kernel_args *args)
         init_double_fault(i);
 
         // init active translation map
-        gCPU[i].arch.active_translation_map = kernelArchTranslationMap;
-        kernelArchTranslationMap->AddReference();
+        gCPU[i].arch.active_paging_structures = kernelPagingStructures;
+        kernelPagingStructures->AddReference();
     }
 
     // set the current hardware task on cpu 0

View File

@@ -366,30 +366,32 @@ arch_thread_context_switch(struct thread *from, struct thread *to)
     set_tls_context(to);
 
     struct cpu_ent* cpuData = to->cpu;
-    vm_translation_map_arch_info* activeMap
-        = cpuData->arch.active_translation_map;
+    X86PagingStructures* activePagingStructures
+        = cpuData->arch.active_paging_structures;
     VMAddressSpace* toAddressSpace = to->team->address_space;
 
     uint32 newPageDirectory;
-    vm_translation_map_arch_info* toMap;
+    X86PagingStructures* toPagingStructures;
     if (toAddressSpace != NULL
-        && (toMap = static_cast<X86VMTranslationMap*>(
-            toAddressSpace->TranslationMap())->ArchData()) != activeMap) {
+        && (toPagingStructures = static_cast<X86VMTranslationMap*>(
+            toAddressSpace->TranslationMap())->PagingStructures())
+                != activePagingStructures) {
         // update on which CPUs the address space is used
         int cpu = cpuData->cpu_num;
-        atomic_and(&activeMap->active_on_cpus, ~((uint32)1 << cpu));
-        atomic_or(&toMap->active_on_cpus, (uint32)1 << cpu);
+        atomic_and(&activePagingStructures->active_on_cpus,
+            ~((uint32)1 << cpu));
+        atomic_or(&toPagingStructures->active_on_cpus, (uint32)1 << cpu);
 
-        activeMap->RemoveReference();
-            // this might causes the map to be deferred deleted - ie. it won't
-            // be deleted when it is still in use
+        activePagingStructures->RemoveReference();
+            // TODO: This might cause deferred deletion, which on SMP machines
+            // could happen right now on another CPU!
 
-        // assign the new map to the CPU
-        toMap->AddReference();
-        cpuData->arch.active_translation_map = toMap;
+        // assign the new paging structures to the CPU
+        toPagingStructures->AddReference();
+        cpuData->arch.active_paging_structures = toPagingStructures;
 
         // get the new page directory
-        newPageDirectory = toMap->pgdir_phys;
+        newPageDirectory = toPagingStructures->pgdir_phys;
     } else {
         newPageDirectory = 0;
             // this means no change
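The bookkeeping in this hunk maintains a per-paging-structures bitmask of the CPUs currently running on it, which Flush() later uses to multicast TLB invalidations only where they are needed. The two atomic updates in isolation, as an illustrative sketch with std::atomic standing in for the kernel's atomic_and()/atomic_or():

#include <atomic>
#include <cstdint>

// Move this CPU's bit from the outgoing mask to the incoming one. Everything
// else (reference counting, loading the new page directory) happens around
// this in the real context-switch path.
void update_active_cpu_masks(std::atomic<uint32_t>& outgoing,
    std::atomic<uint32_t>& incoming, int cpu)
{
    outgoing.fetch_and(~(uint32_t(1) << cpu));  // atomic_and() equivalent
    incoming.fetch_or(uint32_t(1) << cpu);      // atomic_or() equivalent
}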

View File

@@ -51,19 +51,19 @@ static TranslationMapPhysicalPageMapper* sKernelPhysicalPageMapper;
 // Accessor class to reuse the SinglyLinkedListLink of DeferredDeletable for
-// vm_translation_map_arch_info.
-struct ArchTMapGetLink {
+// X86PagingStructures.
+struct PagingStructuresGetLink {
 private:
-    typedef SinglyLinkedListLink<vm_translation_map_arch_info> Link;
+    typedef SinglyLinkedListLink<X86PagingStructures> Link;
 
 public:
-    inline Link* operator()(vm_translation_map_arch_info* element) const
+    inline Link* operator()(X86PagingStructures* element) const
     {
         return (Link*)element->GetSinglyLinkedListLink();
     }
 
     inline const Link* operator()(
-        const vm_translation_map_arch_info* element) const
+        const X86PagingStructures* element) const
     {
         return (const Link*)element->GetSinglyLinkedListLink();
     }
@@ -71,12 +71,12 @@ public:
 };
 
 
-typedef SinglyLinkedList<vm_translation_map_arch_info, ArchTMapGetLink>
-    ArchTMapList;
+typedef SinglyLinkedList<X86PagingStructures, PagingStructuresGetLink>
+    PagingStructuresList;
 
-static ArchTMapList sTMapList;
-static spinlock sTMapListLock;
+static PagingStructuresList sPagingStructuresList;
+static spinlock sPagingStructuresListLock;
 
 #define CHATTY_TMAP 0
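The GetLink functor above is how Haiku's intrusive SinglyLinkedList finds the link node embedded in each element (here inherited from DeferredDeletable), so list membership costs no allocation. A stripped-down illustration of the idiom, not Haiku's actual template, which lives in the kernel utility headers:

#include <cstddef>

template<typename Element>
struct ListLink {
    Element* next = nullptr;
};

struct Item {
    ListLink<Item> link;    // embedded link: no per-node allocation
    int value = 0;
};

// The functor tells the list where each element keeps its link.
struct ItemGetLink {
    ListLink<Item>* operator()(Item* element) const
    {
        return &element->link;
    }
};

template<typename Element, typename GetLink>
struct IntrusiveList {
    Element* head = nullptr;

    void Add(Element* element)
    {
        GetLink()(element)->next = head;
        head = element;
    }

    void Remove(Element* element)
    {
        // walk the chain of next-pointers until we find the element
        Element** link = &head;
        while (*link != nullptr && *link != element)
            link = &GetLink()(*link)->next;
        if (*link != nullptr)
            *link = GetLink()(element)->next;
    }
};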
@@ -85,11 +85,11 @@ static spinlock sTMapListLock;
         B_PAGE_SIZE * 1024)))
 #define FIRST_KERNEL_PGDIR_ENT (VADDR_TO_PDENT(KERNEL_BASE))
 #define NUM_KERNEL_PGDIR_ENTS (VADDR_TO_PDENT(KERNEL_SIZE))
-#define IS_KERNEL_MAP(map) (fArchData->pgdir_phys \
+#define IS_KERNEL_MAP(map) (fPagingStructures->pgdir_phys \
         == sKernelPhysicalPageDirectory)
 
 
-vm_translation_map_arch_info::vm_translation_map_arch_info()
+X86PagingStructures::X86PagingStructures()
     :
     pgdir_virt(NULL),
     ref_count(1)
@@ -97,7 +97,7 @@ vm_translation_map_arch_info::vm_translation_map_arch_info()
 }
 
 
-vm_translation_map_arch_info::~vm_translation_map_arch_info()
+X86PagingStructures::~X86PagingStructures()
 {
     // free the page dir
     free(pgdir_virt);
@@ -105,11 +105,11 @@ vm_translation_map_arch_info::~vm_translation_map_arch_info()
 
 
 void
-vm_translation_map_arch_info::Delete()
+X86PagingStructures::Delete()
 {
     // remove from global list
-    InterruptsSpinLocker locker(sTMapListLock);
-    sTMapList.Remove(this);
+    InterruptsSpinLocker locker(sPagingStructuresListLock);
+    sPagingStructuresList.Remove(this);
     locker.Unlock();
 
 #if 0
@@ -220,13 +220,13 @@ x86_update_all_pgdirs(int index, page_directory_entry e)
 {
     unsigned int state = disable_interrupts();
 
-    acquire_spinlock(&sTMapListLock);
+    acquire_spinlock(&sPagingStructuresListLock);
 
-    ArchTMapList::Iterator it = sTMapList.GetIterator();
-    while (vm_translation_map_arch_info* info = it.Next())
+    PagingStructuresList::Iterator it = sPagingStructuresList.GetIterator();
+    while (X86PagingStructures* info = it.Next())
         info->pgdir_virt[index] = e;
 
-    release_spinlock(&sTMapListLock);
+    release_spinlock(&sPagingStructuresListLock);
     restore_interrupts(state);
 }
@@ -279,7 +279,7 @@ x86_early_prepare_page_tables(page_table_entry* pageTables, addr_t address,
 X86VMTranslationMap::X86VMTranslationMap()
     :
-    fArchData(NULL),
+    fPagingStructures(NULL),
     fPageMapper(NULL),
     fInvalidPagesCount(0)
 {
@@ -288,18 +288,18 @@ X86VMTranslationMap::X86VMTranslationMap()
 X86VMTranslationMap::~X86VMTranslationMap()
 {
-    if (fArchData == NULL)
+    if (fPagingStructures == NULL)
         return;
 
     if (fPageMapper != NULL)
         fPageMapper->Delete();
 
-    if (fArchData->pgdir_virt != NULL) {
+    if (fPagingStructures->pgdir_virt != NULL) {
         // cycle through and free all of the user space pgtables
         for (uint32 i = VADDR_TO_PDENT(USER_BASE);
             i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
-            if ((fArchData->pgdir_virt[i] & X86_PDE_PRESENT) != 0) {
-                addr_t address = fArchData->pgdir_virt[i]
+            if ((fPagingStructures->pgdir_virt[i] & X86_PDE_PRESENT) != 0) {
+                addr_t address = fPagingStructures->pgdir_virt[i]
                     & X86_PDE_ADDRESS_MASK;
                 vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
                 if (!page)
@@ -310,7 +310,7 @@ X86VMTranslationMap::~X86VMTranslationMap()
         }
     }
 
-    fArchData->RemoveReference();
+    fPagingStructures->RemoveReference();
 }
@@ -319,11 +319,11 @@ X86VMTranslationMap::Init(bool kernel)
 {
     TRACE("X86VMTranslationMap::Init()\n");
 
-    fArchData = new(std::nothrow) vm_translation_map_arch_info;
-    if (fArchData == NULL)
+    fPagingStructures = new(std::nothrow) X86PagingStructures;
+    if (fPagingStructures == NULL)
         return B_NO_MEMORY;
 
-    fArchData->active_on_cpus = 0;
+    fPagingStructures->active_on_cpus = 0;
 
     if (!kernel) {
         // user
@@ -334,43 +334,43 @@ X86VMTranslationMap::Init(bool kernel)
             return error;
 
         // allocate a pgdir
-        fArchData->pgdir_virt = (page_directory_entry *)memalign(
+        fPagingStructures->pgdir_virt = (page_directory_entry *)memalign(
             B_PAGE_SIZE, B_PAGE_SIZE);
-        if (fArchData->pgdir_virt == NULL)
+        if (fPagingStructures->pgdir_virt == NULL)
             return B_NO_MEMORY;
 
         phys_addr_t physicalPageDir;
         vm_get_page_mapping(VMAddressSpace::KernelID(),
-            (addr_t)fArchData->pgdir_virt,
+            (addr_t)fPagingStructures->pgdir_virt,
             &physicalPageDir);
-        fArchData->pgdir_phys = physicalPageDir;
+        fPagingStructures->pgdir_phys = physicalPageDir;
     } else {
         // kernel
         // get the physical page mapper
         fPageMapper = sKernelPhysicalPageMapper;
 
         // we already know the kernel pgdir mapping
-        fArchData->pgdir_virt = sKernelVirtualPageDirectory;
-        fArchData->pgdir_phys = sKernelPhysicalPageDirectory;
+        fPagingStructures->pgdir_virt = sKernelVirtualPageDirectory;
+        fPagingStructures->pgdir_phys = sKernelPhysicalPageDirectory;
     }
 
     // zero out the bottom portion of the new pgdir
-    memset(fArchData->pgdir_virt + FIRST_USER_PGDIR_ENT, 0,
+    memset(fPagingStructures->pgdir_virt + FIRST_USER_PGDIR_ENT, 0,
         NUM_USER_PGDIR_ENTS * sizeof(page_directory_entry));
 
     // insert this new map into the map list
     {
         int state = disable_interrupts();
-        acquire_spinlock(&sTMapListLock);
+        acquire_spinlock(&sPagingStructuresListLock);
 
         // copy the top portion of the pgdir from the current one
-        memcpy(fArchData->pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
+        memcpy(fPagingStructures->pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
             sKernelVirtualPageDirectory + FIRST_KERNEL_PGDIR_ENT,
             NUM_KERNEL_PGDIR_ENTS * sizeof(page_directory_entry));
 
-        sTMapList.Add(fArchData);
+        sPagingStructuresList.Add(fPagingStructures);
 
-        release_spinlock(&sTMapListLock);
+        release_spinlock(&sPagingStructuresListLock);
         restore_interrupts(state);
     }
@@ -451,7 +451,7 @@ X86VMTranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes,
     dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
     dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
 */
-    page_directory_entry* pd = fArchData->pgdir_virt;
+    page_directory_entry* pd = fPagingStructures->pgdir_virt;
 
     // check to see if a page table exists for this range
     uint32 index = VADDR_TO_PDENT(va);
@@ -511,7 +511,7 @@ X86VMTranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes,
 status_t
 X86VMTranslationMap::Unmap(addr_t start, addr_t end)
 {
-    page_directory_entry *pd = fArchData->pgdir_virt;
+    page_directory_entry *pd = fPagingStructures->pgdir_virt;
 
     start = ROUNDDOWN(start, B_PAGE_SIZE);
     end = ROUNDUP(end, B_PAGE_SIZE);
@@ -576,7 +576,7 @@ X86VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
 {
     ASSERT(address % B_PAGE_SIZE == 0);
 
-    page_directory_entry* pd = fArchData->pgdir_virt;
+    page_directory_entry* pd = fPagingStructures->pgdir_virt;
 
     TRACE("X86VMTranslationMap::UnmapPage(%#" B_PRIxADDR ")\n", address);
@@ -686,7 +686,7 @@ void
 X86VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
     bool updatePageQueue)
 {
-    page_directory_entry* pd = fArchData->pgdir_virt;
+    page_directory_entry* pd = fPagingStructures->pgdir_virt;
 
     addr_t start = base;
     addr_t end = base + size;
@@ -816,7 +816,7 @@ X86VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
     bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
 
-    page_directory_entry* pd = fArchData->pgdir_virt;
+    page_directory_entry* pd = fPagingStructures->pgdir_virt;
 
     RecursiveLocker locker(fLock);
@@ -916,7 +916,7 @@ X86VMTranslationMap::Query(addr_t va, phys_addr_t *_physical, uint32 *_flags)
     *_physical = 0;
 
     int index = VADDR_TO_PDENT(va);
-    page_directory_entry *pd = fArchData->pgdir_virt;
+    page_directory_entry *pd = fPagingStructures->pgdir_virt;
     if ((pd[index] & X86_PDE_PRESENT) == 0) {
         // no pagetable here
         return B_OK;
@@ -959,7 +959,7 @@ X86VMTranslationMap::QueryInterrupt(addr_t va, phys_addr_t *_physical,
     *_physical = 0;
 
     int index = VADDR_TO_PDENT(va);
-    page_directory_entry* pd = fArchData->pgdir_virt;
+    page_directory_entry* pd = fPagingStructures->pgdir_virt;
     if ((pd[index] & X86_PDE_PRESENT) == 0) {
         // no pagetable here
         return B_OK;
@@ -999,7 +999,7 @@ status_t
 X86VMTranslationMap::Protect(addr_t start, addr_t end, uint32 attributes,
     uint32 memoryType)
 {
-    page_directory_entry *pd = fArchData->pgdir_virt;
+    page_directory_entry *pd = fPagingStructures->pgdir_virt;
 
     start = ROUNDDOWN(start, B_PAGE_SIZE);
@@ -1078,7 +1078,7 @@ status_t
 X86VMTranslationMap::ClearFlags(addr_t va, uint32 flags)
 {
     int index = VADDR_TO_PDENT(va);
-    page_directory_entry* pd = fArchData->pgdir_virt;
+    page_directory_entry* pd = fPagingStructures->pgdir_virt;
     if ((pd[index] & X86_PDE_PRESENT) == 0) {
         // no pagetable here
         return B_OK;
@@ -1117,7 +1117,7 @@ X86VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
 {
     ASSERT(address % B_PAGE_SIZE == 0);
 
-    page_directory_entry* pd = fArchData->pgdir_virt;
+    page_directory_entry* pd = fPagingStructures->pgdir_virt;
 
     TRACE("X86VMTranslationMap::ClearAccessedAndModified(%#" B_PRIxADDR ")\n",
         address);
@@ -1255,7 +1255,7 @@ X86VMTranslationMap::Flush()
         restore_interrupts(state);
 
         int cpu = smp_get_current_cpu();
-        uint32 cpuMask = fArchData->active_on_cpus
+        uint32 cpuMask = fPagingStructures->active_on_cpus
             & ~((uint32)1 << cpu);
         if (cpuMask != 0) {
             smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
@@ -1274,7 +1274,7 @@ X86VMTranslationMap::Flush()
             SMP_MSG_FLAG_SYNC);
     } else {
         int cpu = smp_get_current_cpu();
-        uint32 cpuMask = fArchData->active_on_cpus
+        uint32 cpuMask = fPagingStructures->active_on_cpus
             & ~((uint32)1 << cpu);
         if (cpuMask != 0) {
             smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
@@ -1359,8 +1359,8 @@ arch_vm_translation_map_init(kernel_args *args,
     }
 #endif
 
-    B_INITIALIZE_SPINLOCK(&sTMapListLock);
-    new (&sTMapList) ArchTMapList;
+    B_INITIALIZE_SPINLOCK(&sPagingStructuresListLock);
+    new (&sPagingStructuresList) PagingStructuresList;
 
     large_memory_physical_page_ops_init(args, sPhysicalPageMapper,
         sKernelPhysicalPageMapper);

View File

@@ -61,15 +61,15 @@ typedef uint32 page_table_entry;
 typedef uint32 page_directory_entry;
 
 
-struct vm_translation_map_arch_info : DeferredDeletable {
+struct X86PagingStructures : DeferredDeletable {
     page_directory_entry* pgdir_virt;
     uint32 pgdir_phys;
     vint32 ref_count;
     vint32 active_on_cpus;
         // mask indicating on which CPUs the map is currently used
 
-    vm_translation_map_arch_info();
-    virtual ~vm_translation_map_arch_info();
+    X86PagingStructures();
+    virtual ~X86PagingStructures();
 
     inline void AddReference();
     inline void RemoveReference();
@@ -122,14 +122,14 @@ set_page_table_entry_flags(page_table_entry* entry, uint32 flags)
 inline void
-vm_translation_map_arch_info::AddReference()
+X86PagingStructures::AddReference()
 {
     atomic_add(&ref_count, 1);
 }
 
 
 inline void
-vm_translation_map_arch_info::RemoveReference()
+X86PagingStructures::RemoveReference()
 {
     if (atomic_add(&ref_count, -1) == 1)
         Delete();
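A note on the RemoveReference() idiom above: Haiku's atomic_add() returns the value the variable held before the addition, so comparing the result against 1 detects the 1-to-0 transition exactly once, for exactly one caller. The same check with standard atomics, for illustration only:

#include <atomic>
#include <cstdint>

// Returns true exactly once: for the caller that drops the last reference.
bool release_reference(std::atomic<int32_t>& refCount)
{
    return refCount.fetch_sub(1) == 1;
}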

View File

@@ -901,7 +901,7 @@ LargeMemoryPhysicalPageMapper::_AllocatePool(PhysicalPageSlotPool*& _pool)
     // put the page table into the page directory
     int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * 1024);
-    page_directory_entry* entry = &map->ArchData()->pgdir_virt[index];
+    page_directory_entry* entry = &map->PagingStructures()->pgdir_virt[index];
     x86_put_pgtable_in_pgdir(entry, physicalTable,
         B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
     x86_update_all_pgdirs(index, *entry);