* Introduced global variable gX86PagingMethod, so the paging method can be
  accessed from anywhere. Added static X86PagingMethod32Bit::Method()
  returning it as the subtype pointer -- to be used in the code related to
  that method only, of course.
* Made a bunch of static variables non-static members of
  X86PagingMethod32Bit and added accessors for them. This makes them
  accessible in other source files (allowing for more refactoring) and saves
  memory when we actually have another paging method implementation.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37062 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2010-06-08 16:18:57 +00:00
parent f0675bf757
commit 2434bdc4d9
5 changed files with 106 additions and 69 deletions

View File

@ -21,7 +21,10 @@
#endif
static X86PagingMethod* sPagingMethod;
static union {
uint64 align;
char thirty_two[sizeof(X86PagingMethod32Bit)];
} sPagingMethodBuffer;
// #pragma mark - VM API
@ -30,7 +33,7 @@ static X86PagingMethod* sPagingMethod;
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
return sPagingMethod->CreateTranslationMap(kernel, _map);
return gX86PagingMethod->CreateTranslationMap(kernel, _map);
}
@ -65,9 +68,9 @@ arch_vm_translation_map_init(kernel_args *args,
}
#endif
sPagingMethod = X86PagingMethod32Bit::Create();
gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
return sPagingMethod->Init(args, _physicalPageMapper);
return gX86PagingMethod->Init(args, _physicalPageMapper);
}
@ -83,7 +86,7 @@ arch_vm_translation_map_init_post_area(kernel_args *args)
{
TRACE("vm_translation_map_init_post_area: entry\n");
return sPagingMethod->InitPostArea(args);
return gX86PagingMethod->InitPostArea(args);
}
@ -93,7 +96,7 @@ arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
{
TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
return sPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
return gX86PagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}
@ -113,5 +116,5 @@ bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
uint32 protection)
{
return sPagingMethod->IsKernelPageAccessible(virtualAddress, protection);
return gX86PagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}

View File

@ -47,17 +47,6 @@ using X86LargePhysicalPageMapper::PhysicalPageSlot;
static const size_t kPageTableAlignment = 1024 * B_PAGE_SIZE;
static X86PagingMethod32Bit sMethod;
static page_table_entry *sPageHole = NULL;
static page_directory_entry *sPageHolePageDir = NULL;
static uint32 sKernelPhysicalPageDirectory = 0;
static page_directory_entry *sKernelVirtualPageDirectory = NULL;
static X86PhysicalPageMapper* sPhysicalPageMapper;
static TranslationMapPhysicalPageMapper* sKernelPhysicalPageMapper;
// #pragma mark -
@ -65,12 +54,14 @@ static TranslationMapPhysicalPageMapper* sKernelPhysicalPageMapper;
static status_t
early_query(addr_t va, phys_addr_t *_physicalAddress)
{
if ((sPageHolePageDir[VADDR_TO_PDENT(va)] & X86_PDE_PRESENT) == 0) {
X86PagingMethod32Bit* method = X86PagingMethod32Bit::Method();
if ((method->PageHolePageDir()[VADDR_TO_PDENT(va)] & X86_PDE_PRESENT)
== 0) {
// no pagetable here
return B_ERROR;
}
page_table_entry* pentry = sPageHole + va / B_PAGE_SIZE;
page_table_entry* pentry = method->PageHole() + va / B_PAGE_SIZE;
if ((*pentry & X86_PTE_PRESENT) == 0) {
// page mapping not valid
return B_ERROR;
@ -168,11 +159,14 @@ x86_early_prepare_page_tables(page_table_entry* pageTables, addr_t address,
{
addr_t virtualTable = (addr_t)pageTables;
page_directory_entry* pageHolePageDir
= X86PagingMethod32Bit::Method()->PageHolePageDir();
for (size_t i = 0; i < (size / (B_PAGE_SIZE * 1024));
i++, virtualTable += B_PAGE_SIZE) {
phys_addr_t physicalTable = 0;
early_query(virtualTable, &physicalTable);
page_directory_entry* entry = &sPageHolePageDir[
page_directory_entry* entry = &pageHolePageDir[
(address / (B_PAGE_SIZE * 1024)) + i];
x86_put_pgtable_in_pgdir(entry, physicalTable,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
@ -230,10 +224,12 @@ X86VMTranslationMap32Bit::Init(bool kernel)
if (fPagingStructures == NULL)
return B_NO_MEMORY;
X86PagingMethod32Bit* method = X86PagingMethod32Bit::Method();
if (!kernel) {
// user
// allocate a physical page mapper
status_t error = sPhysicalPageMapper
status_t error = method->PhysicalPageMapper()
->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
if (error != B_OK)
return error;
@ -250,15 +246,15 @@ X86VMTranslationMap32Bit::Init(bool kernel)
(addr_t)virtualPageDir, &physicalPageDir);
fPagingStructures->Init(virtualPageDir, physicalPageDir,
sKernelVirtualPageDirectory);
method->KernelVirtualPageDirectory());
} else {
// kernel
// get the physical page mapper
fPageMapper = sKernelPhysicalPageMapper;
fPageMapper = method->KernelPhysicalPageMapper();
// we already know the kernel pgdir mapping
fPagingStructures->Init(sKernelVirtualPageDirectory,
sKernelPhysicalPageDirectory, NULL);
fPagingStructures->Init(method->KernelVirtualPageDirectory(),
method->KernelPhysicalPageDirectory(), NULL);
}
return B_OK;
@ -813,8 +809,8 @@ X86VMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
}
// map page table entry
page_table_entry* pt
= (page_table_entry*)sPhysicalPageMapper->InterruptGetPageTableAt(
page_table_entry* pt = (page_table_entry*)X86PagingMethod32Bit::Method()
->PhysicalPageMapper()->InterruptGetPageTableAt(
pd[index] & X86_PDE_ADDRESS_MASK);
page_table_entry entry = pt[VADDR_TO_PTENT(va)];
@ -1283,6 +1279,13 @@ X86PagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
X86PagingMethod32Bit::X86PagingMethod32Bit()
:
fPageHole(NULL),
fPageHolePageDir(NULL),
fKernelPhysicalPageDirectory(0),
fKernelVirtualPageDirectory(NULL),
fPhysicalPageMapper(NULL),
fKernelPhysicalPageMapper(NULL)
{
}
@ -1292,13 +1295,6 @@ X86PagingMethod32Bit::~X86PagingMethod32Bit()
}
/*static*/ X86PagingMethod*
X86PagingMethod32Bit::Create()
{
return new(&sMethod) X86PagingMethod32Bit;
}
status_t
X86PagingMethod32Bit::Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
@ -1306,23 +1302,23 @@ X86PagingMethod32Bit::Init(kernel_args* args,
TRACE("vm_translation_map_init: entry\n");
// page hole set up in stage2
sPageHole = (page_table_entry *)args->arch_args.page_hole;
fPageHole = (page_table_entry*)args->arch_args.page_hole;
// calculate where the pgdir would be
sPageHolePageDir = (page_directory_entry*)
fPageHolePageDir = (page_directory_entry*)
(((addr_t)args->arch_args.page_hole)
+ (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
// clear out the bottom 2 GB, unmap everything
memset(sPageHolePageDir + FIRST_USER_PGDIR_ENT, 0,
memset(fPageHolePageDir + FIRST_USER_PGDIR_ENT, 0,
sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
sKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
sKernelVirtualPageDirectory = (page_directory_entry*)
fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
fKernelVirtualPageDirectory = (page_directory_entry*)
args->arch_args.vir_pgdir;
#ifdef TRACE_X86_PAGING_METHOD_32_BIT
TRACE("page hole: %p, page dir: %p\n", sPageHole, sPageHolePageDir);
TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
sKernelVirtualPageDirectory, sKernelPhysicalPageDirectory);
fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif
X86PagingStructures32Bit::StaticInit();
@ -1339,8 +1335,8 @@ X86PagingMethod32Bit::Init(kernel_args* args,
}
// create physical page mapper
large_memory_physical_page_ops_init(args, pool, sPhysicalPageMapper,
sKernelPhysicalPageMapper);
large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
fKernelPhysicalPageMapper);
// TODO: Select the best page mapper!
// enable global page feature if available
@ -1352,7 +1348,7 @@ X86PagingMethod32Bit::Init(kernel_args* args,
TRACE("vm_translation_map_init: done\n");
*_physicalPageMapper = sPhysicalPageMapper;
*_physicalPageMapper = fPhysicalPageMapper;
return B_OK;
}
@ -1367,11 +1363,11 @@ X86PagingMethod32Bit::InitPostArea(kernel_args* args)
area_id area;
// unmap the page hole hack we were using before
sKernelVirtualPageDirectory[1023] = 0;
sPageHolePageDir = NULL;
sPageHole = NULL;
fKernelVirtualPageDirectory[1023] = 0;
fPageHolePageDir = NULL;
fPageHole = NULL;
temp = (void *)sKernelVirtualPageDirectory;
temp = (void*)fKernelVirtualPageDirectory;
area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
@ -1418,7 +1414,7 @@ X86PagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
// check to see if a page table exists for this range
int index = VADDR_TO_PDENT(virtualAddress);
if ((sPageHolePageDir[index] & X86_PDE_PRESENT) == 0) {
if ((fPageHolePageDir[index] & X86_PDE_PRESENT) == 0) {
phys_addr_t pgtable;
page_directory_entry *e;
// we need to allocate a pgtable
@ -1430,23 +1426,23 @@ X86PagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
"pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);
// put it in the pgdir
e = &sPageHolePageDir[index];
e = &fPageHolePageDir[index];
x86_put_pgtable_in_pgdir(e, pgtable, attributes);
// zero it out in it's new mapping
memset((unsigned int*)((addr_t)sPageHole
memset((unsigned int*)((addr_t)fPageHole
+ (virtualAddress / B_PAGE_SIZE / 1024) * B_PAGE_SIZE),
0, B_PAGE_SIZE);
}
ASSERT_PRINT(
(sPageHole[virtualAddress / B_PAGE_SIZE] & X86_PTE_PRESENT) == 0,
(fPageHole[virtualAddress / B_PAGE_SIZE] & X86_PTE_PRESENT) == 0,
"virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
", existing pte: %#" B_PRIx32, virtualAddress, sPageHolePageDir[index],
sPageHole[virtualAddress / B_PAGE_SIZE]);
", existing pte: %#" B_PRIx32, virtualAddress, fPageHolePageDir[index],
fPageHole[virtualAddress / B_PAGE_SIZE]);
// now, fill in the pentry
put_page_table_entry_in_pgtable(sPageHole + virtualAddress / B_PAGE_SIZE,
put_page_table_entry_in_pgtable(fPageHole + virtualAddress / B_PAGE_SIZE,
physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));
return B_OK;
@ -1461,25 +1457,24 @@ X86PagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
// Always set it to make sure the TLBs don't contain obsolete data.
uint32 physicalPageDirectory;
read_cr3(physicalPageDirectory);
write_cr3(sKernelPhysicalPageDirectory);
write_cr3(fKernelPhysicalPageDirectory);
// get the page directory entry for the address
page_directory_entry pageDirectoryEntry;
uint32 index = VADDR_TO_PDENT(virtualAddress);
if (physicalPageDirectory == sKernelPhysicalPageDirectory) {
pageDirectoryEntry = sKernelVirtualPageDirectory[index];
} else if (sPhysicalPageMapper != NULL) {
if (physicalPageDirectory == fKernelPhysicalPageDirectory) {
pageDirectoryEntry = fKernelVirtualPageDirectory[index];
} else if (fPhysicalPageMapper != NULL) {
// map the original page directory and get the entry
void* handle;
addr_t virtualPageDirectory;
status_t error = sPhysicalPageMapper->GetPageDebug(
status_t error = fPhysicalPageMapper->GetPageDebug(
physicalPageDirectory, &virtualPageDirectory, &handle);
if (error == B_OK) {
pageDirectoryEntry
= ((page_directory_entry*)virtualPageDirectory)[index];
sPhysicalPageMapper->PutPageDebug(virtualPageDirectory,
handle);
fPhysicalPageMapper->PutPageDebug(virtualPageDirectory, handle);
} else
pageDirectoryEntry = 0;
} else
@ -1490,22 +1485,22 @@ X86PagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
index = VADDR_TO_PTENT(virtualAddress);
if ((pageDirectoryEntry & X86_PDE_PRESENT) != 0
&& sPhysicalPageMapper != NULL) {
&& fPhysicalPageMapper != NULL) {
void* handle;
addr_t virtualPageTable;
status_t error = sPhysicalPageMapper->GetPageDebug(
status_t error = fPhysicalPageMapper->GetPageDebug(
pageDirectoryEntry & X86_PDE_ADDRESS_MASK, &virtualPageTable,
&handle);
if (error == B_OK) {
pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
sPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
} else
pageTableEntry = 0;
} else
pageTableEntry = 0;
// switch back to the original page directory
if (physicalPageDirectory != sKernelPhysicalPageDirectory)
if (physicalPageDirectory != fKernelPhysicalPageDirectory)
write_cr3(physicalPageDirectory);
if ((pageTableEntry & X86_PTE_PRESENT) == 0)

View File

@ -11,13 +11,15 @@
#include "paging/X86PagingStructures.h"
class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;
class X86PagingMethod32Bit : public X86PagingMethod {
public:
X86PagingMethod32Bit();
virtual ~X86PagingMethod32Bit();
static X86PagingMethod* Create();
virtual status_t Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper);
virtual status_t InitPostArea(kernel_args* args);
@ -34,9 +36,40 @@ public:
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection);
inline page_table_entry* PageHole() const
{ return fPageHole; }
inline page_directory_entry* PageHolePageDir() const
{ return fPageHolePageDir; }
inline uint32 KernelPhysicalPageDirectory() const
{ return fKernelPhysicalPageDirectory; }
inline page_directory_entry* KernelVirtualPageDirectory() const
{ return fKernelVirtualPageDirectory; }
inline X86PhysicalPageMapper* PhysicalPageMapper() const
{ return fPhysicalPageMapper; }
inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
{ return fKernelPhysicalPageMapper; }
static X86PagingMethod32Bit* Method();
private:
struct PhysicalPageSlotPool;
private:
page_table_entry* fPageHole;
page_directory_entry* fPageHolePageDir;
uint32 fKernelPhysicalPageDirectory;
page_directory_entry* fKernelVirtualPageDirectory;
X86PhysicalPageMapper* fPhysicalPageMapper;
TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};
/*static*/ inline X86PagingMethod32Bit*
X86PagingMethod32Bit::Method()
{
return static_cast<X86PagingMethod32Bit*>(gX86PagingMethod);
}
#endif // KERNEL_ARCH_X86_PAGING_32_BIT_X86_PAGING_METHOD_32_BIT_H

View File

@ -7,6 +7,9 @@
#include "paging/X86PagingMethod.h"
X86PagingMethod* gX86PagingMethod;
X86PagingMethod::~X86PagingMethod()
{
}

View File

@ -38,4 +38,7 @@ public:
};
extern X86PagingMethod* gX86PagingMethod;
#endif // KERNEL_ARCH_X86_PAGING_X86_PAGING_METHOD_H