Skeleton classes for PAE support.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@37066 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-06-08 21:15:29 +00:00
parent d698997098
commit 5b4d62a261
9 changed files with 594 additions and 1 deletion

src/system/kernel/arch/x86/Jamfile

@@ -10,6 +10,7 @@ UsePrivateHeaders shared ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 32bit ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging pae ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) timers ] ;
@@ -54,6 +55,12 @@ KernelMergeObject kernel_arch_x86.o :
X86PagingStructures32Bit.cpp
X86VMTranslationMap32Bit.cpp
# paging/pae
X86PagingMethodPAE.cpp
X86PagingStructuresPAE.cpp
X86VMTranslationMapPAE.cpp
# timers
x86_apic.cpp
x86_hpet.cpp
x86_pit.cpp

src/system/kernel/arch/x86/arch_vm_translation_map.cpp

@@ -11,6 +11,7 @@
#include <arch/vm_translation_map.h>
#include "paging/32bit/X86PagingMethod32Bit.h"
#include "paging/pae/X86PagingMethodPAE.h"
//#define TRACE_VM_TMAP
@@ -24,6 +25,9 @@
static union {
uint64 align;
char thirty_two[sizeof(X86PagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
char pae[sizeof(X86PagingMethodPAE)];
#endif
} sPagingMethodBuffer;
@@ -68,7 +72,12 @@ arch_vm_translation_map_init(kernel_args *args,
}
#endif
gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
#if B_HAIKU_PHYSICAL_BITS == 64
if (true /* TODO: If needed! */)
gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethodPAE;
else
#endif
gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod32Bit;
return gX86PagingMethod->Init(args, _physicalPageMapper);
}
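
The hunk above picks the paging method by constructing it with placement new inside a statically sized union, so the choice is made without any heap allocation this early in the boot. A minimal standalone sketch of the same pattern follows; the class and function names are illustrative only, not Haiku code.

#include <new>
#include <stdint.h>
#include <stdio.h>

struct PagingMethod {
	virtual ~PagingMethod() {}
	virtual const char* Name() const = 0;
};

struct PagingMethod32Bit : PagingMethod {
	virtual const char* Name() const { return "32 bit"; }
};

struct PagingMethodPAE : PagingMethod {
	virtual const char* Name() const { return "PAE"; }
	char fExtraState[64];
		// stand-in for whatever additional state PAE needs
};

// Storage large enough (and suitably aligned) for either implementation.
static union {
	uint64_t align;
	char thirtyTwo[sizeof(PagingMethod32Bit)];
	char pae[sizeof(PagingMethodPAE)];
} sBuffer;

static PagingMethod* sPagingMethod;

static void
select_paging_method(bool wantPAE)
{
	// Placement new constructs the chosen object in the static buffer.
	if (wantPAE)
		sPagingMethod = new(&sBuffer) PagingMethodPAE;
	else
		sPagingMethod = new(&sBuffer) PagingMethod32Bit;
}

int
main()
{
	select_paging_method(true);
	printf("selected paging method: %s\n", sPagingMethod->Name());
	return 0;
}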

src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.cpp

@@ -0,0 +1,167 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/pae/X86PagingMethodPAE.h"
#include <stdlib.h>
#include <string.h>
#include "paging/pae/X86PagingStructuresPAE.h"
#include "paging/pae/X86VMTranslationMapPAE.h"
#include "paging/x86_physical_page_mapper.h"
#include "paging/x86_physical_page_mapper_large_memory.h"
//#define TRACE_X86_PAGING_METHOD_PAE
#ifdef TRACE_X86_PAGING_METHOD_PAE
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
#if B_HAIKU_PHYSICAL_BITS == 64
using X86LargePhysicalPageMapper::PhysicalPageSlot;
// #pragma mark - X86PagingMethodPAE::PhysicalPageSlotPool
struct X86PagingMethodPAE::PhysicalPageSlotPool
: X86LargePhysicalPageMapper::PhysicalPageSlotPool {
public:
virtual ~PhysicalPageSlotPool();
status_t InitInitial(kernel_args* args);
status_t InitInitialPostArea(kernel_args* args);
virtual status_t AllocatePool(
X86LargePhysicalPageMapper
::PhysicalPageSlotPool*& _pool);
virtual void Map(phys_addr_t physicalAddress,
addr_t virtualAddress);
public:
static PhysicalPageSlotPool sInitialPhysicalPagePool;
private:
};
X86PagingMethodPAE::PhysicalPageSlotPool
X86PagingMethodPAE::PhysicalPageSlotPool::sInitialPhysicalPagePool;
X86PagingMethodPAE::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}
status_t
X86PagingMethodPAE::PhysicalPageSlotPool::InitInitial(kernel_args* args)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
status_t
X86PagingMethodPAE::PhysicalPageSlotPool::InitInitialPostArea(
kernel_args* args)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
void
X86PagingMethodPAE::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
addr_t virtualAddress)
{
// TODO: Implement!
panic("unsupported");
}
status_t
X86PagingMethodPAE::PhysicalPageSlotPool::AllocatePool(
X86LargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
// #pragma mark - X86PagingMethodPAE
X86PagingMethodPAE::X86PagingMethodPAE()
:
fPhysicalPageMapper(NULL),
fKernelPhysicalPageMapper(NULL)
{
}
X86PagingMethodPAE::~X86PagingMethodPAE()
{
}
status_t
X86PagingMethodPAE::Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
status_t
X86PagingMethodPAE::InitPostArea(kernel_args* args)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
status_t
X86PagingMethodPAE::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
status_t
X86PagingMethodPAE::MapEarly(kernel_args* args, addr_t virtualAddress,
phys_addr_t physicalAddress, uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*))
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
bool
X86PagingMethodPAE::IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection)
{
// TODO: Implement!
return false;
}
#endif // B_HAIKU_PHYSICAL_BITS == 64
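
The PhysicalPageSlotPool methods above still panic. The technique they are meant to implement is a small pool of kernel virtual pages whose page table entries get rewritten on demand, so that any physical page can be reached temporarily even when physical memory exceeds the kernel's address space. A hedged sketch of that core step follows; the entry layout constants and helper names are illustrative assumptions, not part of this commit.

#include <stdint.h>

typedef uint64_t pae_page_table_entry;
	// PAE page table entries are 64 bits wide

static const uint64_t kPAEPresentFlag = 1ull << 0;
static const uint64_t kPAEWritableFlag = 1ull << 1;
static const uint64_t kPAEAddressMask = 0x000ffffffffff000ull;
	// bits 12-51 hold the physical page address

static inline void
invalidate_tlb_entry(void* virtualAddress)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
	asm volatile("invlpg (%0)" : : "r"(virtualAddress) : "memory");
#endif
}

// Point the slot's backing page table entry at physicalAddress, then drop
// the stale translation for the slot's virtual page.
static void
map_physical_page_slot(volatile pae_page_table_entry* slotEntry,
	uint64_t physicalAddress, void* slotVirtualAddress)
{
	*slotEntry = (physicalAddress & kPAEAddressMask)
		| kPAEPresentFlag | kPAEWritableFlag;
	invalidate_tlb_entry(slotVirtualAddress);
}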

src/system/kernel/arch/x86/paging/pae/X86PagingMethodPAE.h

@@ -0,0 +1,69 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
#define KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H
#include "paging/pae/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"
#if B_HAIKU_PHYSICAL_BITS == 64
class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;
class X86PagingMethodPAE : public X86PagingMethod {
public:
X86PagingMethodPAE();
virtual ~X86PagingMethodPAE();
virtual status_t Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper);
virtual status_t InitPostArea(kernel_args* args);
virtual status_t CreateTranslationMap(bool kernel,
VMTranslationMap** _map);
virtual status_t MapEarly(kernel_args* args,
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*));
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection);
inline X86PhysicalPageMapper* PhysicalPageMapper() const
{ return fPhysicalPageMapper; }
inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
{ return fKernelPhysicalPageMapper; }
static X86PagingMethodPAE* Method();
private:
struct PhysicalPageSlotPool;
friend struct PhysicalPageSlotPool;
private:
X86PhysicalPageMapper* fPhysicalPageMapper;
TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};
/*static*/ inline X86PagingMethodPAE*
X86PagingMethodPAE::Method()
{
return static_cast<X86PagingMethodPAE*>(gX86PagingMethod);
}
#endif // B_HAIKU_PHYSICAL_BITS == 64
#endif // KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_METHOD_PAE_H

src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.cpp

@@ -0,0 +1,49 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/pae/X86PagingStructuresPAE.h"
#include <KernelExport.h>
#if B_HAIKU_PHYSICAL_BITS == 64
X86PagingStructuresPAE::X86PagingStructuresPAE()
{
}
X86PagingStructuresPAE::~X86PagingStructuresPAE()
{
}
void
X86PagingStructuresPAE::Init()
{
// TODO: Implement!
panic("unsupported");
}
void
X86PagingStructuresPAE::Delete()
{
// TODO: Implement!
panic("unsupported");
}
/*static*/ void
X86PagingStructuresPAE::StaticInit()
{
// TODO: Implement!
panic("unsupported");
}
#endif // B_HAIKU_PHYSICAL_BITS == 64

src/system/kernel/arch/x86/paging/pae/X86PagingStructuresPAE.h

@@ -0,0 +1,31 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_STRUCTURES_PAE_H
#define KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_STRUCTURES_PAE_H
#include "paging/pae/paging.h"
#include "paging/X86PagingStructures.h"
#if B_HAIKU_PHYSICAL_BITS == 64
struct X86PagingStructuresPAE : X86PagingStructures {
X86PagingStructuresPAE();
virtual ~X86PagingStructuresPAE();
void Init();
virtual void Delete();
static void StaticInit();
};
#endif // B_HAIKU_PHYSICAL_BITS == 64
#endif // KERNEL_ARCH_X86_PAGING_PAE_X86_PAGING_STRUCTURES_PAE_H
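
For orientation only: under PAE, CR3 holds the physical address of a 32-byte aligned, four entry page directory pointer table, so a PAE paging-structures object typically has to track that table and the page directories it references. A hedged sketch of that state with illustrative names; this commit intentionally leaves the class empty.

#include <stdint.h>

// Illustrative only, not the layout this class will eventually use.
struct PAEPagingStructuresSketch {
	uint64_t*	virtualPDPT;
		// kernel-visible mapping of the page directory pointer table
	uint64_t	physicalPDPT;
		// physical address of the PDPT, the value destined for CR3
	uint64_t*	virtualPageDirs[4];
		// one page directory per PDPT entry, 512 entries each
	uint64_t	physicalPageDirs[4];
};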

src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.cpp

@@ -0,0 +1,169 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/pae/X86VMTranslationMapPAE.h"
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include "paging/pae/X86PagingMethodPAE.h"
#include "paging/pae/X86PagingStructuresPAE.h"
#include "paging/x86_physical_page_mapper.h"
//#define TRACE_X86_VM_TRANSLATION_MAP_PAE
#ifdef TRACE_X86_VM_TRANSLATION_MAP_PAE
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
#if B_HAIKU_PHYSICAL_BITS == 64
X86VMTranslationMapPAE::X86VMTranslationMapPAE()
:
fPagingStructures(NULL)
{
}
X86VMTranslationMapPAE::~X86VMTranslationMapPAE()
{
// TODO: Implement!
}
status_t
X86VMTranslationMapPAE::Init(bool kernel)
{
TRACE("X86VMTranslationMapPAE::Init()\n");
X86VMTranslationMap::Init(kernel);
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
size_t
X86VMTranslationMapPAE::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
// TODO: Implement!
panic("unsupported");
return 0;
}
status_t
X86VMTranslationMapPAE::Map(addr_t va, phys_addr_t pa, uint32 attributes,
uint32 memoryType, vm_page_reservation* reservation)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
status_t
X86VMTranslationMapPAE::Unmap(addr_t start, addr_t end)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
/*! Caller must have locked the cache of the page to be unmapped.
This object shouldn't be locked.
*/
status_t
X86VMTranslationMapPAE::UnmapPage(VMArea* area, addr_t address,
bool updatePageQueue)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
void
X86VMTranslationMapPAE::UnmapPages(VMArea* area, addr_t base, size_t size,
bool updatePageQueue)
{
// TODO: Implement!
panic("unsupported");
}
void
X86VMTranslationMapPAE::UnmapArea(VMArea* area, bool deletingAddressSpace,
bool ignoreTopCachePageFlags)
{
// TODO: Implement!
panic("unsupported");
}
status_t
X86VMTranslationMapPAE::Query(addr_t va, phys_addr_t *_physical,
uint32 *_flags)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
status_t
X86VMTranslationMapPAE::QueryInterrupt(addr_t va, phys_addr_t *_physical,
uint32 *_flags)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
status_t
X86VMTranslationMapPAE::Protect(addr_t start, addr_t end, uint32 attributes,
uint32 memoryType)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
status_t
X86VMTranslationMapPAE::ClearFlags(addr_t va, uint32 flags)
{
// TODO: Implement!
panic("unsupported");
return B_UNSUPPORTED;
}
bool
X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
bool unmapIfUnaccessed, bool& _modified)
{
// TODO: Implement!
panic("unsupported");
return false;
}
X86PagingStructures*
X86VMTranslationMapPAE::PagingStructures() const
{
return fPagingStructures;
}
#endif // B_HAIKU_PHYSICAL_BITS == 64
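
The Map(), Unmap() and Query() stubs above will eventually have to walk PAE's three level hierarchy. For reference, a 32-bit virtual address splits into 2 bits of page directory pointer table index, 9 bits of page directory index, 9 bits of page table index and a 12-bit page offset. A small illustrative helper, not part of this commit:

#include <stdint.h>
#include <stdio.h>

// Split a 32-bit virtual address into its PAE paging indices.
static void
decompose_pae_address(uint32_t virtualAddress)
{
	unsigned pdptIndex = (virtualAddress >> 30) & 0x3;
		// 4 page directory pointer table entries
	unsigned pageDirIndex = (virtualAddress >> 21) & 0x1ff;
		// 512 page directory entries
	unsigned pageTableIndex = (virtualAddress >> 12) & 0x1ff;
		// 512 page table entries
	unsigned pageOffset = virtualAddress & 0xfff;
		// 4 KiB pages

	printf("0x%08x -> pdpt %u, page dir %u, page table %u, offset 0x%03x\n",
		(unsigned)virtualAddress, pdptIndex, pageDirIndex, pageTableIndex,
		pageOffset);
}

int
main()
{
	decompose_pae_address(0x80123456);
		// prints: 0x80123456 -> pdpt 2, page dir 0, page table 291, offset 0x456
	return 0;
}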

src/system/kernel/arch/x86/paging/pae/X86VMTranslationMapPAE.h

@@ -0,0 +1,71 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_PAE_X86_VM_TRANSLATION_MAP_PAE_H
#define KERNEL_ARCH_X86_PAGING_PAE_X86_VM_TRANSLATION_MAP_PAE_H
#include "paging/X86VMTranslationMap.h"
#if B_HAIKU_PHYSICAL_BITS == 64
struct X86PagingStructuresPAE;
struct X86VMTranslationMapPAE : X86VMTranslationMap {
X86VMTranslationMapPAE();
virtual ~X86VMTranslationMapPAE();
status_t Init(bool kernel);
virtual size_t MaxPagesNeededToMap(addr_t start,
addr_t end) const;
virtual status_t Map(addr_t virtualAddress,
phys_addr_t physicalAddress,
uint32 attributes, uint32 memoryType,
vm_page_reservation* reservation);
virtual status_t Unmap(addr_t start, addr_t end);
virtual status_t UnmapPage(VMArea* area, addr_t address,
bool updatePageQueue);
virtual void UnmapPages(VMArea* area, addr_t base,
size_t size, bool updatePageQueue);
virtual void UnmapArea(VMArea* area,
bool deletingAddressSpace,
bool ignoreTopCachePageFlags);
virtual status_t Query(addr_t virtualAddress,
phys_addr_t* _physicalAddress,
uint32* _flags);
virtual status_t QueryInterrupt(addr_t virtualAddress,
phys_addr_t* _physicalAddress,
uint32* _flags);
virtual status_t Protect(addr_t base, addr_t top,
uint32 attributes, uint32 memoryType);
virtual status_t ClearFlags(addr_t virtualAddress,
uint32 flags);
virtual bool ClearAccessedAndModified(
VMArea* area, addr_t address,
bool unmapIfUnaccessed,
bool& _modified);
virtual X86PagingStructures* PagingStructures() const;
inline X86PagingStructuresPAE* PagingStructuresPAE() const
{ return fPagingStructures; }
private:
X86PagingStructuresPAE* fPagingStructures;
};
#endif // B_HAIKU_PHYSICAL_BITS == 64
#endif // KERNEL_ARCH_X86_PAGING_PAE_X86_VM_TRANSLATION_MAP_PAE_H

src/system/kernel/arch/x86/paging/pae/paging.h

@@ -0,0 +1,21 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_PAE_PAGING_H
#define KERNEL_ARCH_X86_PAGING_PAE_PAGING_H
#include <SupportDefs.h>
#if B_HAIKU_PHYSICAL_BITS == 64
// TODO:...
#endif // B_HAIKU_PHYSICAL_BITS == 64
#endif // KERNEL_ARCH_X86_PAGING_PAE_PAGING_H
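
The TODO above defers the actual PAE definitions to a later commit. As a hedged illustration of the kind of constants such a header usually ends up providing (the names below are guesses, not the header's eventual contents): PAE entries are 64 bits wide, the top level table has 4 entries, and each page directory or page table holds 512 entries, covering 1 GiB and 2 MiB of address space respectively.

#include <SupportDefs.h>

// Illustrative sketch only.
typedef uint64 pae_page_directory_pointer_table_entry;
typedef uint64 pae_page_directory_entry;
typedef uint64 pae_page_table_entry;

static const uint32 kPAEPageDirPointerTableEntryCount = 4;
static const uint32 kPAEPageDirEntryCount = 512;
static const uint32 kPAEPageTableEntryCount = 512;

static const uint32 kPAEPageTableRange = kPAEPageTableEntryCount * 4096;
	// 2 MiB of address space covered by one page table
static const uint32 kPAEPageDirRange
	= kPAEPageDirEntryCount * kPAEPageTableRange;
	// 1 GiB of address space covered by one page directory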