ARM: sync up VM code with x86

No big functional reason for this, but it's better to keep it in sync now
than to have to do lots of work later on when there are major changes.
Once I have it fully fleshed out for ARM, I might take a look at whether
we can generalise it a little more, as there's lots of code that is
_exactly_ the same for both platforms (and other platforms in
progress use the same code).
Ithamar R. Adema 2014-09-08 00:49:30 +02:00
parent 95b6779381
commit 1819aa71ee
14 changed files with 189 additions and 105 deletions
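
The heart of the diff below is the physical page mapper setup copied over from x86: instead of one statically allocated slot pool, the paging method now keeps an array of pools sized for the worst case (MAX_INITIAL_POOLS), works out at boot how many of them the live CPU count actually needs (_GetInitialPoolCount()), constructs each one with placement new, and hands the whole array plus pool count and element size to large_memory_physical_page_ops_init(). The following is a minimal, self-contained sketch of that pattern, not the actual Haiku kernel code; the constants (8 CPUs, 33 slots per CPU, 1024 slots per pool) and the helper names are simplified stand-ins for SMP_MAX_CPUS, TOTAL_SLOTS_PER_CPU, EXTRA_SLOTS and the real kernel functions.

#include <cstddef>
#include <cstdio>
#include <new>

static const int kMaxCPUs = 8;			// stand-in for SMP_MAX_CPUS
static const int kSlotsPerCPU = 33;		// 16 user + 16 kernel + 1 interrupt slot
static const int kExtraSlots = 2;		// stand-in for EXTRA_SLOTS
static const int kSlotsPerPool = 1024;	// one page table's worth of slots

// Worst-case number of pools, rounded up to whole pools (a compile-time
// constant, like MAX_INITIAL_POOLS).
static const int kMaxInitialPools
	= (kMaxCPUs * kSlotsPerCPU + kExtraSlots + kSlotsPerPool - 1) / kSlotsPerPool;

struct PhysicalPageSlotPool {
	int slotCount;

	int InitInitial(int index)
	{
		// The real pool would carve page table entries out of early boot
		// memory here; this stub only records a size.
		slotCount = kSlotsPerPool;
		printf("pool %d initialised with %d slots\n", index, slotCount);
		return 0;
	}
};

// Static storage, constructed lazily with placement new; nothing can be
// heap-allocated this early in boot.
static PhysicalPageSlotPool sInitialPools[kMaxInitialPools];

static int
GetInitialPoolCount(int cpuCount)
{
	// Round the required slot count up to whole pools.
	int requiredSlots = cpuCount * kSlotsPerCPU + kExtraSlots;
	return (requiredSlots + kSlotsPerPool - 1) / kSlotsPerPool;
}

// Stand-in for large_memory_physical_page_ops_init(): the mapper is handed an
// array of pools plus the element stride instead of a single pool.
static void
RegisterInitialPools(PhysicalPageSlotPool* pools, int poolCount, size_t poolSize)
{
	for (int i = 0; i < poolCount; i++) {
		PhysicalPageSlotPool* pool
			= (PhysicalPageSlotPool*)((char*)pools + i * poolSize);
		printf("registering pool %d (%d slots)\n", i, pool->slotCount);
	}
}

int
main()
{
	int cpuCount = 4;	// the kernel would use smp_get_num_cpus()
	int poolCount = GetInitialPoolCount(cpuCount);

	for (int i = 0; i < poolCount; i++) {
		new(&sInitialPools[i]) PhysicalPageSlotPool;
		sInitialPools[i].InitInitial(i);
	}

	RegisterInitialPools(sInitialPools, poolCount, sizeof(sInitialPools[0]));
	return 0;
}

For a 4-CPU boot this yields a single pool (4 * 33 + 2 = 134 required slots, well under the 1024 slots one page table provides), which is exactly what _GetInitialPoolCount() in the diff would return.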

View File

@ -35,7 +35,6 @@ KernelMergeObject kernel_arch_arm.o :
arch_atomic32.cpp
# paging
arm_physical_page_mapper.cpp
arm_physical_page_mapper_large_memory.cpp
ARMPagingMethod.cpp
ARMPagingStructures.cpp

View File

@ -16,6 +16,7 @@
#include <AutoDeleter.h>
#include <arch/smp.h>
#include <arch_system_info.h>
#include <boot/kernel_args.h>
#include <int.h>
@ -37,11 +38,14 @@
#endif
#define MAX_INITIAL_POOLS \
(ROUNDUP(SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS, 1024) / 1024)
using ARMLargePhysicalPageMapper::PhysicalPageSlot;
// #pragma mark - ARMPagingMethod32Bit::PhysicalPageSlotPool
// #pragma mark - X86PagingMethod32Bit::PhysicalPageSlotPool
struct ARMPagingMethod32Bit::PhysicalPageSlotPool
: ARMLargePhysicalPageMapper::PhysicalPageSlotPool {
@ -61,7 +65,7 @@ public:
addr_t virtualAddress);
public:
static PhysicalPageSlotPool sInitialPhysicalPagePool;
static PhysicalPageSlotPool sInitialPhysicalPagePool[MAX_INITIAL_POOLS];
private:
area_id fDataArea;
@ -72,7 +76,8 @@ private:
ARMPagingMethod32Bit::PhysicalPageSlotPool
ARMPagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool;
ARMPagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool[
MAX_INITIAL_POOLS];
ARMPagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
@ -96,6 +101,11 @@ ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
if (pageTable == 0) {
panic("ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitial(): "
"Failed to allocate memory for page table!");
return B_ERROR;
}
// prepare the page table
_EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);
@ -263,31 +273,36 @@ status_t
ARMPagingMethod32Bit::Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
{
TRACE("vm_translation_map_init: entry\n");
TRACE("X86PagingMethod32Bit::Init(): entry\n");
fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
fKernelVirtualPageDirectory = (page_directory_entry*)
args->arch_args.vir_pgdir;
#ifdef TRACE_X86_PAGING_METHOD_32_BIT
TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif
ARMPagingStructures32Bit::StaticInit();
// create the initial pool for the physical page mapper
PhysicalPageSlotPool* pool
= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
PhysicalPageSlotPool;
status_t error = pool->InitInitial(args);
if (error != B_OK) {
panic("ARMPagingMethod32Bit::Init(): Failed to create initial pool "
"for physical page mapper!");
return error;
// create the initial pools for the physical page mapper
int32 poolCount = _GetInitialPoolCount();
PhysicalPageSlotPool* pool = PhysicalPageSlotPool::sInitialPhysicalPagePool;
for (int32 i = 0; i < poolCount; i++) {
new(&pool[i]) PhysicalPageSlotPool;
status_t error = pool[i].InitInitial(args);
if (error != B_OK) {
panic("ARMPagingMethod32Bit::Init(): Failed to create initial pool "
"for physical page mapper!");
return error;
}
}
// create physical page mapper
large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
fKernelPhysicalPageMapper);
large_memory_physical_page_ops_init(args, pool, poolCount, sizeof(*pool),
fPhysicalPageMapper, fKernelPhysicalPageMapper);
// TODO: Select the best page mapper!
// enable global page feature if available
@ -309,20 +324,21 @@ status_t
ARMPagingMethod32Bit::InitPostArea(kernel_args* args)
{
void *temp;
status_t error;
area_id area;
temp = (void*)fKernelVirtualPageDirectory;
area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS,
ARM_MMU_L1_TABLE_SIZE, B_ALREADY_WIRED, B_KERNEL_READ_AREA
| B_KERNEL_WRITE_AREA);
area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, ARM_MMU_L1_TABLE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return area;
error = PhysicalPageSlotPool::sInitialPhysicalPagePool
.InitInitialPostArea(args);
if (error != B_OK)
return error;
int32 poolCount = _GetInitialPoolCount();
for (int32 i = 0; i < poolCount; i++) {
status_t error = PhysicalPageSlotPool::sInitialPhysicalPagePool[i]
.InitInitialPostArea(args);
if (error != B_OK)
return error;
}
return B_OK;
}
@ -358,7 +374,7 @@ get_free_pgtable(kernel_args* args)
status_t
ARMPagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
phys_addr_t physicalAddress, uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*))
page_num_t (*get_free_page)(kernel_args*))
{
// check to see if a page table exists for this range
int index = VADDR_TO_PDENT(virtualAddress);
@ -404,9 +420,8 @@ ARMPagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
#if 0
// We only trust the kernel team's page directory. So switch to it first.
// Always set it to make sure the TLBs don't contain obsolete data.
uint32 physicalPageDirectory;
read_cr3(physicalPageDirectory);
write_cr3(fKernelPhysicalPageDirectory);
uint32 physicalPageDirectory = x86_read_cr3();
x86_write_cr3(fKernelPhysicalPageDirectory);
// get the page directory entry for the address
page_directory_entry pageDirectoryEntry;
@ -433,12 +448,12 @@ ARMPagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
page_table_entry pageTableEntry;
index = VADDR_TO_PTENT(virtualAddress);
if ((pageDirectoryEntry & ARM_PDE_PRESENT) != 0
if ((pageDirectoryEntry & X86_PDE_PRESENT) != 0
&& fPhysicalPageMapper != NULL) {
void* handle;
addr_t virtualPageTable;
status_t error = fPhysicalPageMapper->GetPageDebug(
pageDirectoryEntry & ARM_PDE_ADDRESS_MASK, &virtualPageTable,
pageDirectoryEntry & X86_PDE_ADDRESS_MASK, &virtualPageTable,
&handle);
if (error == B_OK) {
pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
@ -450,14 +465,14 @@ ARMPagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
// switch back to the original page directory
if (physicalPageDirectory != fKernelPhysicalPageDirectory)
write_cr3(physicalPageDirectory);
x86_write_cr3(physicalPageDirectory);
if ((pageTableEntry & ARM_PTE_PRESENT) == 0)
if ((pageTableEntry & X86_PTE_PRESENT) == 0)
return false;
// present means kernel-readable, so check for writable
return (protection & B_KERNEL_WRITE_AREA) == 0
|| (pageTableEntry & ARM_PTE_WRITABLE) != 0;
|| (pageTableEntry & X86_PTE_WRITABLE) != 0;
#endif
//IRA: fix the above!
return true;
@ -487,24 +502,33 @@ ARMPagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
page_table_entry page = (physicalAddress & ARM_PTE_ADDRESS_MASK)
| ARM_MMU_L2_TYPE_SMALLEXT;
#if 0 //IRA
| ARM_PTE_PRESENT | (globalPage ? ARM_PTE_GLOBAL : 0)
| X86_PTE_PRESENT | (globalPage ? X86_PTE_GLOBAL : 0)
| MemoryTypeToPageTableEntryFlags(memoryType);
// if the page is user accessible, it's automatically
// accessible in kernel space, too (but with the same
// protection)
if ((attributes & B_USER_PROTECTION) != 0) {
page |= ARM_PTE_USER;
page |= X86_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
page |= ARM_PTE_WRITABLE;
page |= X86_PTE_WRITABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
page |= ARM_PTE_WRITABLE;
page |= X86_PTE_WRITABLE;
#endif
// put it in the page table
*(volatile page_table_entry*)entry = page;
}
inline int32
ARMPagingMethod32Bit::_GetInitialPoolCount()
{
int32 requiredSlots = smp_get_num_cpus() * TOTAL_SLOTS_PER_CPU
+ EXTRA_SLOTS;
return (requiredSlots + 1023) / 1024;
}
/*static*/ void
ARMPagingMethod32Bit::_EarlyPreparePageTables(page_table_entry* pageTables,
addr_t address, size_t size)

View File

@ -32,7 +32,7 @@ public:
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*));
page_num_t (*get_free_page)(kernel_args*));
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection);
@ -77,6 +77,8 @@ private:
friend struct PhysicalPageSlotPool;
private:
inline int32 _GetInitialPoolCount();
static void _EarlyPreparePageTables(
page_table_entry* pageTables,
addr_t address, size_t size);
@ -150,7 +152,7 @@ ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
// (usually only write-combining for the frame buffer).
switch (memoryType) {
case B_MTR_UC:
return ARM_PTE_CACHING_DISABLED | ARM_PTE_WRITE_THROUGH;
return X86_PTE_CACHING_DISABLED | X86_PTE_WRITE_THROUGH;
case B_MTR_WC:
// ARM_PTE_WRITE_THROUGH would be closer, but the combination with
@ -158,7 +160,7 @@ ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
return 0;
case B_MTR_WT:
return ARM_PTE_WRITE_THROUGH;
return X86_PTE_WRITE_THROUGH;
case B_MTR_WP:
case B_MTR_WB:

View File

@ -101,8 +101,7 @@ ARMPagingStructures32Bit::Delete()
#if 0
// this sanity check can be enabled when corruption due to
// overwriting an active page directory is suspected
uint32 activePageDirectory;
read_cr3(activePageDirectory);
uint32 activePageDirectory = x86_read_cr3();
if (activePageDirectory == pgdir_phys)
panic("deleting a still active page directory\n");
#endif

View File

@ -262,6 +262,62 @@ ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
}
status_t
ARMVMTranslationMap32Bit::DebugMarkRangePresent(addr_t start, addr_t end,
bool markPresent)
{
#if 0
start = ROUNDDOWN(start, B_PAGE_SIZE);
if (start >= end)
return B_OK;
page_directory_entry *pd = fPagingStructures->pgdir_virt;
do {
int index = VADDR_TO_PDENT(start);
if ((pd[index] & X86_PDE_PRESENT) == 0) {
// no page table here, move the start up to access the next page
// table
start = ROUNDUP(start + 1, kPageTableAlignment);
continue;
}
Thread* thread = thread_get_current_thread();
ThreadCPUPinner pinner(thread);
page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
pd[index] & X86_PDE_ADDRESS_MASK);
for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
index++, start += B_PAGE_SIZE) {
if ((pt[index] & X86_PTE_PRESENT) == 0) {
if (!markPresent)
continue;
X86PagingMethod32Bit::SetPageTableEntryFlags(&pt[index],
X86_PTE_PRESENT);
} else {
if (markPresent)
continue;
page_table_entry oldEntry
= X86PagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
X86_PTE_PRESENT);
if ((oldEntry & X86_PTE_ACCESSED) != 0) {
// Note, that we only need to invalidate the address, if the
// accessed flags was set, since only then the entry could
// have been in any TLB.
InvalidatePage(start);
}
}
}
} while (start != 0 && start < end);
#endif
return B_OK;
}
/*! Caller must have locked the cache of the page to be unmapped.
This object shouldn't be locked.
*/
@ -579,12 +635,12 @@ ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
#if 0 //IRA
// read in the page state flags
if ((entry & ARM_PTE_USER) != 0) {
*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
if ((entry & X86_PTE_USER) != 0) {
*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
| B_READ_AREA;
}
*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
| B_KERNEL_READ_AREA
| ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
@ -627,16 +683,16 @@ ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
#if 0
// read in the page state flags
if ((entry & ARM_PTE_USER) != 0) {
*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
if ((entry & X86_PTE_USER) != 0) {
*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
| B_READ_AREA;
}
*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
*_flags |= ((entry & X86_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
| B_KERNEL_READ_AREA
| ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
| ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
| ((entry & X86_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
| ((entry & X86_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
| ((entry & X86_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
if (*_physical != 0)
@ -733,8 +789,8 @@ ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
return B_OK;
}
#if 0 //IRA
uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? ARM_PTE_DIRTY : 0)
| ((flags & PAGE_ACCESSED) ? ARM_PTE_ACCESSED : 0);
uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_PTE_DIRTY : 0)
| ((flags & PAGE_ACCESSED) ? X86_PTE_ACCESSED : 0);
#else
uint32 flagsToClear = 0;
#endif
@ -820,7 +876,7 @@ ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
pinner.Unlock();
_modified = true /* (oldEntry & ARM_PTE_DIRTY) != 0 */; // XXX IRA
_modified = true /* (oldEntry & X86_PTE_DIRTY) != 0 */; // XXX IRA
if (true /*(oldEntry & ARM_PTE_ACCESSED) != 0*/) {
// Note, that we only need to invalidate the address, if the

View File

@ -27,6 +27,9 @@ struct ARMVMTranslationMap32Bit : ARMVMTranslationMap {
vm_page_reservation* reservation);
virtual status_t Unmap(addr_t start, addr_t end);
virtual status_t DebugMarkRangePresent(addr_t start, addr_t end,
bool markPresent);
virtual status_t UnmapPage(VMArea* area, addr_t address,
bool updatePageQueue);
virtual void UnmapPages(VMArea* area, addr_t base,

View File

@ -19,11 +19,11 @@
#include <arm_mmu.h>
#define FIRST_USER_PGDIR_ENT (VADDR_TO_PDENT(USER_BASE))
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, \
B_PAGE_SIZE * 1024)))
#define FIRST_KERNEL_PGDIR_ENT (VADDR_TO_PDENT(KERNEL_BASE))
#define NUM_KERNEL_PGDIR_ENTS (VADDR_TO_PDENT(KERNEL_SIZE))
#define FIRST_USER_PGDIR_ENT (VADDR_TO_PDENT(USER_BASE))
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, \
B_PAGE_SIZE * 1024)))
#define FIRST_KERNEL_PGDIR_ENT (VADDR_TO_PDENT(KERNEL_BASE))
#define NUM_KERNEL_PGDIR_ENTS (VADDR_TO_PDENT(KERNEL_SIZE))
static const size_t kPageTableAlignment = 1024 * B_PAGE_SIZE;

View File

@ -8,6 +8,8 @@
#include <SupportDefs.h>
#include <vm/vm_types.h>
struct kernel_args;
struct VMPhysicalPageMapper;
@ -30,7 +32,7 @@ public:
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*))
page_num_t (*get_free_page)(kernel_args*))
= 0;
virtual bool IsKernelPageAccessible(addr_t virtualAddress,

View File

@ -18,7 +18,7 @@
struct ARMPagingStructures : DeferredDeletable {
uint32 pgdir_phys;
phys_addr_t pgdir_phys;
int32 ref_count;
CPUSet active_on_cpus;
// mask indicating on which CPUs the map is currently used

View File

@ -9,6 +9,11 @@
#include <vm/VMTranslationMap.h>
#if __GNUC__ < 4
#define final
#endif
#define PAGE_INVALIDATE_CACHE_SIZE 64
@ -22,12 +27,12 @@ struct ARMVMTranslationMap : VMTranslationMap {
status_t Init(bool kernel);
virtual bool Lock();
virtual void Unlock();
virtual bool Lock() final;
virtual void Unlock() final;
virtual addr_t MappedSize() const;
virtual addr_t MappedSize() const final;
virtual void Flush();
virtual void Flush() final;
virtual ARMPagingStructures* PagingStructures() const = 0;
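
The Lock()/Unlock()/MappedSize()/Flush() declarations gaining final in the hunk above depend on the small shim added at the top of the header (#if __GNUC__ < 4 / #define final / #endif): presumably to keep Haiku's legacy gcc2 toolchain building, which predates the C++11 final specifier, the keyword is simply defined away on old compilers. A short standalone sketch of the idiom, with made-up Base/Derived names purely for illustration:

#if __GNUC__ < 4
#	define final
#endif

struct Base {
	virtual ~Base() {}
	virtual bool Lock() = 0;
};

struct Derived : Base {
	// On a C++11 compiler 'final' blocks any further override of Lock();
	// on an old GCC the macro removes the keyword and this still parses.
	virtual bool Lock() final { return true; }
};

int
main()
{
	Derived map;
	return map.Lock() ? 0 : 1;
}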

View File

@ -1,16 +0,0 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/arm_physical_page_mapper.h"
TranslationMapPhysicalPageMapper::~TranslationMapPhysicalPageMapper()
{
}
ARMPhysicalPageMapper::~ARMPhysicalPageMapper()
{
}

View File

@ -10,12 +10,11 @@
struct kernel_args;
struct vm_translation_map_ops;
class TranslationMapPhysicalPageMapper {
public:
virtual ~TranslationMapPhysicalPageMapper();
virtual ~TranslationMapPhysicalPageMapper() { }
virtual void Delete() = 0;
@ -26,8 +25,6 @@ public:
class ARMPhysicalPageMapper : public VMPhysicalPageMapper {
public:
virtual ~ARMPhysicalPageMapper();
virtual status_t CreateTranslationMapPhysicalPageMapper(
TranslationMapPhysicalPageMapper** _mapper)
= 0;

View File

@ -49,11 +49,6 @@
// a little longer, thus avoiding re-mapping.
#define SLOTS_PER_TRANSLATION_MAP 4
#define USER_SLOTS_PER_CPU 16
#define KERNEL_SLOTS_PER_CPU 16
#define TOTAL_SLOTS_PER_CPU (USER_SLOTS_PER_CPU \
+ KERNEL_SLOTS_PER_CPU + 1)
// one slot is for use in interrupts
using ARMLargePhysicalPageMapper::PhysicalPageSlot;
@ -126,9 +121,10 @@ public:
LargeMemoryPhysicalPageMapper();
status_t Init(kernel_args* args,
PhysicalPageSlotPool* initialPool,
TranslationMapPhysicalPageMapper*&
_kernelPageMapper);
PhysicalPageSlotPool* initialPools,
int32 initialPoolCount, size_t poolSize,
TranslationMapPhysicalPageMapper*&
_kernelPageMapper);
virtual status_t CreateTranslationMapPhysicalPageMapper(
TranslationMapPhysicalPageMapper** _mapper);
@ -402,7 +398,7 @@ LargeMemoryTranslationMapPhysicalPageMapper::GetPageTableAt(
slot.slot->address + B_PAGE_SIZE);
slot.valid.SetBit(currentCPU);
}
return (void*)slot.slot->address + off;
return (uint8*)slot.slot->address + off;
}
}
@ -415,7 +411,7 @@ LargeMemoryTranslationMapPhysicalPageMapper::GetPageTableAt(
slot.valid.ClearAll();
slot.valid.SetBit(currentCPU);
return (void*)slot.slot->address + off;
return (uint8*)slot.slot->address + off;
}
@ -432,11 +428,16 @@ LargeMemoryPhysicalPageMapper::LargeMemoryPhysicalPageMapper()
status_t
LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
PhysicalPageSlotPool* initialPool,
PhysicalPageSlotPool* initialPools, int32 initialPoolCount, size_t poolSize,
TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
fInitialPool = initialPool;
fNonEmptyPools.Add(fInitialPool);
ASSERT(initialPoolCount >= 1);
fInitialPool = initialPools;
for (int32 i = 0; i < initialPoolCount; i++) {
uint8* pointer = (uint8*)initialPools + i * poolSize;
fNonEmptyPools.Add((PhysicalPageSlotPool*)pointer);
}
// get the debug slot
GetSlot(true, fDebugSlot);
@ -572,7 +573,7 @@ status_t
LargeMemoryPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
phys_size_t length)
{
phys_addr_t pageOffset = address % B_PAGE_SIZE;
addr_t pageOffset = address % B_PAGE_SIZE;
Thread* thread = thread_get_current_thread();
ThreadCPUPinner _(thread);
@ -603,7 +604,7 @@ LargeMemoryPhysicalPageMapper::MemcpyFromPhysical(void* _to, phys_addr_t from,
size_t length, bool user)
{
uint8* to = (uint8*)_to;
phys_addr_t pageOffset = from % B_PAGE_SIZE;
addr_t pageOffset = from % B_PAGE_SIZE;
Thread* thread = thread_get_current_thread();
ThreadCPUPinner _(thread);
@ -643,7 +644,7 @@ LargeMemoryPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to,
const void* _from, size_t length, bool user)
{
const uint8* from = (const uint8*)_from;
phys_addr_t pageOffset = to % B_PAGE_SIZE;
addr_t pageOffset = to % B_PAGE_SIZE;
Thread* thread = thread_get_current_thread();
ThreadCPUPinner _(thread);
@ -759,12 +760,14 @@ LargeMemoryPhysicalPageMapper::GetSlotQueue(int32 cpu, bool user)
status_t
large_memory_physical_page_ops_init(kernel_args* args,
ARMLargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
ARMLargePhysicalPageMapper::PhysicalPageSlotPool* initialPools,
int32 initialPoolCount, size_t poolSize,
ARMPhysicalPageMapper*& _pageMapper,
TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
new(&sPhysicalPageMapper) LargeMemoryPhysicalPageMapper;
sPhysicalPageMapper.Init(args, initialPool, _kernelPageMapper);
sPhysicalPageMapper.Init(args, initialPools, initialPoolCount, poolSize,
_kernelPageMapper);
_pageMapper = &sPhysicalPageMapper;
return B_OK;

View File

@ -11,6 +11,15 @@
#include <util/DoublyLinkedList.h>
#define USER_SLOTS_PER_CPU 16
#define KERNEL_SLOTS_PER_CPU 16
#define TOTAL_SLOTS_PER_CPU (USER_SLOTS_PER_CPU \
+ KERNEL_SLOTS_PER_CPU + 1)
// one slot is for use in interrupts
#define EXTRA_SLOTS 2
class TranslationMapPhysicalPageMapper;
class ARMPhysicalPageMapper;
struct kernel_args;
@ -53,7 +62,8 @@ protected:
status_t large_memory_physical_page_ops_init(kernel_args* args,
ARMLargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
ARMLargePhysicalPageMapper::PhysicalPageSlotPool* initialPools,
int32 initialPoolCount, size_t poolSize,
ARMPhysicalPageMapper*& _pageMapper,
TranslationMapPhysicalPageMapper*& _kernelPageMapper);