ARM: Major VM work

- This is mostly a copy of the x86 32bit paging method and infrastructure. It was copied for two reasons:
  1) it is the most complete VM arch, and
  2) the first ARM PAE patches have landed on alkml, so we will have to deal with PAE in the future as well, and this infrastructure has proven to be ready ;)
- No protection features or dirty/accessed tracking yet
- Lots of #if 0, but...

It boots all the way up to init_modules() now, and then dies because of a lack of (ARM) ELF relocation implementation!

Since at this point the VM can be fully initialised, I'm going to focus on CPU exceptions next, so we can get KDL to trigger when it happens, and I can actually debug from there ;)

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@39206 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 107ce9b0fb
commit c917cd6261
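For orientation before the diff: the port keeps x86's "paging method" indirection, where the generic arch_vm_translation_map_*() hooks forward to a method object selected once at boot. A minimal compilable sketch of that pattern (simplified stand-ins, not the actual Haiku declarations):

typedef int status_t;
struct kernel_args;
class VMTranslationMap;
class VMPhysicalPageMapper;

// abstract method interface; the 32bit (and, later, PAE) variants
// override these virtuals
struct ARMPagingMethod {
    virtual ~ARMPagingMethod() {}
    virtual status_t Init(kernel_args* args,
        VMPhysicalPageMapper** _physicalPageMapper) = 0;
    virtual status_t CreateTranslationMap(bool kernel,
        VMTranslationMap** _map) = 0;
};

ARMPagingMethod* gARMPagingMethod;
    // placement-new'ed into a static buffer at boot, before any heap exists

// every arch hook becomes a one-line forwarder to the selected method
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
    return gARMPagingMethod->CreateTranslationMap(kernel, _map);
}

The diff below fills this shape in: sPagingMethodBuffer holds the statically allocated method object, and each arch hook forwards to it.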
@@ -5,20 +5,4 @@
#ifndef _KERNEL_ARCH_ARM_VM_TRANSLATION_MAP_H
#define _KERNEL_ARCH_ARM_VM_TRANSLATION_MAP_H

#include <arch/vm_translation_map.h>

#ifdef __cplusplus
extern "C" {
#endif

status_t arm_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
    size_t size);
void arm_unmap_address_range(addr_t virtualAddress, size_t size);
status_t arm_remap_address_range(addr_t *virtualAddress, size_t size,
    bool unmap);

#ifdef __cplusplus
}
#endif

#endif /* _KERNEL_ARCH_ARM_VM_TRANSLATION_MAP_H */
@@ -5,8 +5,8 @@ UsePrivateKernelHeaders ;
UsePrivateHeaders [ FDirName kernel arch $(TARGET_ARCH) board $(TARGET_BOOT_BOARD) ] ;


SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;

SEARCH_SOURCE += [ FDirName $(SUBDIR) paging ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 32bit ] ;

KernelMergeObject kernel_arch_arm.o :
#   arch_atomic.c
@@ -29,9 +29,19 @@ KernelMergeObject kernel_arch_arm.o :
    arch_vm_translation_map.cpp
    arch_asm.S
    uart.cpp
    generic_vm_physical_page_mapper.cpp
    generic_vm_physical_page_ops.cpp
#

    # paging
    arm_physical_page_mapper.cpp
    arm_physical_page_mapper_large_memory.cpp
    ARMPagingMethod.cpp
    ARMPagingStructures.cpp
    ARMVMTranslationMap.cpp

    # paging/32bit
    ARMPagingMethod32Bit.cpp
    ARMPagingStructures32Bit.cpp
    ARMVMTranslationMap32Bit.cpp

    :
    $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused
    :
@@ -1,136 +1,149 @@
/*
 * Copyright 2007, François Revol, revol@free.fr.
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001, Travis Geiselbrecht. All rights reserved.
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */

#include <KernelExport.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <int.h>
#include <boot/kernel_args.h>

#include <arch/vm_translation_map.h>
#include <arch/cpu.h>
//#include <arch_mmu.h>
#include <stdlib.h>

#include "generic_vm_physical_page_mapper.h"
#include <boot/kernel_args.h>

#include "paging/32bit/ARMPagingMethod32Bit.h"
//#include "paging/pae/ARMPagingMethodPAE.h"


void *
m68k_translation_map_get_pgdir(VMTranslationMap *map)
{
    return NULL;
#warning ARM:WRITEME
    //get_vm_ops()->m68k_translation_map_get_pgdir(map);
}

// #pragma mark -
//  VM API
#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
#   define TRACE(x...) dprintf(x)
#else
#   define TRACE(x...) ;
#endif


status_t
arch_vm_translation_map_init(kernel_args *args,
    VMPhysicalPageMapper** _physicalPageMapper)
{
    return NULL;
#warning ARM:WRITEME
static union {
    uint64 align;
    char thirty_two[sizeof(ARMPagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
    char pae[sizeof(ARMPagingMethodPAE)];
#endif
} sPagingMethodBuffer;


// #pragma mark - VM API

    //get_vm_ops()->arch_vm_translation_map_init_map(map, kernel);
}

status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
    return NULL;
#warning ARM:WRITEME
}

status_t
arch_vm_translation_map_init_kernel_map_post_sem(VMTranslationMap *map)
{
    return NULL;
#warning ARM:WRITEME

    //get_vm_ops()->arch_vm_translation_map_init_kernel_map_post_sem(map);
    return gARMPagingMethod->CreateTranslationMap(kernel, _map);
}


status_t
arch_vm_translation_map_init(kernel_args *args)
arch_vm_translation_map_init(kernel_args *args,
    VMPhysicalPageMapper** _physicalPageMapper)
{
    return NULL;
#warning ARM:WRITEME
    TRACE("vm_translation_map_init: entry\n");

    //get_vm_ops()->arch_vm_translation_map_init(args);
}
#ifdef TRACE_VM_TMAP
    TRACE("physical memory ranges:\n");
    for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
        phys_addr_t start = args->physical_memory_range[i].start;
        phys_addr_t end = start + args->physical_memory_range[i].size;
        TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
            end);
    }

    TRACE("allocated physical ranges:\n");
    for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
        phys_addr_t start = args->physical_allocated_range[i].start;
        phys_addr_t end = start + args->physical_allocated_range[i].size;
        TRACE("  %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
            end);
    }

status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
    return NULL;
#warning ARM:WRITEME
    TRACE("allocated virtual ranges:\n");
    for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
        addr_t start = args->virtual_allocated_range[i].start;
        addr_t end = start + args->virtual_allocated_range[i].size;
        TRACE("  %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
    }
#endif

    //get_vm_ops()->arch_vm_translation_map_init_post_area(args);
#if B_HAIKU_PHYSICAL_BITS == 64 //IRA: Check all 64 bit code and adjust for ARM
    bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
    bool paeNeeded = false;
    for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
        phys_addr_t end = args->physical_memory_range[i].start
            + args->physical_memory_range[i].size;
        if (end > 0x100000000LL) {
            paeNeeded = true;
            break;
        }
    }

    if (paeAvailable && paeNeeded) {
        dprintf("using PAE paging\n");
        gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethodPAE;
    } else {
        dprintf("using 32 bit paging (PAE not %s)\n",
            paeNeeded ? "available" : "needed");
        gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethod32Bit;
    }
#else
    gARMPagingMethod = new(&sPagingMethodBuffer) ARMPagingMethod32Bit;
#endif

    return gARMPagingMethod->Init(args, _physicalPageMapper);
}


status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
    return NULL;
#warning ARM:WRITEME

    //get_vm_ops()->arch_vm_translation_map_init_post_sem(args);
    return B_OK;
}


/** Directly maps a page without having knowledge of any kernel structures.
 *  Used only during VM setup.
 *  It currently ignores the "attributes" parameter and sets all pages
 *  read/write.
 */

status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress,
    phys_addr_t physicalAddress, uint8 attributes,
    phys_addr_t (*get_free_page)(kernel_args *))
arch_vm_translation_map_init_post_area(kernel_args *args)
{
    return NULL;
#warning ARM:WRITEME
    TRACE("vm_translation_map_init_post_area: entry\n");

    //get_vm_ops()->arch_vm_translation_map_early_map(ka, virtualAddress, physicalAddress,
    //  attributes, get_free_page);
    return gARMPagingMethod->InitPostArea(args);
}


// XXX currently assumes this translation map is active

status_t
arch_vm_translation_map_early_query(addr_t va, phys_addr_t *out_physical)
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
    uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
    return NULL;
#warning ARM:WRITEME
    TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);

    //get_vm_ops()->arch_vm_translation_map_early_query(va, out_physical);
    return gARMPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}


/*! Verifies that the page at the given virtual address can be accessed in the
    current context.

    This function is invoked in the kernel debugger. Paranoid checking is in
    order.

    \param virtualAddress The virtual address to be checked.
    \param protection The area protection for which to check. Valid is a bitwise
        or of one or more of \c B_KERNEL_READ_AREA or \c B_KERNEL_WRITE_AREA.
    \return \c true, if the address can be accessed in all ways specified by
        \a protection, \c false otherwise.
*/
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
    uint32 protection)
    uint32 protection)
{
#warning ARM:WRITEME
    return TRUE;
    //get_vm_ops()->arch_vm_translation_map_is_kernel_page_accessible(virtualAddress,
    //  protection);
    return gARMPagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}
src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.cpp (new file, 552 lines)
@@ -0,0 +1,552 @@
/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/ARMPagingMethod32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <AutoDeleter.h>

#include <arch_system_info.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include <arm_mmu.h>

#include "paging/32bit/ARMPagingStructures32Bit.h"
#include "paging/32bit/ARMVMTranslationMap32Bit.h"
#include "paging/arm_physical_page_mapper.h"
#include "paging/arm_physical_page_mapper_large_memory.h"


#define TRACE_ARM_PAGING_METHOD_32_BIT
#ifdef TRACE_ARM_PAGING_METHOD_32_BIT
#   define TRACE(x...) dprintf(x)
#else
#   define TRACE(x...) ;
#endif


using ARMLargePhysicalPageMapper::PhysicalPageSlot;


// #pragma mark - ARMPagingMethod32Bit::PhysicalPageSlotPool


struct ARMPagingMethod32Bit::PhysicalPageSlotPool
    : ARMLargePhysicalPageMapper::PhysicalPageSlotPool {
public:
    virtual ~PhysicalPageSlotPool();

    status_t InitInitial(kernel_args* args);
    status_t InitInitialPostArea(kernel_args* args);

    void Init(area_id dataArea, void* data,
        area_id virtualArea, addr_t virtualBase);

    virtual status_t AllocatePool(
        ARMLargePhysicalPageMapper
            ::PhysicalPageSlotPool*& _pool);
    virtual void Map(phys_addr_t physicalAddress,
        addr_t virtualAddress);

public:
    static PhysicalPageSlotPool sInitialPhysicalPagePool;

private:
    area_id fDataArea;
    area_id fVirtualArea;
    addr_t fVirtualBase;
    page_table_entry* fPageTable;
};


ARMPagingMethod32Bit::PhysicalPageSlotPool
    ARMPagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool;


ARMPagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}


status_t
ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
{
    // allocate a virtual address range for the pages to be mapped into
    addr_t virtualBase = vm_allocate_early(args, 1024 * B_PAGE_SIZE, 0, 0,
        kPageTableAlignment);
    if (virtualBase == 0) {
        panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
            "physical page pool space in virtual address space!");
        return B_ERROR;
    }

    // allocate memory for the page table and data
    size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
    page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
        areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);

    // prepare the page table
    _EarlyPreparePageTables(pageTable, virtualBase, 1024 * B_PAGE_SIZE);

    // init the pool structure and add the initial pool
    Init(-1, pageTable, -1, (addr_t)virtualBase);

    return B_OK;
}


status_t
ARMPagingMethod32Bit::PhysicalPageSlotPool::InitInitialPostArea(
    kernel_args* args)
{
    // create an area for the (already allocated) data
    size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
    void* temp = fPageTable;
    area_id area = create_area("physical page pool", &temp,
        B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
    if (area < B_OK) {
        panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
            "create area for physical page pool.");
        return area;
    }
    fDataArea = area;

    // create an area for the virtual address space
    temp = (void*)fVirtualBase;
    area = vm_create_null_area(VMAddressSpace::KernelID(),
        "physical page pool space", &temp, B_EXACT_ADDRESS,
        1024 * B_PAGE_SIZE, 0);
    if (area < B_OK) {
        panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
            "create area for physical page pool space.");
        return area;
    }
    fVirtualArea = area;

    return B_OK;
}


void
ARMPagingMethod32Bit::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
    area_id virtualArea, addr_t virtualBase)
{
    fDataArea = dataArea;
    fVirtualArea = virtualArea;
    fVirtualBase = virtualBase;
    fPageTable = (page_table_entry*)data;

    // init slot list
    fSlots = (PhysicalPageSlot*)(fPageTable + 1024);
    addr_t slotAddress = virtualBase;
    for (int32 i = 0; i < 1024; i++, slotAddress += B_PAGE_SIZE) {
        PhysicalPageSlot* slot = &fSlots[i];
        slot->next = slot + 1;
        slot->pool = this;
        slot->address = slotAddress;
    }

    fSlots[1023].next = NULL;
        // terminate list
}


void
ARMPagingMethod32Bit::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
    addr_t virtualAddress)
{
    page_table_entry& pte = fPageTable[(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
    pte = (physicalAddress & ARM_PTE_ADDRESS_MASK)
        | ARM_PTE_TYPE_SMALL_PAGE;

    arch_cpu_invalidate_TLB_range(virtualAddress, virtualAddress + B_PAGE_SIZE);
    //  invalidate_TLB(virtualAddress);
}


status_t
ARMPagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
    ARMLargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
    // create the pool structure
    PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
    if (pool == NULL)
        return B_NO_MEMORY;
    ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);

    // create an area that can contain the page table and the slot
    // structures
    size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
    void* data;
    virtual_address_restrictions virtualRestrictions = {};
    virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
    physical_address_restrictions physicalRestrictions = {};
    area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
        PAGE_ALIGN(areaSize), B_FULL_LOCK,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
        &virtualRestrictions, &physicalRestrictions, &data);
    if (dataArea < 0)
        return dataArea;

    // create the null area for the virtual address space
    void* virtualBase;
    area_id virtualArea = vm_create_null_area(
        VMAddressSpace::KernelID(), "physical page pool space",
        &virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
        CREATE_AREA_PRIORITY_VIP);
    if (virtualArea < 0) {
        delete_area(dataArea);
        return virtualArea;
    }

    // prepare the page table
    memset(data, 0, B_PAGE_SIZE);

    // get the page table's physical address
    phys_addr_t physicalTable;
    ARMVMTranslationMap32Bit* map = static_cast<ARMVMTranslationMap32Bit*>(
        VMAddressSpace::Kernel()->TranslationMap());
    uint32 dummyFlags;
    cpu_status state = disable_interrupts();
    map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
    restore_interrupts(state);

    // put the page table into the page directory
    int32 index = VADDR_TO_PDENT((addr_t)virtualBase);
    page_directory_entry* entry
        = &map->PagingStructures32Bit()->pgdir_virt[index];
    PutPageTableInPageDir(entry, physicalTable,
        B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
    ARMPagingStructures32Bit::UpdateAllPageDirs(index, *entry);

    // init the pool structure
    pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
    poolDeleter.Detach();
    _pool = pool;
    return B_OK;
}


// #pragma mark - ARMPagingMethod32Bit


ARMPagingMethod32Bit::ARMPagingMethod32Bit()
    :
    fKernelPhysicalPageDirectory(0),
    fKernelVirtualPageDirectory(NULL),
    fPhysicalPageMapper(NULL),
    fKernelPhysicalPageMapper(NULL)
{
}


ARMPagingMethod32Bit::~ARMPagingMethod32Bit()
{
}


status_t
ARMPagingMethod32Bit::Init(kernel_args* args,
    VMPhysicalPageMapper** _physicalPageMapper)
{
    TRACE("vm_translation_map_init: entry\n");

    fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
    fKernelVirtualPageDirectory = (page_directory_entry*)
        args->arch_args.vir_pgdir;

    TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
        fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);

    ARMPagingStructures32Bit::StaticInit();

    // create the initial pool for the physical page mapper
    PhysicalPageSlotPool* pool
        = new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
            PhysicalPageSlotPool;
    status_t error = pool->InitInitial(args);
    if (error != B_OK) {
        panic("ARMPagingMethod32Bit::Init(): Failed to create initial pool "
            "for physical page mapper!");
        return error;
    }

    // create physical page mapper
    large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
        fKernelPhysicalPageMapper);
        // TODO: Select the best page mapper!

    // enable global page feature if available
#if 0 //IRA: check for ARMv6!!
    if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
        // this prevents kernel pages from being flushed from TLB on
        // context-switch
        x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
    }
#endif
    TRACE("vm_translation_map_init: done\n");

    *_physicalPageMapper = fPhysicalPageMapper;
    return B_OK;
}


status_t
ARMPagingMethod32Bit::InitPostArea(kernel_args* args)
{
    void *temp;
    status_t error;
    area_id area;

    temp = (void*)fKernelVirtualPageDirectory;
    area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, MMU_L1_TABLE_SIZE,
        B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
    if (area < B_OK)
        return area;

    error = PhysicalPageSlotPool::sInitialPhysicalPagePool
        .InitInitialPostArea(args);
    if (error != B_OK)
        return error;

    return B_OK;
}


status_t
ARMPagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
    ARMVMTranslationMap32Bit* map = new(std::nothrow) ARMVMTranslationMap32Bit;
    if (map == NULL)
        return B_NO_MEMORY;

    status_t error = map->Init(kernel);
    if (error != B_OK) {
        delete map;
        return error;
    }

    *_map = map;
    return B_OK;
}


status_t
ARMPagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
    phys_addr_t physicalAddress, uint8 attributes,
    phys_addr_t (*get_free_page)(kernel_args*))
{
    // check to see if a page table exists for this range
    int index = VADDR_TO_PDENT(virtualAddress);
    if ((fKernelVirtualPageDirectory[index] & ARM_PDE_TYPE_MASK) == 0) {
        phys_addr_t pgtable;
        page_directory_entry *e;
        // we need to allocate a pgtable
        pgtable = get_free_page(args);
        // pgtable is in pages, convert to physical address
        pgtable *= B_PAGE_SIZE;

        TRACE("ARMPagingMethod32Bit::MapEarly(): asked for free page for "
            "pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);

        // put it in the pgdir
        e = &fKernelVirtualPageDirectory[index];
        PutPageTableInPageDir(e, pgtable, attributes);

        // zero it out in its new mapping
        memset((void*)pgtable, 0, B_PAGE_SIZE);
    }

    page_table_entry *ptEntry = (page_table_entry*)
        (fKernelVirtualPageDirectory[index] & ARM_PDE_ADDRESS_MASK);
    ptEntry += VADDR_TO_PTENT(virtualAddress);

    ASSERT_PRINT(
        (*ptEntry & ARM_PTE_TYPE_MASK) == 0,
        "virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
        ", existing pte: %#" B_PRIx32, virtualAddress, fKernelVirtualPageDirectory[index],
        *ptEntry);

    // now, fill in the pentry
    PutPageTableEntryInTable(ptEntry,
        physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));

    return B_OK;
}


bool
ARMPagingMethod32Bit::IsKernelPageAccessible(addr_t virtualAddress,
    uint32 protection)
{
#if 0
    // We only trust the kernel team's page directory. So switch to it first.
    // Always set it to make sure the TLBs don't contain obsolete data.
    uint32 physicalPageDirectory;
    read_cr3(physicalPageDirectory);
    write_cr3(fKernelPhysicalPageDirectory);

    // get the page directory entry for the address
    page_directory_entry pageDirectoryEntry;
    uint32 index = VADDR_TO_PDENT(virtualAddress);

    if (physicalPageDirectory == fKernelPhysicalPageDirectory) {
        pageDirectoryEntry = fKernelVirtualPageDirectory[index];
    } else if (fPhysicalPageMapper != NULL) {
        // map the original page directory and get the entry
        void* handle;
        addr_t virtualPageDirectory;
        status_t error = fPhysicalPageMapper->GetPageDebug(
            physicalPageDirectory, &virtualPageDirectory, &handle);
        if (error == B_OK) {
            pageDirectoryEntry
                = ((page_directory_entry*)virtualPageDirectory)[index];
            fPhysicalPageMapper->PutPageDebug(virtualPageDirectory, handle);
        } else
            pageDirectoryEntry = 0;
    } else
        pageDirectoryEntry = 0;

    // map the page table and get the entry
    page_table_entry pageTableEntry;
    index = VADDR_TO_PTENT(virtualAddress);

    if ((pageDirectoryEntry & ARM_PDE_PRESENT) != 0
        && fPhysicalPageMapper != NULL) {
        void* handle;
        addr_t virtualPageTable;
        status_t error = fPhysicalPageMapper->GetPageDebug(
            pageDirectoryEntry & ARM_PDE_ADDRESS_MASK, &virtualPageTable,
            &handle);
        if (error == B_OK) {
            pageTableEntry = ((page_table_entry*)virtualPageTable)[index];
            fPhysicalPageMapper->PutPageDebug(virtualPageTable, handle);
        } else
            pageTableEntry = 0;
    } else
        pageTableEntry = 0;

    // switch back to the original page directory
    if (physicalPageDirectory != fKernelPhysicalPageDirectory)
        write_cr3(physicalPageDirectory);

    if ((pageTableEntry & ARM_PTE_PRESENT) == 0)
        return false;

    // present means kernel-readable, so check for writable
    return (protection & B_KERNEL_WRITE_AREA) == 0
        || (pageTableEntry & ARM_PTE_WRITABLE) != 0;
#endif
    //IRA: fix the above!
    return true;
}


/*static*/ void
ARMPagingMethod32Bit::PutPageTableInPageDir(page_directory_entry* entry,
    phys_addr_t pgtablePhysical, uint32 attributes)
{
    *entry = (pgtablePhysical & ARM_PDE_ADDRESS_MASK)
        | ARM_PDE_TYPE_COARSE_L2_PAGE_TABLE;
    // TODO: we ignore the attributes of the page table - for compatibility
    // with BeOS we allow having user accessible areas in the kernel address
    // space. This is currently being used by some drivers, mainly for the
    // frame buffer. Our current real time data implementation makes use of
    // this fact, too.
    // We might want to get rid of this possibility one day, especially if
    // we intend to port it to a platform that does not support this.
}


/*static*/ void
ARMPagingMethod32Bit::PutPageTableEntryInTable(page_table_entry* entry,
    phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
    bool globalPage)
{
    page_table_entry page = (physicalAddress & ARM_PTE_ADDRESS_MASK)
        | ARM_PTE_TYPE_SMALL_PAGE;
#if 0 //IRA
        | ARM_PTE_PRESENT | (globalPage ? ARM_PTE_GLOBAL : 0)
        | MemoryTypeToPageTableEntryFlags(memoryType);

    // if the page is user accessible, it's automatically
    // accessible in kernel space, too (but with the same
    // protection)
    if ((attributes & B_USER_PROTECTION) != 0) {
        page |= ARM_PTE_USER;
        if ((attributes & B_WRITE_AREA) != 0)
            page |= ARM_PTE_WRITABLE;
    } else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
        page |= ARM_PTE_WRITABLE;
#endif
    // put it in the page table
    *(volatile page_table_entry*)entry = page;
}


/*static*/ void
ARMPagingMethod32Bit::_EarlyPreparePageTables(page_table_entry* pageTables,
    addr_t address, size_t size)
{
    ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
    memset(pageTables, 0, 256 * (size / (B_PAGE_SIZE * 256)));

    // put the array of pgtables directly into the kernel pagedir
    // these will be wired and kept mapped into virtual space to be easy to get
    // to
    {
        addr_t virtualTable = (addr_t)pageTables;

        for (size_t i = 0; i < (size / (B_PAGE_SIZE * 256));
                i++, virtualTable += 256*sizeof(page_directory_entry)) {
            phys_addr_t physicalTable = 0;
            _EarlyQuery(virtualTable, &physicalTable);
            page_directory_entry* entry = method->KernelVirtualPageDirectory()
                + VADDR_TO_PDENT(address) + i;
            PutPageTableInPageDir(entry, physicalTable,
                B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
        }
    }
}


//! TODO: currently assumes this translation map is active
/*static*/ status_t
ARMPagingMethod32Bit::_EarlyQuery(addr_t virtualAddress,
    phys_addr_t *_physicalAddress)
{
    ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();
    int index = VADDR_TO_PDENT(virtualAddress);
    if ((method->KernelVirtualPageDirectory()[index] & ARM_PDE_TYPE_MASK) == 0) {
        // no pagetable here
        return B_ERROR;
    }

    page_table_entry* entry = (page_table_entry*)
        (method->KernelVirtualPageDirectory()[index] & ARM_PDE_ADDRESS_MASK);
    entry += VADDR_TO_PTENT(virtualAddress);

    if ((*entry & ARM_PTE_TYPE_MASK) == 0) {
        // page mapping not valid
        return B_ERROR;
    }

    *_physicalAddress = (*entry & ARM_PTE_ADDRESS_MASK)
        | VADDR_TO_PGOFF(virtualAddress);

    return B_OK;
}
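A note on the slot-pool mechanism in the file above: it reserves a window of kernel virtual pages backed by one wired page table, then retargets individual PTEs so that any physical page becomes temporarily addressable through a slot. A toy sketch of the idea (standalone C++ with hypothetical names, not the kernel API):

#include <cstddef>
#include <cstdint>

typedef uint32_t page_table_entry;
const size_t kSlotCount = 1024;     // one 4KB page of 32-bit PTEs
const uint64_t kPageSize = 4096;

struct SlotPool {
    page_table_entry pte[kSlotCount];   // stands in for the wired page table
    uintptr_t virtualBase;              // start of the reserved VA window

    // Point slot i at some physical page; after a TLB invalidate, that
    // page is readable at virtualBase + i * kPageSize.
    void MapSlot(size_t i, uint64_t physicalAddress)
    {
        pte[i] = (page_table_entry)((physicalAddress & ~(kPageSize - 1))
            | 0x2);     // small-page descriptor type bits, as in Map() above
        // a TLB invalidate for the slot's virtual address would follow here
    }
};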
src/system/kernel/arch/arm/paging/32bit/ARMPagingMethod32Bit.h (new file, 174 lines)
@@ -0,0 +1,174 @@
/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_PAGING_METHOD_32_BIT_H
#define KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_PAGING_METHOD_32_BIT_H


#include "paging/32bit/paging.h"
#include "paging/ARMPagingMethod.h"
#include "paging/ARMPagingStructures.h"


class TranslationMapPhysicalPageMapper;
class ARMPhysicalPageMapper;


class ARMPagingMethod32Bit : public ARMPagingMethod {
public:
    ARMPagingMethod32Bit();
    virtual ~ARMPagingMethod32Bit();

    virtual status_t Init(kernel_args* args,
        VMPhysicalPageMapper** _physicalPageMapper);
    virtual status_t InitPostArea(kernel_args* args);

    virtual status_t CreateTranslationMap(bool kernel,
        VMTranslationMap** _map);

    virtual status_t MapEarly(kernel_args* args,
        addr_t virtualAddress,
        phys_addr_t physicalAddress,
        uint8 attributes,
        phys_addr_t (*get_free_page)(kernel_args*));

    virtual bool IsKernelPageAccessible(addr_t virtualAddress,
        uint32 protection);

    inline uint32 KernelPhysicalPageDirectory() const
        { return fKernelPhysicalPageDirectory; }
    inline page_directory_entry* KernelVirtualPageDirectory() const
        { return fKernelVirtualPageDirectory; }
    inline ARMPhysicalPageMapper* PhysicalPageMapper() const
        { return fPhysicalPageMapper; }
    inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
        { return fKernelPhysicalPageMapper; }

    static ARMPagingMethod32Bit* Method();

    static void PutPageTableInPageDir(
        page_directory_entry* entry,
        phys_addr_t pgtablePhysical,
        uint32 attributes);
    static void PutPageTableEntryInTable(
        page_table_entry* entry,
        phys_addr_t physicalAddress,
        uint32 attributes, uint32 memoryType,
        bool globalPage);
    static page_table_entry SetPageTableEntry(page_table_entry* entry,
        page_table_entry newEntry);
    static page_table_entry SetPageTableEntryFlags(page_table_entry* entry,
        uint32 flags);
    static page_table_entry TestAndSetPageTableEntry(
        page_table_entry* entry,
        page_table_entry newEntry,
        page_table_entry oldEntry);
    static page_table_entry ClearPageTableEntry(page_table_entry* entry);
    static page_table_entry ClearPageTableEntryFlags(
        page_table_entry* entry, uint32 flags);

    static uint32 MemoryTypeToPageTableEntryFlags(
        uint32 memoryType);

private:
    struct PhysicalPageSlotPool;
    friend struct PhysicalPageSlotPool;

private:
    static void _EarlyPreparePageTables(
        page_table_entry* pageTables,
        addr_t address, size_t size);
    static status_t _EarlyQuery(addr_t virtualAddress,
        phys_addr_t *_physicalAddress);

private:
    uint32 fKernelPhysicalPageDirectory;
    page_directory_entry* fKernelVirtualPageDirectory;

    ARMPhysicalPageMapper* fPhysicalPageMapper;
    TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};


/*static*/ inline ARMPagingMethod32Bit*
ARMPagingMethod32Bit::Method()
{
    return static_cast<ARMPagingMethod32Bit*>(gARMPagingMethod);
}


/*static*/ inline page_table_entry
ARMPagingMethod32Bit::SetPageTableEntry(page_table_entry* entry,
    page_table_entry newEntry)
{
    return atomic_set((int32*)entry, newEntry);
}


/*static*/ inline page_table_entry
ARMPagingMethod32Bit::SetPageTableEntryFlags(page_table_entry* entry,
    uint32 flags)
{
    return atomic_or((int32*)entry, flags);
}


/*static*/ inline page_table_entry
ARMPagingMethod32Bit::TestAndSetPageTableEntry(page_table_entry* entry,
    page_table_entry newEntry, page_table_entry oldEntry)
{
    return atomic_test_and_set((int32*)entry, newEntry, oldEntry);
}


/*static*/ inline page_table_entry
ARMPagingMethod32Bit::ClearPageTableEntry(page_table_entry* entry)
{
    return SetPageTableEntry(entry, 0);
}


/*static*/ inline page_table_entry
ARMPagingMethod32Bit::ClearPageTableEntryFlags(page_table_entry* entry, uint32 flags)
{
    return atomic_and((int32*)entry, ~flags);
}


/*static*/ inline uint32
ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
#if 0 //IRA
    // ATM we only handle the uncacheable and write-through type explicitly. For
    // all other types we rely on the MTRRs to be set up correctly. Since we set
    // the default memory type to write-back and since the uncacheable type in
    // the PTE overrides any MTRR attribute (though, as per the specs, that is
    // not recommended for performance reasons), this reduces the work we
    // actually *have* to do with the MTRRs to setting the remaining types
    // (usually only write-combining for the frame buffer).
    switch (memoryType) {
        case B_MTR_UC:
            return ARM_PTE_CACHING_DISABLED | ARM_PTE_WRITE_THROUGH;

        case B_MTR_WC:
            // ARM_PTE_WRITE_THROUGH would be closer, but the combination with
            // MTRR WC is "implementation defined" for Pentium Pro/II.
            return 0;

        case B_MTR_WT:
            return ARM_PTE_WRITE_THROUGH;

        case B_MTR_WP:
        case B_MTR_WB:
        default:
            return 0;
    }
#else
    return 0;
#endif
}


#endif  // KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_PAGING_METHOD_32_BIT_H
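The inline helpers above funnel every page-table-entry update through atomic_set()/atomic_or()/atomic_and(). A standalone illustration of the read-modify-write hazard this avoids (std::atomic stands in for the kernel atomics; an analogy, not kernel code):

#include <atomic>
#include <cstdint>

typedef uint32_t page_table_entry;

// A plain "entry |= flags" compiles to load/modify/store; a concurrent
// update of the same 32-bit entry from another CPU (or, with hardware
// accessed/dirty tracking, from the page-table walker) landing in between
// would be silently overwritten. fetch_or() makes the update atomic and
// returns the previous value, like the kernel's atomic_or().
page_table_entry
set_page_table_entry_flags(std::atomic<page_table_entry>& entry,
    uint32_t flags)
{
    return entry.fetch_or(flags);
}

page_table_entry
clear_page_table_entry(std::atomic<page_table_entry>& entry)
{
    return entry.exchange(0);   // analogous to SetPageTableEntry(entry, 0)
}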
src/system/kernel/arch/arm/paging/32bit/ARMPagingStructures32Bit.cpp (new file, 134 lines)
@@ -0,0 +1,134 @@
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/ARMPagingStructures32Bit.h"

#include <stdlib.h>

#include <heap.h>
#include <util/AutoLock.h>


// Accessor class to reuse the SinglyLinkedListLink of DeferredDeletable for
// ARMPagingStructures32Bit.
struct PagingStructuresGetLink {
private:
    typedef SinglyLinkedListLink<ARMPagingStructures32Bit> Link;

public:
    inline Link* operator()(ARMPagingStructures32Bit* element) const
    {
        return (Link*)element->GetSinglyLinkedListLink();
    }

    inline const Link* operator()(
        const ARMPagingStructures32Bit* element) const
    {
        return (const Link*)element->GetSinglyLinkedListLink();
    }
};


typedef SinglyLinkedList<ARMPagingStructures32Bit, PagingStructuresGetLink>
    PagingStructuresList;


static PagingStructuresList sPagingStructuresList;
static spinlock sPagingStructuresListLock;


ARMPagingStructures32Bit::ARMPagingStructures32Bit()
    :
    pgdir_virt(NULL)
{
}


ARMPagingStructures32Bit::~ARMPagingStructures32Bit()
{
    // free the page dir
    free(pgdir_virt);
}


void
ARMPagingStructures32Bit::Init(page_directory_entry* virtualPageDir,
    phys_addr_t physicalPageDir, page_directory_entry* kernelPageDir)
{
    pgdir_virt = virtualPageDir;
    pgdir_phys = physicalPageDir;

#if 0 // IRA: handle UART better; identity map of DEVICE_BASE from loader gets wiped here
    // zero out the bottom portion of the new pgdir
    memset(pgdir_virt + FIRST_USER_PGDIR_ENT, 0,
        NUM_USER_PGDIR_ENTS * sizeof(page_directory_entry));
#endif
    // insert this new map into the map list
    {
        int state = disable_interrupts();
        acquire_spinlock(&sPagingStructuresListLock);

        // copy the top portion of the page dir from the kernel page dir
        if (kernelPageDir != NULL) {
            memcpy(pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
                kernelPageDir + FIRST_KERNEL_PGDIR_ENT,
                NUM_KERNEL_PGDIR_ENTS * sizeof(page_directory_entry));
        }

        sPagingStructuresList.Add(this);

        release_spinlock(&sPagingStructuresListLock);
        restore_interrupts(state);
    }
}


void
ARMPagingStructures32Bit::Delete()
{
    // remove from global list
    InterruptsSpinLocker locker(sPagingStructuresListLock);
    sPagingStructuresList.Remove(this);
    locker.Unlock();

#if 0
    // this sanity check can be enabled when corruption due to
    // overwriting an active page directory is suspected
    uint32 activePageDirectory;
    read_cr3(activePageDirectory);
    if (activePageDirectory == pgdir_phys)
        panic("deleting a still active page directory\n");
#endif

    if (are_interrupts_enabled())
        delete this;
    else
        deferred_delete(this);
}


/*static*/ void
ARMPagingStructures32Bit::StaticInit()
{
    B_INITIALIZE_SPINLOCK(&sPagingStructuresListLock);
    new (&sPagingStructuresList) PagingStructuresList;
}


/*static*/ void
ARMPagingStructures32Bit::UpdateAllPageDirs(int index,
    page_directory_entry entry)
{
    InterruptsSpinLocker locker(sPagingStructuresListLock);

    PagingStructuresList::Iterator it = sPagingStructuresList.GetIterator();
    while (ARMPagingStructures32Bit* info = it.Next())
        info->pgdir_virt[index] = entry;
}
src/system/kernel/arch/arm/paging/32bit/ARMPagingStructures32Bit.h (new file, 31 lines)
@@ -0,0 +1,31 @@
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_PAGING_STRUCTURES_32_BIT_H
#define KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_PAGING_STRUCTURES_32_BIT_H


#include "paging/32bit/paging.h"
#include "paging/ARMPagingStructures.h"


struct ARMPagingStructures32Bit : ARMPagingStructures {
    page_directory_entry* pgdir_virt;

    ARMPagingStructures32Bit();
    virtual ~ARMPagingStructures32Bit();

    void Init(page_directory_entry* virtualPageDir,
        phys_addr_t physicalPageDir,
        page_directory_entry* kernelPageDir);

    virtual void Delete();

    static void StaticInit();
    static void UpdateAllPageDirs(int index,
        page_directory_entry entry);
};


#endif  // KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_PAGING_STRUCTURES_32_BIT_H
src/system/kernel/arch/arm/paging/32bit/ARMVMTranslationMap32Bit.cpp (new file, 859 lines)
@@ -0,0 +1,859 @@
/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/32bit/ARMVMTranslationMap32Bit.h"

#include <stdlib.h>
#include <string.h>

#include <int.h>
#include <thread.h>
#include <slab/Slab.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <util/queue.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>

#include "paging/32bit/ARMPagingMethod32Bit.h"
#include "paging/32bit/ARMPagingStructures32Bit.h"
#include "paging/arm_physical_page_mapper.h"


#define TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#ifdef TRACE_ARM_VM_TRANSLATION_MAP_32_BIT
#   define TRACE(x...) dprintf(x)
#else
#   define TRACE(x...) ;
#endif


ARMVMTranslationMap32Bit::ARMVMTranslationMap32Bit()
    :
    fPagingStructures(NULL)
{
}


ARMVMTranslationMap32Bit::~ARMVMTranslationMap32Bit()
{
    if (fPagingStructures == NULL)
        return;

    if (fPageMapper != NULL)
        fPageMapper->Delete();

    if (fPagingStructures->pgdir_virt != NULL) {
        // cycle through and free all of the user space pgtables
        for (uint32 i = VADDR_TO_PDENT(USER_BASE);
                i <= VADDR_TO_PDENT(USER_BASE + (USER_SIZE - 1)); i++) {
            if ((fPagingStructures->pgdir_virt[i] & ARM_PDE_TYPE_MASK) != 0) {
                addr_t address = fPagingStructures->pgdir_virt[i]
                    & ARM_PDE_ADDRESS_MASK;
                vm_page* page = vm_lookup_page(address / B_PAGE_SIZE);
                if (!page)
                    panic("destroy_tmap: didn't find pgtable page\n");
                DEBUG_PAGE_ACCESS_START(page);
                vm_page_set_state(page, PAGE_STATE_FREE);
            }
        }
    }

    fPagingStructures->RemoveReference();
}


status_t
ARMVMTranslationMap32Bit::Init(bool kernel)
{
    TRACE("ARMVMTranslationMap32Bit::Init()\n");

    ARMVMTranslationMap::Init(kernel);

    fPagingStructures = new(std::nothrow) ARMPagingStructures32Bit;
    if (fPagingStructures == NULL)
        return B_NO_MEMORY;

    ARMPagingMethod32Bit* method = ARMPagingMethod32Bit::Method();

    if (!kernel) {
        // user
        // allocate a physical page mapper
        status_t error = method->PhysicalPageMapper()
            ->CreateTranslationMapPhysicalPageMapper(&fPageMapper);
        if (error != B_OK)
            return error;

        // allocate the page directory
        page_directory_entry* virtualPageDir = (page_directory_entry*)memalign(
            B_PAGE_SIZE, B_PAGE_SIZE);
        if (virtualPageDir == NULL)
            return B_NO_MEMORY;

        // look up the page directory's physical address
        phys_addr_t physicalPageDir;
        vm_get_page_mapping(VMAddressSpace::KernelID(),
            (addr_t)virtualPageDir, &physicalPageDir);

        fPagingStructures->Init(virtualPageDir, physicalPageDir,
            method->KernelVirtualPageDirectory());
    } else {
        // kernel
        // get the physical page mapper
        fPageMapper = method->KernelPhysicalPageMapper();

        // we already know the kernel pgdir mapping
        fPagingStructures->Init(method->KernelVirtualPageDirectory(),
            method->KernelPhysicalPageDirectory(), NULL);
    }

    return B_OK;
}


size_t
ARMVMTranslationMap32Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
    // If start == 0, the actual base address is not yet known to the caller and
    // we shall assume the worst case.
    if (start == 0) {
        // offset the range so it has the worst possible alignment
        start = 1023 * B_PAGE_SIZE;
        end += 1023 * B_PAGE_SIZE;
    }

    return VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start);
}
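A quick check of the worst-case arithmetic in MaxPagesNeededToMap() above (standalone; the 1023-page offset implies 1024 pages per page table, which is assumed here):

#include <stdio.h>

int main()
{
    const unsigned long kPageSize = 4096;
    const unsigned long kPagesPerTable = 1024;
    const unsigned long kTableCoverage = kPageSize * kPagesPerTable;

    // map two pages at an unknown base: assume the worst alignment,
    // i.e. the range straddles a page-table boundary
    unsigned long start = 0, end = 2 * kPageSize;
    if (start == 0) {
        start = (kPagesPerTable - 1) * kPageSize;
        end += (kPagesPerTable - 1) * kPageSize;
    }
    // mirrors VADDR_TO_PDENT(end) + 1 - VADDR_TO_PDENT(start)
    printf("%lu\n", end / kTableCoverage + 1 - start / kTableCoverage);
        // prints 2: even a two-page mapping may need two page tables
    return 0;
}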
||||
|
||||
|
||||
status_t
|
||||
ARMVMTranslationMap32Bit::Map(addr_t va, phys_addr_t pa, uint32 attributes,
|
||||
uint32 memoryType, vm_page_reservation* reservation)
|
||||
{
|
||||
TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
|
||||
|
||||
/*
|
||||
dprintf("pgdir at 0x%x\n", pgdir);
|
||||
dprintf("index is %d\n", va / B_PAGE_SIZE / 1024);
|
||||
dprintf("final at 0x%x\n", &pgdir[va / B_PAGE_SIZE / 1024]);
|
||||
dprintf("value is 0x%x\n", *(int *)&pgdir[va / B_PAGE_SIZE / 1024]);
|
||||
dprintf("present bit is %d\n", pgdir[va / B_PAGE_SIZE / 1024].present);
|
||||
dprintf("addr is %d\n", pgdir[va / B_PAGE_SIZE / 1024].addr);
|
||||
*/
|
||||
page_directory_entry* pd = fPagingStructures->pgdir_virt;
|
||||
|
||||
// check to see if a page table exists for this range
|
||||
uint32 index = VADDR_TO_PDENT(va);
|
||||
if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
|
||||
phys_addr_t pgtable;
|
||||
vm_page *page;
|
||||
|
||||
// we need to allocate a pgtable
|
||||
page = vm_page_allocate_page(reservation,
|
||||
PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
|
||||
|
||||
DEBUG_PAGE_ACCESS_END(page);
|
||||
|
||||
pgtable = (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
|
||||
|
||||
TRACE("map_tmap: asked for free page for pgtable. 0x%lx\n", pgtable);
|
||||
|
||||
// put it in the pgdir
|
||||
ARMPagingMethod32Bit::PutPageTableInPageDir(&pd[index], pgtable,
|
||||
attributes
|
||||
| ((attributes & B_USER_PROTECTION) != 0
|
||||
? B_WRITE_AREA : B_KERNEL_WRITE_AREA));
|
||||
|
||||
// update any other page directories, if it maps kernel space
|
||||
if (index >= FIRST_KERNEL_PGDIR_ENT
|
||||
&& index < (FIRST_KERNEL_PGDIR_ENT + NUM_KERNEL_PGDIR_ENTS)) {
|
||||
ARMPagingStructures32Bit::UpdateAllPageDirs(index, pd[index]);
|
||||
}
|
||||
|
||||
fMapCount++;
|
||||
}
|
||||
|
||||
// now, fill in the pentry
|
||||
struct thread* thread = thread_get_current_thread();
|
||||
ThreadCPUPinner pinner(thread);
|
||||
|
||||
page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
|
||||
pd[index] & ARM_PDE_ADDRESS_MASK);
|
||||
index = VADDR_TO_PTENT(va);
|
||||
|
||||
ASSERT_PRINT((pt[index] & ARM_PTE_TYPE_MASK) == 0,
|
||||
"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx32, va,
|
||||
pt[index]);
|
||||
|
||||
ARMPagingMethod32Bit::PutPageTableEntryInTable(&pt[index], pa, attributes,
|
||||
memoryType, fIsKernelMap);
|
||||
|
||||
pinner.Unlock();
|
||||
|
||||
// Note: We don't need to invalidate the TLB for this address, as previously
|
||||
// the entry was not present and the TLB doesn't cache those entries.
|
||||
|
||||
fMapCount++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
ARMVMTranslationMap32Bit::Unmap(addr_t start, addr_t end)
|
||||
{
|
||||
start = ROUNDDOWN(start, B_PAGE_SIZE);
|
||||
if (start >= end)
|
||||
return B_OK;
|
||||
|
||||
TRACE("unmap_tmap: asked to free pages 0x%lx to 0x%lx\n", start, end);
|
||||
|
||||
page_directory_entry *pd = fPagingStructures->pgdir_virt;
|
||||
|
||||
do {
|
||||
int index = VADDR_TO_PDENT(start);
|
||||
if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
|
||||
// no page table here, move the start up to access the next page
|
||||
// table
|
||||
start = ROUNDUP(start + 1, kPageTableAlignment);
|
||||
continue;
|
||||
}
|
||||
|
||||
struct thread* thread = thread_get_current_thread();
|
||||
ThreadCPUPinner pinner(thread);
|
||||
|
||||
page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
|
||||
pd[index] & ARM_PDE_ADDRESS_MASK);
|
||||
|
||||
for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
|
||||
index++, start += B_PAGE_SIZE) {
|
||||
if ((pt[index] & ARM_PTE_TYPE_MASK) == 0) {
|
||||
// page mapping not valid
|
||||
continue;
|
||||
}
|
||||
|
||||
TRACE("unmap_tmap: removing page 0x%lx\n", start);
|
||||
|
||||
page_table_entry oldEntry
|
||||
= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
|
||||
ARM_PTE_TYPE_MASK);
|
||||
fMapCount--;
|
||||
|
||||
#if 0 /* IRA */
|
||||
if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
|
||||
// Note, that we only need to invalidate the address, if the
|
||||
// accessed flags was set, since only then the entry could have
|
||||
// been in any TLB.
|
||||
InvalidatePage(start);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
} while (start != 0 && start < end);
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
/*! Caller must have locked the cache of the page to be unmapped.
|
||||
This object shouldn't be locked.
|
||||
*/
|
||||
status_t
|
||||
ARMVMTranslationMap32Bit::UnmapPage(VMArea* area, addr_t address,
|
||||
bool updatePageQueue)
|
||||
{
|
||||
ASSERT(address % B_PAGE_SIZE == 0);
|
||||
|
||||
page_directory_entry* pd = fPagingStructures->pgdir_virt;
|
||||
|
||||
TRACE("ARMVMTranslationMap32Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);
|
||||
|
||||
RecursiveLocker locker(fLock);
|
||||
|
||||
int index = VADDR_TO_PDENT(address);
|
||||
if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
|
||||
return B_ENTRY_NOT_FOUND;
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
|
||||
pd[index] & ARM_PDE_ADDRESS_MASK);
|
||||
|
||||
index = VADDR_TO_PTENT(address);
|
||||
page_table_entry oldEntry = ARMPagingMethod32Bit::ClearPageTableEntry(
|
||||
&pt[index]);
|
||||
|
||||
pinner.Unlock();
|
||||
|
||||
if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
|
||||
// page mapping not valid
|
||||
return B_ENTRY_NOT_FOUND;
|
||||
}
|
||||
|
||||
fMapCount--;
|
||||
|
||||
#if 0 //IRA
|
||||
if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
|
||||
// Note, that we only need to invalidate the address, if the
|
||||
// accessed flags was set, since only then the entry could have been
|
||||
// in any TLB.
|
||||
InvalidatePage(address);
|
||||
Flush();
|
||||
|
||||
// NOTE: Between clearing the page table entry and Flush() other
|
||||
// processors (actually even this processor with another thread of the
|
||||
// same team) could still access the page in question via their cached
|
||||
// entry. We can obviously lose a modified flag in this case, with the
|
||||
// effect that the page looks unmodified (and might thus be recycled),
|
||||
// but is actually modified.
|
||||
// In most cases this is harmless, but for vm_remove_all_page_mappings()
|
||||
// this is actually a problem.
|
||||
// Interestingly FreeBSD seems to ignore this problem as well
|
||||
// (cf. pmap_remove_all()), unless I've missed something.
|
||||
}
|
||||
#endif
|
||||
locker.Detach();
|
||||
// PageUnmapped() will unlock for us
|
||||
#if 0 //IRA
|
||||
PageUnmapped(area, (oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
|
||||
(oldEntry & ARM_PTE_ACCESSED) != 0, (oldEntry & ARM_PTE_DIRTY) != 0,
|
||||
updatePageQueue);
|
||||
#endif
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
ARMVMTranslationMap32Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
|
||||
bool updatePageQueue)
|
||||
{
|
||||
if (size == 0)
|
||||
return;
|
||||
|
||||
addr_t start = base;
|
||||
addr_t end = base + size - 1;
|
||||
|
||||
TRACE("ARMVMTranslationMap32Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
|
||||
B_PRIxADDR ")\n", area, start, end);
|
||||
|
||||
page_directory_entry* pd = fPagingStructures->pgdir_virt;
|
||||
|
||||
VMAreaMappings queue;
|
||||
|
||||
RecursiveLocker locker(fLock);
|
||||
|
||||
do {
|
||||
int index = VADDR_TO_PDENT(start);
|
||||
if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
|
||||
// no page table here, move the start up to access the next page
|
||||
// table
|
||||
start = ROUNDUP(start + 1, kPageTableAlignment);
|
||||
continue;
|
||||
}
|
||||
|
||||
struct thread* thread = thread_get_current_thread();
|
||||
ThreadCPUPinner pinner(thread);
|
||||
|
||||
page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
|
||||
pd[index] & ARM_PDE_ADDRESS_MASK);
|
||||
|
||||
for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
|
||||
index++, start += B_PAGE_SIZE) {
|
||||
page_table_entry oldEntry
|
||||
= ARMPagingMethod32Bit::ClearPageTableEntry(&pt[index]);
|
||||
if ((oldEntry & ARM_PTE_TYPE_MASK) == 0)
|
||||
continue;
|
||||
|
||||
fMapCount--;
|
||||
|
||||
#if 0 //IRA
|
||||
if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
|
||||
// Note, that we only need to invalidate the address, if the
|
||||
// accessed flags was set, since only then the entry could have
|
||||
// been in any TLB.
|
||||
InvalidatePage(start);
|
||||
}
|
||||
#endif
|
||||
if (area->cache_type != CACHE_TYPE_DEVICE) {
|
||||
// get the page
|
||||
vm_page* page = vm_lookup_page(
|
||||
(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
|
||||
ASSERT(page != NULL);
|
||||
|
||||
DEBUG_PAGE_ACCESS_START(page);
|
||||
#if 0
|
||||
// transfer the accessed/dirty flags to the page
|
||||
if ((oldEntry & ARM_PTE_ACCESSED) != 0)
|
||||
page->accessed = true;
|
||||
if ((oldEntry & ARM_PTE_DIRTY) != 0)
|
||||
page->modified = true;
|
||||
#endif
|
||||
// remove the mapping object/decrement the wired_count of the
|
||||
// page
|
||||
if (area->wiring == B_NO_LOCK) {
|
||||
vm_page_mapping* mapping = NULL;
|
||||
vm_page_mappings::Iterator iterator
|
||||
= page->mappings.GetIterator();
|
||||
while ((mapping = iterator.Next()) != NULL) {
|
||||
if (mapping->area == area)
|
||||
break;
|
||||
}
|
||||
|
||||
ASSERT(mapping != NULL);
|
||||
|
||||
area->mappings.Remove(mapping);
|
||||
page->mappings.Remove(mapping);
|
||||
queue.Add(mapping);
|
||||
} else
|
||||
page->DecrementWiredCount();
|
||||
|
||||
if (!page->IsMapped()) {
|
||||
atomic_add(&gMappedPagesCount, -1);
|
||||
|
||||
if (updatePageQueue) {
|
||||
if (page->Cache()->temporary)
|
||||
vm_page_set_state(page, PAGE_STATE_INACTIVE);
|
||||
else if (page->modified)
|
||||
vm_page_set_state(page, PAGE_STATE_MODIFIED);
|
||||
else
|
||||
vm_page_set_state(page, PAGE_STATE_CACHED);
|
||||
}
|
||||
}
|
||||
|
||||
DEBUG_PAGE_ACCESS_END(page);
|
||||
}
|
||||
}
|
||||
|
||||
Flush();
|
||||
// flush explicitly, since we directly use the lock
|
||||
} while (start != 0 && start < end);
|
||||
|
||||
// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
|
||||
// really critical here, as in all cases this method is used, the unmapped
|
||||
// area range is unmapped for good (resized/cut) and the pages will likely
|
||||
// be freed.
|
||||
|
||||
locker.Unlock();
|
||||
|
||||
// free removed mappings
|
||||
bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
|
||||
uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
|
||||
| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
|
||||
while (vm_page_mapping* mapping = queue.RemoveHead())
|
||||
object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
|
||||
}
|
||||
|
||||
|
||||
void
ARMVMTranslationMap32Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
	bool ignoreTopCachePageFlags)
{
	if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
		ARMVMTranslationMap32Bit::UnmapPages(area, area->Base(), area->Size(),
			true);
		return;
	}

	bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	RecursiveLocker locker(fLock);

	VMAreaMappings mappings;
	mappings.MoveFrom(&area->mappings);

	for (VMAreaMappings::Iterator it = mappings.GetIterator();
			vm_page_mapping* mapping = it.Next();) {
		vm_page* page = mapping->page;
		page->mappings.Remove(mapping);

		VMCache* cache = page->Cache();

		bool pageFullyUnmapped = false;
		if (!page->IsMapped()) {
			atomic_add(&gMappedPagesCount, -1);
			pageFullyUnmapped = true;
		}

		if (unmapPages || cache != area->cache) {
			addr_t address = area->Base()
				+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);

			int index = VADDR_TO_PDENT(address);
			if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page dir entry", page, area, address);
				continue;
			}

			ThreadCPUPinner pinner(thread_get_current_thread());

			page_table_entry* pt
				= (page_table_entry*)fPageMapper->GetPageTableAt(
					pd[index] & ARM_PDE_ADDRESS_MASK);
			page_table_entry oldEntry
				= ARMPagingMethod32Bit::ClearPageTableEntry(
					&pt[VADDR_TO_PTENT(address)]);

			pinner.Unlock();

			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
					"has no page table entry", page, area, address);
				continue;
			}
#if 0
			// transfer the accessed/dirty flags to the page and invalidate
			// the mapping, if necessary
			if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
				page->accessed = true;

				if (!deletingAddressSpace)
					InvalidatePage(address);
			}

			if ((oldEntry & ARM_PTE_DIRTY) != 0)
				page->modified = true;
#endif
			if (pageFullyUnmapped) {
				DEBUG_PAGE_ACCESS_START(page);

				if (cache->temporary)
					vm_page_set_state(page, PAGE_STATE_INACTIVE);
				else if (page->modified)
					vm_page_set_state(page, PAGE_STATE_MODIFIED);
				else
					vm_page_set_state(page, PAGE_STATE_CACHED);

				DEBUG_PAGE_ACCESS_END(page);
			}
		}

		fMapCount--;
	}

	Flush();
		// flush explicitly, since we directly use the lock

	locker.Unlock();

	bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
	uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
		| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
	while (vm_page_mapping* mapping = mappings.RemoveHead())
		object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
}


status_t
ARMVMTranslationMap32Bit::Query(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	// default the flags to not present
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry *pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no page table here
		return B_OK;
	}

	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	// 'entry' is a page table entry, so mask with the PTE (not the PDE)
	// address mask; the PDE mask would leave protection bits in the address
	*_physical = (entry & ARM_PTE_ADDRESS_MASK) | VADDR_TO_PGOFF(va);

#if 0 //IRA
	// read in the page state flags
	if ((entry & ARM_PTE_USER) != 0) {
		*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
	*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
#endif
	pinner.Unlock();

	TRACE("query_tmap: returning pa 0x%lx for va 0x%lx\n", *_physical, va);

	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::QueryInterrupt(addr_t va, phys_addr_t *_physical,
	uint32 *_flags)
{
	*_flags = 0;
	*_physical = 0;

	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no page table here
		return B_OK;
	}

	// map page table entry
	page_table_entry* pt = (page_table_entry*)ARMPagingMethod32Bit::Method()
		->PhysicalPageMapper()->InterruptGetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);
	page_table_entry entry = pt[VADDR_TO_PTENT(va)];

	// as in Query(): use the PTE address mask on a page table entry
	*_physical = (entry & ARM_PTE_ADDRESS_MASK) | VADDR_TO_PGOFF(va);
#if 0
	// read in the page state flags
	if ((entry & ARM_PTE_USER) != 0) {
		*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
			| B_READ_AREA;
	}

	*_flags |= ((entry & ARM_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
		| B_KERNEL_READ_AREA
		| ((entry & ARM_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
		| ((entry & ARM_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
		| ((entry & ARM_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
#else
	*_flags = B_KERNEL_WRITE_AREA | B_KERNEL_READ_AREA;
#endif
	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::Protect(addr_t start, addr_t end, uint32 attributes,
	uint32 memoryType)
{
	start = ROUNDDOWN(start, B_PAGE_SIZE);
	if (start >= end)
		return B_OK;

	TRACE("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
		attributes);
#if 0 //IRA
	// compute protection flags
	uint32 newProtectionFlags = 0;
	if ((attributes & B_USER_PROTECTION) != 0) {
		newProtectionFlags = ARM_PTE_USER;
		if ((attributes & B_WRITE_AREA) != 0)
			newProtectionFlags |= ARM_PTE_WRITABLE;
	} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
		newProtectionFlags = ARM_PTE_WRITABLE;

	page_directory_entry *pd = fPagingStructures->pgdir_virt;

	do {
		int index = VADDR_TO_PDENT(start);
		if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
			// no page table here, move the start up to access the next page
			// table
			start = ROUNDUP(start + 1, kPageTableAlignment);
			continue;
		}

		struct thread* thread = thread_get_current_thread();
		ThreadCPUPinner pinner(thread);

		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
			pd[index] & ARM_PDE_ADDRESS_MASK);

		for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
				index++, start += B_PAGE_SIZE) {
			page_table_entry entry = pt[index];
			if ((entry & ARM_PTE_PRESENT) == 0) {
				// page mapping not valid
				continue;
			}

			TRACE("protect_tmap: protect page 0x%lx\n", start);

			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			page_table_entry oldEntry;
			while (true) {
				oldEntry = ARMPagingMethod32Bit::TestAndSetPageTableEntry(
					&pt[index],
					(entry & ~(ARM_PTE_PROTECTION_MASK
							| ARM_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| ARMPagingMethod32Bit::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
				// Note that we only need to invalidate the address if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);
#endif
	return B_OK;
}


status_t
ARMVMTranslationMap32Bit::ClearFlags(addr_t va, uint32 flags)
{
	int index = VADDR_TO_PDENT(va);
	page_directory_entry* pd = fPagingStructures->pgdir_virt;
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0) {
		// no page table here
		return B_OK;
	}
#if 0 //IRA
	uint32 flagsToClear = ((flags & PAGE_MODIFIED) ? ARM_PTE_DIRTY : 0)
		| ((flags & PAGE_ACCESSED) ? ARM_PTE_ACCESSED : 0);
#else
	uint32 flagsToClear = 0;
#endif
	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner pinner(thread);

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);
	index = VADDR_TO_PTENT(va);

	// clear out the flags we've been requested to clear
	page_table_entry oldEntry
		= ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			flagsToClear);

	pinner.Unlock();

	if ((oldEntry & flagsToClear) != 0)
		InvalidatePage(va);

	return B_OK;
}


bool
ARMVMTranslationMap32Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
	bool unmapIfUnaccessed, bool& _modified)
{
	ASSERT(address % B_PAGE_SIZE == 0);

	page_directory_entry* pd = fPagingStructures->pgdir_virt;

	TRACE("ARMVMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
		")\n", address);

	RecursiveLocker locker(fLock);

	int index = VADDR_TO_PDENT(address);
	if ((pd[index] & ARM_PDE_TYPE_MASK) == 0)
		return false;

	ThreadCPUPinner pinner(thread_get_current_thread());

	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
		pd[index] & ARM_PDE_ADDRESS_MASK);

	index = VADDR_TO_PTENT(address);

	// perform the deed
	page_table_entry oldEntry;

	if (unmapIfUnaccessed) {
		while (true) {
			oldEntry = pt[index];
			if ((oldEntry & ARM_PTE_TYPE_MASK) == 0) {
				// page mapping not valid
				return false;
			}
#if 0 //IRA
			if (oldEntry & ARM_PTE_ACCESSED) {
				// page was accessed -- just clear the flags
				oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(
					&pt[index], ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
				break;
			}
#endif
			// page hasn't been accessed -- unmap it
			if (ARMPagingMethod32Bit::TestAndSetPageTableEntry(&pt[index], 0,
					oldEntry) == oldEntry) {
				break;
			}

			// something changed -- check again
		}
	} else {
#if 0 //IRA
		oldEntry = ARMPagingMethod32Bit::ClearPageTableEntryFlags(&pt[index],
			ARM_PTE_ACCESSED | ARM_PTE_DIRTY);
#else
		oldEntry = pt[index];
#endif
	}

	pinner.Unlock();

#if 0 //IRA
	_modified = (oldEntry & ARM_PTE_DIRTY) != 0;

	if ((oldEntry & ARM_PTE_ACCESSED) != 0) {
		// Note that we only need to invalidate the address if the
		// accessed flag was set, since only then the entry could have been
		// in any TLB.
		InvalidatePage(address);

		Flush();

		return true;
	}
#else
	_modified = false;
#endif

	if (!unmapIfUnaccessed)
		return false;

	// We have unmapped the address. Do the "high level" stuff.

	fMapCount--;

	locker.Detach();
		// UnaccessedPageUnmapped() will unlock for us

	UnaccessedPageUnmapped(area,
		(oldEntry & ARM_PTE_ADDRESS_MASK) / B_PAGE_SIZE);

	return false;
}


ARMPagingStructures*
ARMVMTranslationMap32Bit::PagingStructures() const
{
	return fPagingStructures;
}
src/system/kernel/arch/arm/paging/32bit/ARMVMTranslationMap32Bit.h (new file, 65 lines)
@ -0,0 +1,65 @@
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_VM_TRANSLATION_MAP_32_BIT_H
#define KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_VM_TRANSLATION_MAP_32_BIT_H


#include "paging/ARMVMTranslationMap.h"


struct ARMPagingStructures32Bit;


struct ARMVMTranslationMap32Bit : ARMVMTranslationMap {
								ARMVMTranslationMap32Bit();
	virtual						~ARMVMTranslationMap32Bit();

			status_t			Init(bool kernel);

	virtual	size_t				MaxPagesNeededToMap(addr_t start,
									addr_t end) const;

	virtual	status_t			Map(addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint32 attributes, uint32 memoryType,
									vm_page_reservation* reservation);
	virtual	status_t			Unmap(addr_t start, addr_t end);

	virtual	status_t			UnmapPage(VMArea* area, addr_t address,
									bool updatePageQueue);
	virtual	void				UnmapPages(VMArea* area, addr_t base,
									size_t size, bool updatePageQueue);
	virtual	void				UnmapArea(VMArea* area,
									bool deletingAddressSpace,
									bool ignoreTopCachePageFlags);

	virtual	status_t			Query(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);
	virtual	status_t			QueryInterrupt(addr_t virtualAddress,
									phys_addr_t* _physicalAddress,
									uint32* _flags);

	virtual	status_t			Protect(addr_t base, addr_t top,
									uint32 attributes, uint32 memoryType);

	virtual	status_t			ClearFlags(addr_t virtualAddress,
									uint32 flags);

	virtual	bool				ClearAccessedAndModified(
									VMArea* area, addr_t address,
									bool unmapIfUnaccessed,
									bool& _modified);

	virtual	ARMPagingStructures* PagingStructures() const;
	inline	ARMPagingStructures32Bit* PagingStructures32Bit() const
									{ return fPagingStructures; }

private:
			ARMPagingStructures32Bit* fPagingStructures;
};


#endif	// KERNEL_ARCH_ARM_PAGING_32_BIT_ARM_VM_TRANSLATION_MAP_32_BIT_H
src/system/kernel/arch/arm/paging/32bit/paging.h (new file, 53 lines)
@ -0,0 +1,53 @@
/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2005-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef _KERNEL_ARCH_ARM_PAGING_32_BIT_PAGING_H
#define _KERNEL_ARCH_ARM_PAGING_32_BIT_PAGING_H


#include <SupportDefs.h>

#include <int.h>
#include <kernel.h>


#define VADDR_TO_PDENT(va)	((va) >> 20)
#define VADDR_TO_PTENT(va)	(((va) & 0xff000) >> 12)
#define VADDR_TO_PGOFF(va)	((va) & 0x0fff)

// page directory entry bits
#define ARM_PDE_TYPE_MASK					0x00000003
#define ARM_PDE_TYPE_COARSE_L2_PAGE_TABLE	0x00000001
#define ARM_PDE_TYPE_SECTION				0x00000002
#define ARM_PDE_TYPE_FINE_L2_PAGE_TABLE		0x00000003

#define ARM_PDE_ADDRESS_MASK				0xfffffc00

// page table entry bits
#define ARM_PTE_TYPE_MASK					0x00000003
#define ARM_PTE_TYPE_LARGE_PAGE				0x00000001
#define ARM_PTE_TYPE_SMALL_PAGE				0x00000002
#define ARM_PTE_TYPE_EXT_SMALL_PAGE			0x00000003

#define ARM_PTE_ADDRESS_MASK				0xfffff000

#define FIRST_USER_PGDIR_ENT	(VADDR_TO_PDENT(USER_BASE))
#define NUM_USER_PGDIR_ENTS		(VADDR_TO_PDENT(ROUNDUP(USER_SIZE, \
									B_PAGE_SIZE * 1024)))
#define FIRST_KERNEL_PGDIR_ENT	(VADDR_TO_PDENT(KERNEL_BASE))
#define NUM_KERNEL_PGDIR_ENTS	(VADDR_TO_PDENT(KERNEL_SIZE))


static const size_t kPageTableAlignment = 1024 * B_PAGE_SIZE;


typedef uint32 page_table_entry;
typedef uint32 page_directory_entry;


#endif	// _KERNEL_ARCH_ARM_PAGING_32_BIT_PAGING_H
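
As a quick illustration of the address-splitting macros above, here is a standalone sketch (not part of the commit; the sample address is arbitrary):

#include <cstdio>
#include <cstdint>

#define VADDR_TO_PDENT(va)	((va) >> 20)
#define VADDR_TO_PTENT(va)	(((va) & 0xff000) >> 12)
#define VADDR_TO_PGOFF(va)	((va) & 0x0fff)

int
main()
{
	uint32_t va = 0x80001234;
	// bits 31-20 select the 1 MB section -> page directory index
	printf("pd index:    0x%03x\n", (unsigned)VADDR_TO_PDENT(va));	// 0x800
	// bits 19-12 select the 4 KB page within the section -> page table index
	printf("pt index:    0x%02x\n", (unsigned)VADDR_TO_PTENT(va));	// 0x01
	// bits 11-0 are the byte offset within the page
	printf("page offset: 0x%03x\n", (unsigned)VADDR_TO_PGOFF(va));	// 0x234
	return 0;
}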
src/system/kernel/arch/arm/paging/ARMPagingMethod.cpp (new file, 15 lines)
@ -0,0 +1,15 @@
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "paging/ARMPagingMethod.h"


ARMPagingMethod* gARMPagingMethod;


ARMPagingMethod::~ARMPagingMethod()
{
}
src/system/kernel/arch/arm/paging/ARMPagingMethod.h (new file, 44 lines)
@ -0,0 +1,44 @@
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_ARM_PAGING_ARM_PAGING_METHOD_H
#define KERNEL_ARCH_ARM_PAGING_ARM_PAGING_METHOD_H


#include <SupportDefs.h>


struct kernel_args;
struct VMPhysicalPageMapper;
struct VMTranslationMap;


class ARMPagingMethod {
public:
	virtual						~ARMPagingMethod();

	virtual	status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper)
									= 0;
	virtual	status_t			InitPostArea(kernel_args* args) = 0;

	virtual	status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map) = 0;

	virtual	status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									phys_addr_t (*get_free_page)(kernel_args*))
									= 0;

	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection) = 0;
};


extern ARMPagingMethod* gARMPagingMethod;


#endif	// KERNEL_ARCH_ARM_PAGING_ARM_PAGING_METHOD_H
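
The concrete implementation of this interface lives in paging/32bit (ARMPagingMethod32Bit, see the Jamfile above). As a shape reminder, a minimal conforming stub could look like this; it is illustrative only, and DummyPagingMethod is a hypothetical name, not part of the commit:

#include "paging/ARMPagingMethod.h"

class DummyPagingMethod : public ARMPagingMethod {
public:
	virtual status_t Init(kernel_args* args,
		VMPhysicalPageMapper** _physicalPageMapper)
	{
		*_physicalPageMapper = NULL;	// a real method hands out its mapper
		return B_OK;
	}

	virtual status_t InitPostArea(kernel_args* args) { return B_OK; }

	virtual status_t CreateTranslationMap(bool kernel, VMTranslationMap** _map)
	{
		return B_UNSUPPORTED;	// a real method allocates a map here
	}

	virtual status_t MapEarly(kernel_args* args, addr_t virtualAddress,
		phys_addr_t physicalAddress, uint8 attributes,
		phys_addr_t (*get_free_page)(kernel_args*))
	{
		return B_UNSUPPORTED;	// a real method installs an early mapping
	}

	virtual bool IsKernelPageAccessible(addr_t virtualAddress,
		uint32 protection)
	{
		return false;
	}
};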
src/system/kernel/arch/arm/paging/ARMPagingStructures.cpp (new file, 20 lines)
@ -0,0 +1,20 @@
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


#include "paging/ARMPagingStructures.h"


ARMPagingStructures::ARMPagingStructures()
	:
	ref_count(1),
	active_on_cpus(0)
{
}


ARMPagingStructures::~ARMPagingStructures()
{
}
src/system/kernel/arch/arm/paging/ARMPagingStructures.h (new file, 49 lines)
@ -0,0 +1,49 @@
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2005-2009, Axel Dörfler, axeld@pinc-software.de.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */
#ifndef KERNEL_ARCH_ARM_PAGING_ARM_PAGING_STRUCTURES_H
#define KERNEL_ARCH_ARM_PAGING_ARM_PAGING_STRUCTURES_H


#include <SupportDefs.h>

#include <heap.h>


struct ARMPagingStructures : DeferredDeletable {
	uint32						pgdir_phys;
	vint32						ref_count;
	vint32						active_on_cpus;
		// mask indicating on which CPUs the map is currently used

								ARMPagingStructures();
	virtual						~ARMPagingStructures();

	inline	void				AddReference();
	inline	void				RemoveReference();

	virtual	void				Delete() = 0;
};


inline void
ARMPagingStructures::AddReference()
{
	atomic_add(&ref_count, 1);
}


inline void
ARMPagingStructures::RemoveReference()
{
	if (atomic_add(&ref_count, -1) == 1)
		Delete();
}


#endif	// KERNEL_ARCH_ARM_PAGING_ARM_PAGING_STRUCTURES_H
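
The reference counting above relies on atomic_add() returning the value *before* the addition, so seeing 1 means the last reference was just dropped. A small host-side model of the same pattern (a sketch using std::atomic, not code from the commit):

#include <atomic>
#include <cstdio>

struct RefCounted {
	std::atomic<int> refCount{1};

	void AddReference() { refCount.fetch_add(1); }

	void RemoveReference()
	{
		// fetch_add() returns the previous value, like atomic_add(); a
		// previous value of 1 means this call released the last reference
		if (refCount.fetch_add(-1) == 1)
			printf("last reference gone -> Delete()\n");
	}
};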
src/system/kernel/arch/arm/paging/ARMVMTranslationMap.cpp (new file, 147 lines)
@ -0,0 +1,147 @@
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Distributed under the terms of the MIT License.
 *
 * Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
 * Distributed under the terms of the NewOS License.
 */


#include "paging/ARMVMTranslationMap.h"

#include <thread.h>
#include <smp.h>

#include "paging/ARMPagingStructures.h"


//#define TRACE_ARM_VM_TRANSLATION_MAP
#ifdef TRACE_ARM_VM_TRANSLATION_MAP
#	define TRACE(x...) dprintf(x)
#else
#	define TRACE(x...) ;
#endif


ARMVMTranslationMap::ARMVMTranslationMap()
	:
	fPageMapper(NULL),
	fInvalidPagesCount(0)
{
}


ARMVMTranslationMap::~ARMVMTranslationMap()
{
}


status_t
ARMVMTranslationMap::Init(bool kernel)
{
	fIsKernelMap = kernel;
	return B_OK;
}


/*!	Acquires the map's recursive lock, and resets the invalidate pages counter
	in case it's the first locking recursion.
*/
bool
ARMVMTranslationMap::Lock()
{
	TRACE("%p->ARMVMTranslationMap::Lock()\n", this);

	recursive_lock_lock(&fLock);
	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we were the first one to grab the lock
		TRACE("clearing invalidated page count\n");
		fInvalidPagesCount = 0;
	}

	return true;
}


/*!	Unlocks the map, and, if we are actually losing the recursive lock,
	flushes all pending changes of this map (i.e. flushes TLB caches as
	needed).
*/
void
ARMVMTranslationMap::Unlock()
{
	TRACE("%p->ARMVMTranslationMap::Unlock()\n", this);

	if (recursive_lock_get_recursion(&fLock) == 1) {
		// we're about to release it for the last time
		Flush();
	}

	recursive_lock_unlock(&fLock);
}


addr_t
ARMVMTranslationMap::MappedSize() const
{
	return fMapCount;
}


void
ARMVMTranslationMap::Flush()
{
	if (fInvalidPagesCount <= 0)
		return;

	struct thread* thread = thread_get_current_thread();
	thread_pin_to_current_cpu(thread);

	if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
		// invalidate all pages
		TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
			fInvalidPagesCount);

		if (fIsKernelMap) {
			arch_cpu_global_TLB_invalidate();
			smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
				NULL, SMP_MSG_FLAG_SYNC);
		} else {
			cpu_status state = disable_interrupts();
			arch_cpu_user_TLB_invalidate();
			restore_interrupts(state);

			int cpu = smp_get_current_cpu();
			uint32 cpuMask = PagingStructures()->active_on_cpus
				& ~((uint32)1 << cpu);
			if (cpuMask != 0) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
					0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
			}
		}
	} else {
		TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
			fInvalidPagesCount);

		arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);

		if (fIsKernelMap) {
			smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
				(uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
				SMP_MSG_FLAG_SYNC);
		} else {
			int cpu = smp_get_current_cpu();
			uint32 cpuMask = PagingStructures()->active_on_cpus
				& ~((uint32)1 << cpu);
			if (cpuMask != 0) {
				smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
					(uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
					SMP_MSG_FLAG_SYNC);
			}
		}
	}
	fInvalidPagesCount = 0;

	thread_unpin_from_current_cpu(thread);
}
src/system/kernel/arch/arm/paging/ARMVMTranslationMap.h (new file, 54 lines)
@ -0,0 +1,54 @@
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_ARM_ARM_VM_TRANSLATION_MAP_H
#define KERNEL_ARCH_ARM_ARM_VM_TRANSLATION_MAP_H


#include <vm/VMTranslationMap.h>


#define PAGE_INVALIDATE_CACHE_SIZE 64


struct ARMPagingStructures;
class TranslationMapPhysicalPageMapper;


struct ARMVMTranslationMap : VMTranslationMap {
								ARMVMTranslationMap();
	virtual						~ARMVMTranslationMap();

			status_t			Init(bool kernel);

	virtual	bool				Lock();
	virtual	void				Unlock();

	virtual	addr_t				MappedSize() const;

	virtual	void				Flush();

	virtual	ARMPagingStructures* PagingStructures() const = 0;

	inline	void				InvalidatePage(addr_t address);

protected:
			TranslationMapPhysicalPageMapper* fPageMapper;
			int					fInvalidPagesCount;
			addr_t				fInvalidPages[PAGE_INVALIDATE_CACHE_SIZE];
			bool				fIsKernelMap;
};


void
ARMVMTranslationMap::InvalidatePage(addr_t address)
{
	if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
		fInvalidPages[fInvalidPagesCount] = address;

	fInvalidPagesCount++;
}


#endif	// KERNEL_ARCH_ARM_ARM_VM_TRANSLATION_MAP_H
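
InvalidatePage() above keeps counting past the array bound on purpose: Flush() uses the count alone to decide between invalidating the recorded list and wiping the whole TLB. A standalone model of that accounting (hypothetical names, not code from the commit):

#include <cstdio>

static const int kCacheSize = 64;	// mirrors PAGE_INVALIDATE_CACHE_SIZE

struct InvalidationCache {
	unsigned long	pages[kCacheSize];
	int				count;

	InvalidationCache() : count(0) {}

	void Add(unsigned long pageAddress)
	{
		if (count < kCacheSize)
			pages[count] = pageAddress;	// record while there is room
		count++;	// always count, even past capacity
	}

	void Flush()
	{
		if (count > kCacheSize)
			printf("full TLB invalidation (%d pages pending)\n", count);
		else
			printf("invalidate %d recorded entries\n", count);
		count = 0;
	}
};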
src/system/kernel/arch/arm/paging/arm_physical_page_mapper.cpp (new file, 16 lines)
@ -0,0 +1,16 @@
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */

#include "paging/arm_physical_page_mapper.h"


TranslationMapPhysicalPageMapper::~TranslationMapPhysicalPageMapper()
{
}


ARMPhysicalPageMapper::~ARMPhysicalPageMapper()
{
}
src/system/kernel/arch/arm/paging/arm_physical_page_mapper.h (new file, 40 lines)
@ -0,0 +1,40 @@
/*
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_ARM_PAGING_ARM_PHYSICAL_PAGE_MAPPER_H
#define KERNEL_ARCH_ARM_PAGING_ARM_PHYSICAL_PAGE_MAPPER_H


#include <vm/VMTranslationMap.h>


struct kernel_args;
struct vm_translation_map_ops;


class TranslationMapPhysicalPageMapper {
public:
	virtual						~TranslationMapPhysicalPageMapper();

	virtual	void				Delete() = 0;

	virtual	void*				GetPageTableAt(phys_addr_t physicalAddress) = 0;
		// Must be invoked with thread pinned to current CPU.
};


class ARMPhysicalPageMapper : public VMPhysicalPageMapper {
public:
	virtual						~ARMPhysicalPageMapper();

	virtual	status_t			CreateTranslationMapPhysicalPageMapper(
									TranslationMapPhysicalPageMapper** _mapper)
									= 0;

	virtual	void*				InterruptGetPageTableAt(
									phys_addr_t physicalAddress) = 0;
};


#endif	// KERNEL_ARCH_ARM_PAGING_ARM_PHYSICAL_PAGE_MAPPER_H
src/system/kernel/arch/arm/paging/arm_physical_page_mapper_large_memory.cpp (new file, 770 lines)
@ -0,0 +1,770 @@
/*
 * Copyright 2010, Ithamar R. Adema, ithamar.adema@team-embedded.nl
 * Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */


/*!	Implementation of a physical page mapping strategy (PhysicalPageMapper,
	TranslationMapPhysicalPageMapper) suitable for machines with a lot of
	memory, i.e. more than we can afford to completely map into the kernel
	address space.

	We allocate a single page table (one page) that can map 1024 pages and
	a corresponding virtual address space region (4 MB). Each of those 1024
	slots can map a physical page. We reserve a fixed amount of slots per CPU.
	They will be used for physical operations on that CPU (memset()/memcpy()
	and {get,put}_physical_page_current_cpu()). A few slots we reserve for each
	translation map (TranslationMapPhysicalPageMapper). Those will only be used
	with the translation map locked, mapping a page table page. The remaining
	slots remain in the global pool and are given out by get_physical_page().

	When we run out of slots, we allocate another page table (and virtual
	address space region).
*/


#include "paging/arm_physical_page_mapper_large_memory.h"

#include <new>

#include <AutoDeleter.h>

#include <cpu.h>
#include <lock.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>

#include "paging/arm_physical_page_mapper.h"
#include "paging/ARMPagingStructures.h"
#include "paging/ARMVMTranslationMap.h"


// The number of slots we reserve per translation map for mapping page tables.
// One slot would suffice, since the map is locked while mapping a page table,
// but we re-use several slots on a LRU-basis so that we can keep the mappings
// a little longer, thus avoiding re-mapping.
#define SLOTS_PER_TRANSLATION_MAP	4

#define USER_SLOTS_PER_CPU			16
#define KERNEL_SLOTS_PER_CPU		16
#define TOTAL_SLOTS_PER_CPU			(USER_SLOTS_PER_CPU \
										+ KERNEL_SLOTS_PER_CPU + 1)
	// one slot is for use in interrupts
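// To put these numbers in perspective (an illustrative back-of-the-envelope
// check, assuming the pool layout described in the comment above): one pool
// provides 1024 slots, while each CPU permanently reserves
// TOTAL_SLOTS_PER_CPU = 16 + 16 + 1 = 33 of them, so the initial pool covers
// the fixed per-CPU reservations of roughly 1024 / 33 = 31 CPUs (ignoring
// the per-translation-map and debug slots) before another pool must be
// allocated.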

using ARMLargePhysicalPageMapper::PhysicalPageSlot;
using ARMLargePhysicalPageMapper::PhysicalPageSlotPool;


class PhysicalPageSlotQueue {
public:
								PhysicalPageSlotQueue();

	inline	PhysicalPageSlot*	GetSlot();
	inline	void				GetSlots(PhysicalPageSlot*& slot1,
									PhysicalPageSlot*& slot2);
	inline	void				PutSlot(PhysicalPageSlot* slot);
	inline	void				PutSlots(PhysicalPageSlot* slot1,
									PhysicalPageSlot* slot2);

private:
			PhysicalPageSlot*	fSlots;
			ConditionVariable	fFreeSlotCondition;
			ConditionVariable	fFreeSlotsCondition;
};


struct PhysicalPageOpsCPUData {
	PhysicalPageSlotQueue		user;
		// Used when copying from/to user memory. This can cause a page fault
		// which might need to memcpy()/memset() a page when being handled.
	PhysicalPageSlotQueue		kernel;
		// Used when memset()ing or memcpy()ing non-user memory.
	PhysicalPageSlot*			interruptSlot;

			void				Init();

private:
	static	PhysicalPageSlot*	_GetInitialSlot();
};


// #pragma mark -


class LargeMemoryTranslationMapPhysicalPageMapper
	: public TranslationMapPhysicalPageMapper {
public:
								LargeMemoryTranslationMapPhysicalPageMapper();
	virtual						~LargeMemoryTranslationMapPhysicalPageMapper();

			status_t			Init();

	virtual	void				Delete();

	virtual	void*				GetPageTableAt(phys_addr_t physicalAddress);

private:
			struct page_slot {
				PhysicalPageSlot*	slot;
				phys_addr_t			physicalAddress;
				cpu_mask_t			valid;
			};

			page_slot			fSlots[SLOTS_PER_TRANSLATION_MAP];
			int32				fSlotCount;	// must be a power of 2
			int32				fNextSlot;
};


class LargeMemoryPhysicalPageMapper : public ARMPhysicalPageMapper {
public:
								LargeMemoryPhysicalPageMapper();

			status_t			Init(kernel_args* args,
									PhysicalPageSlotPool* initialPool,
									TranslationMapPhysicalPageMapper*&
										_kernelPageMapper);

	virtual	status_t			CreateTranslationMapPhysicalPageMapper(
									TranslationMapPhysicalPageMapper** _mapper);

	virtual	void*				InterruptGetPageTableAt(
									phys_addr_t physicalAddress);

	virtual	status_t			GetPage(phys_addr_t physicalAddress,
									addr_t* virtualAddress, void** handle);
	virtual	status_t			PutPage(addr_t virtualAddress, void* handle);

	virtual	status_t			GetPageCurrentCPU(phys_addr_t physicalAddress,
									addr_t* virtualAddress, void** handle);
	virtual	status_t			PutPageCurrentCPU(addr_t virtualAddress,
									void* handle);

	virtual	status_t			GetPageDebug(phys_addr_t physicalAddress,
									addr_t* virtualAddress, void** handle);
	virtual	status_t			PutPageDebug(addr_t virtualAddress,
									void* handle);

	virtual	status_t			MemsetPhysical(phys_addr_t address, int value,
									phys_size_t length);
	virtual	status_t			MemcpyFromPhysical(void* to, phys_addr_t from,
									size_t length, bool user);
	virtual	status_t			MemcpyToPhysical(phys_addr_t to,
									const void* from, size_t length, bool user);
	virtual	void				MemcpyPhysicalPage(phys_addr_t to,
									phys_addr_t from);

			status_t			GetSlot(bool canWait,
									PhysicalPageSlot*& slot);
			void				PutSlot(PhysicalPageSlot* slot);

	inline	PhysicalPageSlotQueue* GetSlotQueue(int32 cpu, bool user);

private:
	typedef DoublyLinkedList<PhysicalPageSlotPool> PoolList;

			mutex				fLock;
			PoolList			fEmptyPools;
			PoolList			fNonEmptyPools;
			PhysicalPageSlot*	fDebugSlot;
			PhysicalPageSlotPool* fInitialPool;
			LargeMemoryTranslationMapPhysicalPageMapper	fKernelMapper;
			PhysicalPageOpsCPUData fPerCPUData[B_MAX_CPU_COUNT];
};

static LargeMemoryPhysicalPageMapper sPhysicalPageMapper;


// #pragma mark - PhysicalPageSlot / PhysicalPageSlotPool


inline void
PhysicalPageSlot::Map(phys_addr_t physicalAddress)
{
	pool->Map(physicalAddress, address);
}


PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}


inline bool
PhysicalPageSlotPool::IsEmpty() const
{
	return fSlots == NULL;
}


inline PhysicalPageSlot*
PhysicalPageSlotPool::GetSlot()
{
	PhysicalPageSlot* slot = fSlots;
	fSlots = slot->next;
	return slot;
}


inline void
PhysicalPageSlotPool::PutSlot(PhysicalPageSlot* slot)
{
	slot->next = fSlots;
	fSlots = slot;
}


// #pragma mark - PhysicalPageSlotQueue


PhysicalPageSlotQueue::PhysicalPageSlotQueue()
	:
	fSlots(NULL)
{
	fFreeSlotCondition.Init(this, "physical page ops slot queue");
	fFreeSlotsCondition.Init(this, "physical page ops slots queue");
}


PhysicalPageSlot*
PhysicalPageSlotQueue::GetSlot()
{
	InterruptsLocker locker;

	// wait for a free slot to turn up
	while (fSlots == NULL) {
		ConditionVariableEntry entry;
		fFreeSlotCondition.Add(&entry);
		locker.Unlock();
		entry.Wait();
		locker.Lock();
	}

	PhysicalPageSlot* slot = fSlots;
	fSlots = slot->next;

	return slot;
}


void
PhysicalPageSlotQueue::GetSlots(PhysicalPageSlot*& slot1,
	PhysicalPageSlot*& slot2)
{
	InterruptsLocker locker;

	// wait for two free slots to turn up
	while (fSlots == NULL || fSlots->next == NULL) {
		ConditionVariableEntry entry;
		fFreeSlotsCondition.Add(&entry);
		locker.Unlock();
		entry.Wait();
		locker.Lock();
	}

	slot1 = fSlots;
	slot2 = slot1->next;
	fSlots = slot2->next;
}


void
PhysicalPageSlotQueue::PutSlot(PhysicalPageSlot* slot)
{
	InterruptsLocker locker;

	slot->next = fSlots;
	fSlots = slot;

	if (slot->next == NULL)
		fFreeSlotCondition.NotifyAll();
	else if (slot->next->next == NULL)
		fFreeSlotsCondition.NotifyAll();
			// two slots are available now -- also wake threads blocked in
			// GetSlots(), which wait on the two-slot condition variable
}


void
PhysicalPageSlotQueue::PutSlots(PhysicalPageSlot* slot1,
	PhysicalPageSlot* slot2)
{
	InterruptsLocker locker;

	slot1->next = slot2;
	slot2->next = fSlots;
	fSlots = slot1;

	if (slot2->next == NULL)
		fFreeSlotCondition.NotifyAll();
	else if (slot2->next->next == NULL)
		fFreeSlotsCondition.NotifyAll();
			// see PutSlot(): notify the two-slot waiters as well
}


// #pragma mark - PhysicalPageOpsCPUData


void
PhysicalPageOpsCPUData::Init()
{
	for (int32 i = 0; i < USER_SLOTS_PER_CPU; i++)
		user.PutSlot(_GetInitialSlot());
	for (int32 i = 0; i < KERNEL_SLOTS_PER_CPU; i++)
		kernel.PutSlot(_GetInitialSlot());
	interruptSlot = _GetInitialSlot();
}


/* static */ PhysicalPageSlot*
PhysicalPageOpsCPUData::_GetInitialSlot()
{
	PhysicalPageSlot* slot;
	status_t error = sPhysicalPageMapper.GetSlot(false, slot);
	if (error != B_OK) {
		panic("PhysicalPageOpsCPUData::Init(): Failed to get initial "
			"physical page slots! Probably too many CPUs.");
		return NULL;
	}

	return slot;
}


// #pragma mark - LargeMemoryTranslationMapPhysicalPageMapper


LargeMemoryTranslationMapPhysicalPageMapper
	::LargeMemoryTranslationMapPhysicalPageMapper()
	:
	fSlotCount(sizeof(fSlots) / sizeof(page_slot)),
	fNextSlot(0)
{
	memset(fSlots, 0, sizeof(fSlots));
}


LargeMemoryTranslationMapPhysicalPageMapper
	::~LargeMemoryTranslationMapPhysicalPageMapper()
{
	// put our slots back to the global pool
	for (int32 i = 0; i < fSlotCount; i++) {
		if (fSlots[i].slot != NULL)
			sPhysicalPageMapper.PutSlot(fSlots[i].slot);
	}
}


status_t
LargeMemoryTranslationMapPhysicalPageMapper::Init()
{
	// get our slots from the global pool
	for (int32 i = 0; i < fSlotCount; i++) {
		status_t error = sPhysicalPageMapper.GetSlot(true, fSlots[i].slot);
		if (error != B_OK)
			return error;

		// set to invalid physical address, so it won't be used accidentally
		fSlots[i].physicalAddress = ~(phys_addr_t)0;
	}

	return B_OK;
}


void
LargeMemoryTranslationMapPhysicalPageMapper::Delete()
{
	delete this;
}


void*
LargeMemoryTranslationMapPhysicalPageMapper::GetPageTableAt(
	phys_addr_t physicalAddress)
{
	phys_addr_t off = physicalAddress & (B_PAGE_SIZE - 1);
	physicalAddress &= ~(phys_addr_t)(B_PAGE_SIZE - 1);

	int32 currentCPU = smp_get_current_cpu();

	// maybe the address is already mapped
	for (int32 i = 0; i < fSlotCount; i++) {
		page_slot& slot = fSlots[i];
		if (slot.physicalAddress == physicalAddress) {
			fNextSlot = (i + 1) & (fSlotCount - 1);
			if ((slot.valid & (1 << currentCPU)) == 0) {
				// not valid on this CPU -- invalidate the TLB entry
				arch_cpu_invalidate_TLB_range(slot.slot->address,
					slot.slot->address + B_PAGE_SIZE);
				slot.valid |= 1 << currentCPU;
			}
			// add the sub-page offset before casting to a pointer, to avoid
			// relying on GNU void* arithmetic
			return (void*)(slot.slot->address + off);
		}
	}

	// not found -- need to map a fresh one
	page_slot& slot = fSlots[fNextSlot];
	fNextSlot = (fNextSlot + 1) & (fSlotCount - 1);

	slot.physicalAddress = physicalAddress;
	slot.slot->Map(physicalAddress);
	slot.valid = 1 << currentCPU;

	return (void*)(slot.slot->address + off);
}


// #pragma mark - LargeMemoryPhysicalPageMapper


LargeMemoryPhysicalPageMapper::LargeMemoryPhysicalPageMapper()
	:
	fInitialPool(NULL)
{
	mutex_init(&fLock, "large memory physical page mapper");
}


status_t
LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
	PhysicalPageSlotPool* initialPool,
	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
	fInitialPool = initialPool;
	fNonEmptyPools.Add(fInitialPool);

	// get the debug slot
	GetSlot(true, fDebugSlot);

	// init the kernel translation map physical page mapper
	status_t error = fKernelMapper.Init();
	if (error != B_OK) {
		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to init "
			"kernel translation map physical page mapper!");
		return error;
	}
	_kernelPageMapper = &fKernelMapper;

	// init the per-CPU data
	int32 cpuCount = smp_get_num_cpus();
	for (int32 i = 0; i < cpuCount; i++)
		fPerCPUData[i].Init();

	return B_OK;
}


status_t
LargeMemoryPhysicalPageMapper::CreateTranslationMapPhysicalPageMapper(
	TranslationMapPhysicalPageMapper** _mapper)
{
	LargeMemoryTranslationMapPhysicalPageMapper* mapper
		= new(std::nothrow) LargeMemoryTranslationMapPhysicalPageMapper;
	if (mapper == NULL)
		return B_NO_MEMORY;

	status_t error = mapper->Init();
	if (error != B_OK) {
		delete mapper;
		return error;
	}

	*_mapper = mapper;
	return B_OK;
}


void*
LargeMemoryPhysicalPageMapper::InterruptGetPageTableAt(
	phys_addr_t physicalAddress)
{
	ASSERT(physicalAddress % B_PAGE_SIZE == 0);

	PhysicalPageSlot* slot = fPerCPUData[smp_get_current_cpu()].interruptSlot;
	slot->Map(physicalAddress);
	return (void*)slot->address;
}


status_t
LargeMemoryPhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	PhysicalPageSlot* slot;
	status_t error = GetSlot(true, slot);
	if (error != B_OK)
		return error;

	slot->Map(physicalAddress);

	*handle = slot;
	*virtualAddress = slot->address + physicalAddress % B_PAGE_SIZE;

	smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_RANGE, *virtualAddress,
		*virtualAddress, 0, NULL, SMP_MSG_FLAG_SYNC);

	return B_OK;
}


status_t
LargeMemoryPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
	PutSlot((PhysicalPageSlot*)handle);
	return B_OK;
}


status_t
LargeMemoryPhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	// get a slot from the per-cpu user pool
	PhysicalPageSlotQueue& slotQueue
		= fPerCPUData[smp_get_current_cpu()].user;
	PhysicalPageSlot* slot = slotQueue.GetSlot();
	slot->Map(physicalAddress);

	*virtualAddress = slot->address + physicalAddress % B_PAGE_SIZE;
	*handle = slot;

	return B_OK;
}


status_t
LargeMemoryPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
	void* handle)
{
	// return the slot to the per-cpu user pool
	PhysicalPageSlotQueue& slotQueue
		= fPerCPUData[smp_get_current_cpu()].user;
	slotQueue.PutSlot((PhysicalPageSlot*)handle);
	return B_OK;
}


status_t
LargeMemoryPhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
	addr_t* virtualAddress, void** handle)
{
	fDebugSlot->Map(physicalAddress);

	*handle = fDebugSlot;
	*virtualAddress = fDebugSlot->address + physicalAddress % B_PAGE_SIZE;
	return B_OK;
}


status_t
LargeMemoryPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
	return B_OK;
}


status_t
LargeMemoryPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
	phys_size_t length)
{
	phys_addr_t pageOffset = address % B_PAGE_SIZE;

	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner _(thread);

	PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num,
		false);
	PhysicalPageSlot* slot = slotQueue->GetSlot();

	while (length > 0) {
		slot->Map(address - pageOffset);

		size_t toSet = min_c(length, B_PAGE_SIZE - pageOffset);
		memset((void*)(slot->address + pageOffset), value, toSet);

		length -= toSet;
		address += toSet;
		pageOffset = 0;
	}

	slotQueue->PutSlot(slot);

	return B_OK;
}


status_t
LargeMemoryPhysicalPageMapper::MemcpyFromPhysical(void* _to, phys_addr_t from,
	size_t length, bool user)
{
	uint8* to = (uint8*)_to;
	phys_addr_t pageOffset = from % B_PAGE_SIZE;

	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner _(thread);

	PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num, user);
	PhysicalPageSlot* slot = slotQueue->GetSlot();

	status_t error = B_OK;

	while (length > 0) {
		size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);

		slot->Map(from - pageOffset);

		if (user) {
			error = user_memcpy(to, (void*)(slot->address + pageOffset),
				toCopy);
			if (error != B_OK)
				break;
		} else
			memcpy(to, (void*)(slot->address + pageOffset), toCopy);

		to += toCopy;
		from += toCopy;
		length -= toCopy;
		pageOffset = 0;
	}

	slotQueue->PutSlot(slot);

	return error;
}


status_t
LargeMemoryPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to,
	const void* _from, size_t length, bool user)
{
	const uint8* from = (const uint8*)_from;
	phys_addr_t pageOffset = to % B_PAGE_SIZE;

	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner _(thread);

	PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num, user);
	PhysicalPageSlot* slot = slotQueue->GetSlot();

	status_t error = B_OK;

	while (length > 0) {
		size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);

		slot->Map(to - pageOffset);

		if (user) {
			error = user_memcpy((void*)(slot->address + pageOffset), from,
				toCopy);
			if (error != B_OK)
				break;
		} else
			memcpy((void*)(slot->address + pageOffset), from, toCopy);

		to += toCopy;
		from += toCopy;
		length -= toCopy;
		pageOffset = 0;
	}

	slotQueue->PutSlot(slot);

	return error;
}


void
LargeMemoryPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
	phys_addr_t from)
{
	struct thread* thread = thread_get_current_thread();
	ThreadCPUPinner _(thread);

	PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num,
		false);
	PhysicalPageSlot* fromSlot;
	PhysicalPageSlot* toSlot;
	slotQueue->GetSlots(fromSlot, toSlot);

	fromSlot->Map(from);
	toSlot->Map(to);

	memcpy((void*)toSlot->address, (void*)fromSlot->address, B_PAGE_SIZE);

	slotQueue->PutSlots(fromSlot, toSlot);
}


status_t
LargeMemoryPhysicalPageMapper::GetSlot(bool canWait, PhysicalPageSlot*& slot)
{
	MutexLocker locker(fLock);

	PhysicalPageSlotPool* pool = fNonEmptyPools.Head();
	if (pool == NULL) {
		if (!canWait)
			return B_WOULD_BLOCK;

		// allocate new pool
		locker.Unlock();
		status_t error = fInitialPool->AllocatePool(pool);
		if (error != B_OK)
			return error;
		locker.Lock();

		fNonEmptyPools.Add(pool);
		pool = fNonEmptyPools.Head();
	}

	slot = pool->GetSlot();

	if (pool->IsEmpty()) {
		fNonEmptyPools.Remove(pool);
		fEmptyPools.Add(pool);
	}

	return B_OK;
}


void
LargeMemoryPhysicalPageMapper::PutSlot(PhysicalPageSlot* slot)
{
	MutexLocker locker(fLock);

	PhysicalPageSlotPool* pool = slot->pool;
	if (pool->IsEmpty()) {
		fEmptyPools.Remove(pool);
		fNonEmptyPools.Add(pool);
	}

	pool->PutSlot(slot);
}


inline PhysicalPageSlotQueue*
LargeMemoryPhysicalPageMapper::GetSlotQueue(int32 cpu, bool user)
{
	return user ? &fPerCPUData[cpu].user : &fPerCPUData[cpu].kernel;
}


// #pragma mark - Initialization


status_t
large_memory_physical_page_ops_init(kernel_args* args,
	ARMLargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
	ARMPhysicalPageMapper*& _pageMapper,
	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
	new(&sPhysicalPageMapper) LargeMemoryPhysicalPageMapper;
	sPhysicalPageMapper.Init(args, initialPool, _kernelPageMapper);

	_pageMapper = &sPhysicalPageMapper;
	return B_OK;
}
src/system/kernel/arch/arm/paging/arm_physical_page_mapper_large_memory.h (new file, 61 lines)
@ -0,0 +1,61 @@
/*
 * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_ARM_PAGING_ARM_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H
#define KERNEL_ARCH_ARM_PAGING_ARM_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H


#include <OS.h>

#include <util/DoublyLinkedList.h>


class TranslationMapPhysicalPageMapper;
class ARMPhysicalPageMapper;
struct kernel_args;


namespace ARMLargePhysicalPageMapper {


struct PhysicalPageSlotPool;


struct PhysicalPageSlot {
	PhysicalPageSlot*			next;
	PhysicalPageSlotPool*		pool;
	addr_t						address;

	inline	void				Map(phys_addr_t physicalAddress);
};


struct PhysicalPageSlotPool : DoublyLinkedListLinkImpl<PhysicalPageSlotPool> {

	virtual						~PhysicalPageSlotPool();

	inline	bool				IsEmpty() const;

	inline	PhysicalPageSlot*	GetSlot();
	inline	void				PutSlot(PhysicalPageSlot* slot);

	virtual	status_t			AllocatePool(PhysicalPageSlotPool*& _pool) = 0;
	virtual	void				Map(phys_addr_t physicalAddress,
									addr_t virtualAddress) = 0;

protected:
			PhysicalPageSlot*	fSlots;
};


}


status_t large_memory_physical_page_ops_init(kernel_args* args,
	ARMLargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
	ARMPhysicalPageMapper*& _pageMapper,
	TranslationMapPhysicalPageMapper*& _kernelPageMapper);


#endif	// KERNEL_ARCH_ARM_PAGING_ARM_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H
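
As a closing illustration of how the mapper is meant to be used (a sketch under assumptions, not code from the commit: zero_physical_page is a hypothetical caller, and mapper stands for the ARMPhysicalPageMapper installed by large_memory_physical_page_ops_init()):

#include <string.h>

// Borrow a mapping slot, touch the page through the returned virtual
// address, then hand the slot back.
status_t
zero_physical_page(ARMPhysicalPageMapper* mapper, phys_addr_t physicalAddress)
{
	addr_t virtualAddress;
	void* handle;
	status_t error = mapper->GetPage(physicalAddress, &virtualAddress,
		&handle);
	if (error != B_OK)
		return error;

	memset((void*)virtualAddress, 0, B_PAGE_SIZE);

	return mapper->PutPage(virtualAddress, handle);
}

In practice MemsetPhysical() above performs exactly this sequence internally, using the per-CPU kernel slot queue instead of the global pool.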