PPC: Restructure paging stuff to match other platforms

First attempt.

Totally untested.
François Revol 2013-11-08 23:45:08 +01:00
parent bfb2f7ffc4
commit 62caef87ce
14 changed files with 2678 additions and 606 deletions
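
The restructuring follows the layout already used on x86: a small abstract PPCPagingMethod interface plus a "classic" (segment-register/hash-table) backend, chosen once during arch_vm_translation_map_init() and reached through the global gPPCPagingMethod pointer, to which the arch_vm_translation_map_*() entry points now delegate. A minimal, self-contained sketch of that dispatch pattern (simplified signatures and types; the real interface appears below in paging/PPCPagingMethod.h):

// Standalone sketch of the new dispatch pattern -- not part of the commit.
#include <cstdio>
#include <new>

struct kernel_args;	// opaque for the sketch

class PPCPagingMethod {
public:
	virtual ~PPCPagingMethod() {}
	virtual int Init(kernel_args* args) = 0;
};

class PPCPagingMethodClassic : public PPCPagingMethod {
public:
	virtual int Init(kernel_args* args)
	{
		std::printf("using Classic paging\n");
		return 0;
	}
};

// The chosen backend is placement-new'ed into a static buffer, so no heap is
// needed this early during boot; everything else goes through the pointer.
static PPCPagingMethod* gPPCPagingMethod;
static union {
	unsigned long long align;
	char classic[sizeof(PPCPagingMethodClassic)];
} sPagingMethodBuffer;

static int
arch_vm_translation_map_init(kernel_args* args)
{
	// The real code would check for an AMCC 460 CPU here and pick a 460
	// backend instead, once that backend exists.
	gPPCPagingMethod = new(&sPagingMethodBuffer) PPCPagingMethodClassic;
	return gPPCPagingMethod->Init(args);
}

int
main()
{
	return arch_vm_translation_map_init(NULL);
}
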

View File

@@ -4,6 +4,8 @@ SubDirHdrs $(SUBDIR) $(DOTDOT) generic ;
UsePrivateKernelHeaders ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging classic ] ;
KernelMergeObject kernel_arch_ppc.o :
arch_commpage.cpp
@@ -29,9 +31,26 @@ KernelMergeObject kernel_arch_ppc.o :
debug_uart_8250.cpp
arch_uart_8250.cpp
# paging
generic_vm_physical_page_mapper.cpp
generic_vm_physical_page_ops.cpp
GenericVMPhysicalPageMapper.cpp
PPCPagingMethod.cpp
PPCPagingStructures.cpp
PPCVMTranslationMap.cpp
# TODO: compile with correct -mcpu
# paging/classic
PPCPagingMethodClassic.cpp
PPCPagingStructuresClassic.cpp
PPCVMTranslationMapClassic.cpp
# TODO: compile with correct -mcpu
# paging/460
#PPCPagingMethod460.cpp
#PPCPagingStructures460.cpp
#PPCVMTranslationMap460.cpp
:
$(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused
;

View File

@@ -79,7 +79,7 @@
#include <KernelExport.h>
#include <arch/cpu.h>
#include <arch_mmu.h>
//#include <arch_mmu.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <kernel.h>
@@ -93,38 +93,30 @@
#include <util/AutoLock.h>
#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"
//#include "generic_vm_physical_page_ops.h"
//#include "GenericVMPhysicalPageMapper.h"
#include "paging/PPCVMTranslationMap.h"
#include "paging/classic/PPCPagingMethodClassic.h"
//#include "paging/460/PPCPagingMethod460.h"
static struct page_table_entry_group *sPageTable;
static size_t sPageTableSize;
static uint32 sPageTableHashMask;
static area_id sPageTableArea;
// 64 MB of iospace
#define IOSPACE_SIZE (64*1024*1024)
// We only have small (4 KB) pages. The only reason for choosing greater chunk
// size is to keep the waste of memory limited, since the generic page mapper
// allocates structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
static addr_t sIOSpaceBase;
static GenericVMPhysicalPageMapper sPhysicalPageMapper;
// The VSID is a 24 bit number. The lower three bits are defined by the
// (effective) segment number, which leaves us with a 21 bit space of
// VSID bases (= 2 * 1024 * 1024).
#define MAX_VSID_BASES (PAGE_SIZE * 8)
static uint32 sVSIDBaseBitmap[MAX_VSID_BASES / (sizeof(uint32) * 8)];
static spinlock sVSIDBaseBitmapLock;
#define VSID_BASE_SHIFT 3
#define VADDR_TO_VSID(vsidBase, vaddr) (vsidBase + ((vaddr) >> 28))
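
// Worked example (assuming the kernel map set up by Init() below, which uses
// VSID base 0): VADDR_TO_VSID(0, 0x80000000) = 0 + (0x80000000 >> 28) = 8,
// i.e. the first VSID of the second base reserved for the kernel, which
// covers addresses from 0x80000000 up.
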
#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
static union {
uint64 align;
//char amcc460[sizeof(PPCPagingMethod460)];
char classic[sizeof(PPCPagingMethodClassic)];
} sPagingMethodBuffer;
#if 0
struct PPCVMTranslationMap : VMTranslationMap {
PPCVMTranslationMap();
virtual ~PPCVMTranslationMap();
@@ -174,367 +166,20 @@ struct PPCVMTranslationMap : VMTranslationMap {
protected:
int fVSIDBase;
};
#endif
void
ppc_translation_map_change_asid(VMTranslationMap *map)
{
// this code depends on the kernel being at 0x80000000, fix if we change that
#if KERNEL_BASE != 0x80000000
#error fix me
#endif
int vsidBase = static_cast<PPCVMTranslationMap*>(map)->VSIDBase();
isync(); // synchronize context
asm("mtsr 0,%0" : : "g"(vsidBase));
asm("mtsr 1,%0" : : "g"(vsidBase + 1));
asm("mtsr 2,%0" : : "g"(vsidBase + 2));
asm("mtsr 3,%0" : : "g"(vsidBase + 3));
asm("mtsr 4,%0" : : "g"(vsidBase + 4));
asm("mtsr 5,%0" : : "g"(vsidBase + 5));
asm("mtsr 6,%0" : : "g"(vsidBase + 6));
asm("mtsr 7,%0" : : "g"(vsidBase + 7));
isync(); // synchronize context
}
static void
fill_page_table_entry(page_table_entry *entry, uint32 virtualSegmentID,
addr_t virtualAddress, phys_addr_t physicalAddress, uint8 protection,
uint32 memoryType, bool secondaryHash)
{
// lower 32 bit - set at once
entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
entry->_reserved0 = 0;
entry->referenced = false;
entry->changed = false;
entry->write_through = (memoryType == B_MTR_UC) || (memoryType == B_MTR_WT);
entry->caching_inhibited = (memoryType == B_MTR_UC);
entry->memory_coherent = false;
entry->guarded = false;
entry->_reserved1 = 0;
entry->page_protection = protection & 0x3;
eieio();
// we need to make sure that the lower 32 bit were
// already written when the entry becomes valid
// upper 32 bit
entry->virtual_segment_id = virtualSegmentID;
entry->secondary_hash = secondaryHash;
entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
entry->valid = true;
ppc_sync();
}
page_table_entry *
PPCVMTranslationMap::LookupPageTableEntry(addr_t virtualAddress)
{
// lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
// dprintf("vm_translation_map.lookup_page_table_entry: vsid %ld, va 0x%lx\n", virtualSegmentID, virtualAddress);
// Search for the page table entry using the primary hash value
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->virtual_segment_id == virtualSegmentID
&& entry->secondary_hash == false
&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
return entry;
}
// didn't find it, try the secondary hash value
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->virtual_segment_id == virtualSegmentID
&& entry->secondary_hash == true
&& entry->abbr_page_index == ((virtualAddress >> 22) & 0x3f))
return entry;
}
return NULL;
}
bool
PPCVMTranslationMap::RemovePageTableEntry(addr_t virtualAddress)
{
page_table_entry *entry = LookupPageTableEntry(virtualAddress);
if (entry == NULL)
return false;
entry->valid = 0;
ppc_sync();
tlbie(virtualAddress);
eieio();
tlbsync();
ppc_sync();
return true;
}
static status_t
map_iospace_chunk(addr_t va, phys_addr_t pa, uint32 flags)
{
pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
// map the pages
return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
static_cast<PPCVMTranslationMap*>(map)->ChangeASID();
}
// #pragma mark -
PPCVMTranslationMap::PPCVMTranslationMap()
{
}
PPCVMTranslationMap::~PPCVMTranslationMap()
{
if (fMapCount > 0) {
panic("vm_translation_map.destroy_tmap: map %p has positive map count %ld\n",
this, fMapCount);
}
// mark the vsid base not in use
int baseBit = fVSIDBase >> VSID_BASE_SHIFT;
atomic_and((int32 *)&sVSIDBaseBitmap[baseBit / 32],
~(1 << (baseBit % 32)));
}
status_t
PPCVMTranslationMap::Init(bool kernel)
{
cpu_status state = disable_interrupts();
acquire_spinlock(&sVSIDBaseBitmapLock);
// allocate a VSID base for this one
if (kernel) {
// The boot loader has set up the segment registers for identical
// mapping. Two VSID bases are reserved for the kernel: 0 and 8. The
// latter one for mapping the kernel address space (0x80000000...), the
// former one for the lower addresses required by the Open Firmware
// services.
fVSIDBase = 0;
sVSIDBaseBitmap[0] |= 0x3;
} else {
int i = 0;
while (i < MAX_VSID_BASES) {
if (sVSIDBaseBitmap[i / 32] == 0xffffffff) {
i += 32;
continue;
}
if ((sVSIDBaseBitmap[i / 32] & (1 << (i % 32))) == 0) {
// we found it
sVSIDBaseBitmap[i / 32] |= 1 << (i % 32);
break;
}
i++;
}
if (i >= MAX_VSID_BASES)
panic("vm_translation_map_create: out of VSID bases\n");
fVSIDBase = i << VSID_BASE_SHIFT;
}
release_spinlock(&sVSIDBaseBitmapLock);
restore_interrupts(state);
return B_OK;
}
bool
PPCVMTranslationMap::Lock()
{
recursive_lock_lock(&fLock);
return true;
}
void
PPCVMTranslationMap::Unlock()
{
recursive_lock_unlock(&fLock);
}
size_t
PPCVMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
return 0;
}
status_t
PPCVMTranslationMap::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
// lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
uint32 protection = 0;
// ToDo: check this
// all kernel mappings are R/W to supervisor code
if (attributes & (B_READ_AREA | B_WRITE_AREA))
protection = (attributes & B_WRITE_AREA) ? PTE_READ_WRITE : PTE_READ_ONLY;
//dprintf("vm_translation_map.map_tmap: vsid %d, pa 0x%lx, va 0x%lx\n", vsid, pa, va);
// Search for a free page table slot using the primary hash value
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->valid)
continue;
fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
protection, memoryType, false);
fMapCount++;
return B_OK;
}
// Didn't find one, try the secondary hash value
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int i = 0; i < 8; i++) {
page_table_entry *entry = &group->entry[i];
if (entry->valid)
continue;
fill_page_table_entry(entry, virtualSegmentID, virtualAddress, physicalAddress,
protection, memoryType, false);
fMapCount++;
return B_OK;
}
panic("vm_translation_map.map_tmap: hash table full\n");
return B_ERROR;
}
status_t
PPCVMTranslationMap::Unmap(addr_t start, addr_t end)
{
page_table_entry *entry;
start = ROUNDDOWN(start, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
// dprintf("vm_translation_map.unmap_tmap: start 0x%lx, end 0x%lx\n", start, end);
while (start < end) {
if (RemovePageTableEntry(start))
fMapCount--;
start += B_PAGE_SIZE;
}
return B_OK;
}
status_t
PPCVMTranslationMap::UnmapPage(VMArea* area, addr_t address,
bool updatePageQueue)
{
ASSERT(address % B_PAGE_SIZE == 0);
RecursiveLocker locker(fLock);
if (area->cache_type == CACHE_TYPE_DEVICE) {
if (!RemovePageTableEntry(address))
return B_ENTRY_NOT_FOUND;
fMapCount--;
return B_OK;
}
page_table_entry* entry = LookupPageTableEntry(address);
if (entry == NULL)
return B_ENTRY_NOT_FOUND;
page_num_t pageNumber = entry->physical_page_number;
bool accessed = entry->referenced;
bool modified = entry->changed;
RemovePageTableEntry(address);
fMapCount--;
locker.Detach();
// PageUnmapped() will unlock for us
PageUnmapped(area, pageNumber, accessed, modified, updatePageQueue);
return B_OK;
}
status_t
PPCVMTranslationMap::Query(addr_t va, phys_addr_t *_outPhysical,
uint32 *_outFlags)
{
page_table_entry *entry;
// default the flags to not present
*_outFlags = 0;
*_outPhysical = 0;
entry = LookupPageTableEntry(va);
if (entry == NULL)
return B_NO_ERROR;
// ToDo: check this!
if (IS_KERNEL_ADDRESS(va))
*_outFlags |= B_KERNEL_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_KERNEL_WRITE_AREA);
else
*_outFlags |= B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA | B_READ_AREA | (entry->page_protection == PTE_READ_ONLY ? 0 : B_WRITE_AREA);
*_outFlags |= entry->changed ? PAGE_MODIFIED : 0;
*_outFlags |= entry->referenced ? PAGE_ACCESSED : 0;
*_outFlags |= entry->valid ? PAGE_PRESENT : 0;
*_outPhysical = entry->physical_page_number * B_PAGE_SIZE;
return B_OK;
}
status_t
PPCVMTranslationMap::QueryInterrupt(addr_t virtualAddress,
phys_addr_t* _physicalAddress, uint32* _flags)
{
return PPCVMTranslationMap::Query(virtualAddress, _physicalAddress, _flags);
}
#if 0//XXX:Not needed anymore ?
addr_t
PPCVMTranslationMap::MappedSize() const
{
@@ -542,103 +187,6 @@ PPCVMTranslationMap::MappedSize() const
}
status_t
PPCVMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
uint32 memoryType)
{
// XXX finish
return B_ERROR;
}
status_t
PPCVMTranslationMap::ClearFlags(addr_t virtualAddress, uint32 flags)
{
page_table_entry *entry = LookupPageTableEntry(virtualAddress);
if (entry == NULL)
return B_NO_ERROR;
bool modified = false;
// clear the bits
if (flags & PAGE_MODIFIED && entry->changed) {
entry->changed = false;
modified = true;
}
if (flags & PAGE_ACCESSED && entry->referenced) {
entry->referenced = false;
modified = true;
}
// synchronize
if (modified) {
tlbie(virtualAddress);
eieio();
tlbsync();
ppc_sync();
}
return B_OK;
}
bool
PPCVMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
bool unmapIfUnaccessed, bool& _modified)
{
// TODO: Implement for real! ATM this is just an approximation using
// Query(), ClearFlags(), and UnmapPage(). See below!
RecursiveLocker locker(fLock);
uint32 flags;
phys_addr_t physicalAddress;
if (Query(address, &physicalAddress, &flags) != B_OK
|| (flags & PAGE_PRESENT) == 0) {
return false;
}
_modified = (flags & PAGE_MODIFIED) != 0;
if ((flags & (PAGE_ACCESSED | PAGE_MODIFIED)) != 0)
ClearFlags(address, flags & (PAGE_ACCESSED | PAGE_MODIFIED));
if ((flags & PAGE_ACCESSED) != 0)
return true;
if (!unmapIfUnaccessed)
return false;
locker.Unlock();
UnmapPage(area, address, false);
// TODO: Obvious race condition: Between querying and unmapping the
// page could have been accessed. We try to compensate by considering
// vm_page::{accessed,modified} (which would have been updated by
// UnmapPage()) below, but that doesn't quite match the required
// semantics of the method.
vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL)
return false;
_modified |= page->modified;
return page->accessed;
}
void
PPCVMTranslationMap::Flush()
{
// TODO: arch_cpu_global_TLB_invalidate() is extremely expensive and doesn't
// even cut it here. We are supposed to invalidate all TLB entries for this
// map on all CPUs. We should loop over the virtual pages and invoke tlbie
// instead (which marks the entry invalid on all CPUs).
arch_cpu_global_TLB_invalidate();
}
static status_t
get_physical_page_tmap(phys_addr_t physicalAddress, addr_t *_virtualAddress,
void **handle)
@@ -652,6 +200,7 @@ put_physical_page_tmap(addr_t virtualAddress, void *handle)
{
return generic_put_physical_page(virtualAddress);
}
#endif
// #pragma mark -
@@ -661,18 +210,7 @@ put_physical_page_tmap(addr_t virtualAddress, void *handle)
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
PPCVMTranslationMap* map = new(std::nothrow) PPCVMTranslationMap;
if (map == NULL)
return B_NO_MEMORY;
status_t error = map->Init(kernel);
if (error != B_OK) {
delete map;
return error;
}
*_map = map;
return B_OK;
return gPPCPagingMethod->CreateTranslationMap(kernel, _map);
}
@@ -680,60 +218,52 @@ status_t
arch_vm_translation_map_init(kernel_args *args,
VMPhysicalPageMapper** _physicalPageMapper)
{
sPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
sPageTableSize = args->arch_args.page_table.size;
sPageTableHashMask = sPageTableSize / sizeof(page_table_entry_group) - 1;
TRACE("vm_translation_map_init: entry\n");
// init physical page mapper
status_t error = generic_vm_physical_page_mapper_init(args,
map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
if (error != B_OK)
return error;
#ifdef TRACE_VM_TMAP
TRACE("physical memory ranges:\n");
for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
phys_addr_t start = args->physical_memory_range[i].start;
phys_addr_t end = start + args->physical_memory_range[i].size;
TRACE(" %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
end);
}
new(&sPhysicalPageMapper) GenericVMPhysicalPageMapper;
TRACE("allocated physical ranges:\n");
for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
phys_addr_t start = args->physical_allocated_range[i].start;
phys_addr_t end = start + args->physical_allocated_range[i].size;
TRACE(" %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
end);
}
*_physicalPageMapper = &sPhysicalPageMapper;
return B_OK;
TRACE("allocated virtual ranges:\n");
for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
addr_t start = args->virtual_allocated_range[i].start;
addr_t end = start + args->virtual_allocated_range[i].size;
TRACE(" %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
}
#endif
if (false /* TODO:Check for AMCC460! */) {
dprintf("using AMCC 460 paging\n");
panic("XXX");
//XXX:gPPCPagingMethod = new(&sPagingMethodBuffer) PPCPagingMethod460;
} else {
dprintf("using Classic paging\n");
gPPCPagingMethod = new(&sPagingMethodBuffer) PPCPagingMethodClassic;
}
return gPPCPagingMethod->Init(args, _physicalPageMapper);
}
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
// If the page table doesn't lie within the kernel address space, we
// remap it.
if (!IS_KERNEL_ADDRESS(sPageTable)) {
addr_t newAddress = (addr_t)sPageTable;
status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
false);
if (error != B_OK) {
panic("arch_vm_translation_map_init_post_area(): Failed to remap "
"the page table!");
return error;
}
TRACE("vm_translation_map_init_post_area: entry\n");
// set the new page table address
addr_t oldVirtualBase = (addr_t)(sPageTable);
sPageTable = (page_table_entry_group*)newAddress;
// unmap the old pages
ppc_unmap_address_range(oldVirtualBase, sPageTableSize);
// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
}
// create an area to cover the page table
sPageTableArea = create_area("page_table", (void **)&sPageTable, B_EXACT_ADDRESS,
sPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
// init physical page mapper
status_t error = generic_vm_physical_page_mapper_init_post_area(args);
if (error != B_OK)
return error;
return B_OK;
return gPPCPagingMethod->InitPostArea(args);
}
@@ -752,38 +282,13 @@ arch_vm_translation_map_init_post_sem(kernel_args *args)
*/
status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress,
phys_addr_t physicalAddress, uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args *))
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
TRACE("early_tmap: entry pa %#" B_PRIxPHYSADDR " va %#" B_PRIxADDR "\n", pa,
va);
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
page_table_entry_group *group = &sPageTable[hash & sPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
// 8 entries in a group
if (group->entry[i].valid)
continue;
fill_page_table_entry(&group->entry[i], virtualSegmentID,
virtualAddress, physicalAddress, PTE_READ_WRITE, 0, false);
return B_OK;
}
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
if (group->entry[i].valid)
continue;
fill_page_table_entry(&group->entry[i], virtualSegmentID,
virtualAddress, physicalAddress, PTE_READ_WRITE, 0, true);
return B_OK;
}
return B_ERROR;
return gPPCPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}
@@ -840,46 +345,19 @@ ppc_unmap_address_range(addr_t virtualAddress, size_t size)
PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
addressSpace->TranslationMap());
for (0; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
map->RemovePageTableEntry(virtualAddress);
map->Unmap(virtualAddress, virtualEnd);
}
status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
addr_t virtualAddress = ROUNDDOWN(*_virtualAddress, B_PAGE_SIZE);
size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
// reserve space in the address space
void *newAddress = NULL;
status_t error = vm_reserve_address_range(addressSpace->ID(), &newAddress,
B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (error != B_OK)
return error;
// get the area's first physical page
PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
addressSpace->TranslationMap());
page_table_entry *entry = map->LookupPageTableEntry(virtualAddress);
if (!entry)
return B_ERROR;
phys_addr_t physicalBase = (phys_addr_t)entry->physical_page_number << 12;
// map the pages
error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
if (error != B_OK)
return error;
*_virtualAddress = (addr_t)newAddress;
// unmap the old pages
if (unmap)
ppc_unmap_address_range(virtualAddress, size);
return B_OK;
return map->RemapAddressRange(_virtualAddress, size, unmap);
}
@@ -887,20 +365,8 @@ bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
uint32 protection)
{
VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
if (!gPPCPagingMethod)
return true;
PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
addressSpace->TranslationMap());
phys_addr_t physicalAddress;
uint32 flags;
if (map->Query(virtualAddress, &physicalAddress, &flags) != B_OK)
return false;
if ((flags & PAGE_PRESENT) == 0)
return false;
// present means kernel-readable, so check for writable
return (protection & B_KERNEL_WRITE_AREA) == 0
|| (flags & B_KERNEL_WRITE_AREA) != 0;
return gPPCPagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}

View File

@@ -0,0 +1,15 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/PPCPagingMethod.h"
PPCPagingMethod* gPPCPagingMethod;
PPCPagingMethod::~PPCPagingMethod()
{
}

View File

@@ -0,0 +1,46 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_PPC_PAGING_PPC_PAGING_METHOD_H
#define KERNEL_ARCH_PPC_PAGING_PPC_PAGING_METHOD_H
#include <SupportDefs.h>
#include <vm/vm_types.h>
struct kernel_args;
struct VMPhysicalPageMapper;
struct VMTranslationMap;
class PPCPagingMethod {
public:
virtual ~PPCPagingMethod();
virtual status_t Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
= 0;
virtual status_t InitPostArea(kernel_args* args) = 0;
virtual status_t CreateTranslationMap(bool kernel,
VMTranslationMap** _map) = 0;
virtual status_t MapEarly(kernel_args* args,
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 attributes,
page_num_t (*get_free_page)(kernel_args*))
= 0;
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection) = 0;
};
extern PPCPagingMethod* gPPCPagingMethod;
#endif // KERNEL_ARCH_PPC_PAGING_PPC_PAGING_METHOD_H
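
The Jamfile above already lists (commented out) sources for an AMCC 460 variant, and arch_vm_translation_map_init() has a placeholder for selecting it. A new backend would plug in by subclassing this interface; a purely hypothetical skeleton, mirroring the Classic declaration further down (the 460 class itself is not part of this commit):

// Hypothetical skeleton only -- not part of this commit.
#include "paging/PPCPagingMethod.h"

class PPCPagingMethod460 : public PPCPagingMethod {
public:
	virtual status_t			Init(kernel_args* args,
									VMPhysicalPageMapper** _physicalPageMapper);
	virtual status_t			InitPostArea(kernel_args* args);
	virtual status_t			CreateTranslationMap(bool kernel,
									VMTranslationMap** _map);
	virtual status_t			MapEarly(kernel_args* args,
									addr_t virtualAddress,
									phys_addr_t physicalAddress,
									uint8 attributes,
									page_num_t (*get_free_page)(kernel_args*));
	virtual bool				IsKernelPageAccessible(addr_t virtualAddress,
									uint32 protection);
};
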

View File

@@ -0,0 +1,20 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/PPCPagingStructures.h"
PPCPagingStructures::PPCPagingStructures()
:
ref_count(1),
active_on_cpus(0)
{
}
PPCPagingStructures::~PPCPagingStructures()
{
}

View File

@@ -0,0 +1,50 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2005-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef KERNEL_ARCH_PPC_PAGING_PPC_PAGING_STRUCTURES_H
#define KERNEL_ARCH_PPC_PAGING_PPC_PAGING_STRUCTURES_H
#include <SupportDefs.h>
#include <heap.h>
struct PPCPagingStructures : DeferredDeletable {
// X86 stuff, probably useless
phys_addr_t pgdir_phys;
int32 ref_count;
int32 active_on_cpus;
// mask indicating on which CPUs the map is currently used
PPCPagingStructures();
virtual ~PPCPagingStructures();
inline void AddReference();
inline void RemoveReference();
virtual void Delete() = 0;
};
inline void
PPCPagingStructures::AddReference()
{
atomic_add(&ref_count, 1);
}
inline void
PPCPagingStructures::RemoveReference()
{
if (atomic_add(&ref_count, -1) == 1)
Delete();
}
#endif // KERNEL_ARCH_PPC_PAGING_PPC_PAGING_STRUCTURES_H
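
How the reference count and the DeferredDeletable base are meant to interact, as a small sketch (the helper name is made up; the argument would be any concrete subclass such as the Classic one added below):

// Illustrative helper, not from the commit.
#include "paging/PPCPagingStructures.h"

static void
share_paging_structures_example(PPCPagingStructures* structures)
{
	structures->AddReference();
		// e.g. a second translation map starts using the same structures
	// ... use the structures ...
	structures->RemoveReference();
		// when the count drops to zero this calls Delete(), which the Classic
		// subclass implements with deferred_delete() if interrupts are
		// disabled
}
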

View File

@@ -0,0 +1,147 @@
/*
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "paging/PPCVMTranslationMap.h"
#include <thread.h>
#include <smp.h>
#include "paging/PPCPagingStructures.h"
//#define TRACE_PPC_VM_TRANSLATION_MAP
#ifdef TRACE_PPC_VM_TRANSLATION_MAP
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
PPCVMTranslationMap::PPCVMTranslationMap()
:
//X86:fPageMapper(NULL),
fInvalidPagesCount(0)
{
}
PPCVMTranslationMap::~PPCVMTranslationMap()
{
}
status_t
PPCVMTranslationMap::Init(bool kernel)
{
fIsKernelMap = kernel;
return B_OK;
}
/*! Acquires the map's recursive lock, and resets the invalidate pages counter
in case it's the first locking recursion.
*/
bool
PPCVMTranslationMap::Lock()
{
TRACE("%p->PPCVMTranslationMap::Lock()\n", this);
recursive_lock_lock(&fLock);
if (recursive_lock_get_recursion(&fLock) == 1) {
// we were the first one to grab the lock
TRACE("clearing invalidated page count\n");
fInvalidPagesCount = 0;
}
return true;
}
/*! Unlocks the map and, if the recursive lock is actually released,
flushes all pending changes of this map (i.e. flushes TLB caches as
needed).
*/
void
PPCVMTranslationMap::Unlock()
{
TRACE("%p->PPCVMTranslationMap::Unlock()\n", this);
if (recursive_lock_get_recursion(&fLock) == 1) {
// we're about to release it for the last time
Flush();
}
recursive_lock_unlock(&fLock);
}
addr_t
PPCVMTranslationMap::MappedSize() const
{
return fMapCount;
}
void
PPCVMTranslationMap::Flush()
{
if (fInvalidPagesCount <= 0)
return;
Thread* thread = thread_get_current_thread();
thread_pin_to_current_cpu(thread);
if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
// invalidate all pages
TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
fInvalidPagesCount);
if (fIsKernelMap) {
arch_cpu_global_TLB_invalidate();
smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
NULL, SMP_MSG_FLAG_SYNC);
} else {
cpu_status state = disable_interrupts();
arch_cpu_user_TLB_invalidate();
restore_interrupts(state);
int cpu = smp_get_current_cpu();
uint32 cpuMask = PagingStructures()->active_on_cpus
& ~((uint32)1 << cpu);
if (cpuMask != 0) {
smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
}
}
} else {
TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
fInvalidPagesCount);
arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);
if (fIsKernelMap) {
smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
SMP_MSG_FLAG_SYNC);
} else {
int cpu = smp_get_current_cpu();
uint32 cpuMask = PagingStructures()->active_on_cpus
& ~((uint32)1 << cpu);
if (cpuMask != 0) {
smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
(addr_t)fInvalidPages, fInvalidPagesCount, 0, NULL,
SMP_MSG_FLAG_SYNC);
}
}
}
fInvalidPagesCount = 0;
thread_unpin_from_current_cpu(thread);
}

View File

@@ -0,0 +1,60 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_PPC_PPC_VM_TRANSLATION_MAP_H
#define KERNEL_ARCH_PPC_PPC_VM_TRANSLATION_MAP_H
#include <vm/VMTranslationMap.h>
#define PAGE_INVALIDATE_CACHE_SIZE 64
struct PPCPagingStructures;
class TranslationMapPhysicalPageMapper;
struct PPCVMTranslationMap : VMTranslationMap {
PPCVMTranslationMap();
virtual ~PPCVMTranslationMap();
status_t Init(bool kernel);
virtual bool Lock();
virtual void Unlock();
virtual addr_t MappedSize() const;
virtual void Flush();
virtual PPCPagingStructures* PagingStructures() const = 0;
inline void InvalidatePage(addr_t address);
virtual status_t RemapAddressRange(addr_t *_virtualAddress,
size_t size, bool unmap) = 0;
virtual void ChangeASID() = 0;
protected:
//X86:TranslationMapPhysicalPageMapper* fPageMapper;
int fInvalidPagesCount;
addr_t fInvalidPages[PAGE_INVALIDATE_CACHE_SIZE];
bool fIsKernelMap;
};
void
PPCVMTranslationMap::InvalidatePage(addr_t address)
{
if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
fInvalidPages[fInvalidPagesCount] = address;
fInvalidPagesCount++;
}
#endif // KERNEL_ARCH_PPC_PPC_VM_TRANSLATION_MAP_H
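
The invalidation cache above is driven by the Lock()/Unlock()/Flush() logic in PPCVMTranslationMap.cpp; a short sketch of the intended call pattern (the helper name is made up, and "map" is any concrete subclass):

// Illustrative call pattern only, not from the commit.
#include "paging/PPCVMTranslationMap.h"

static void
invalidate_two_pages_example(PPCVMTranslationMap* map, addr_t va1, addr_t va2)
{
	map->Lock();
		// the first lock recursion resets the queued-page counter
	// ... modify the page table entries for va1 and va2 ...
	map->InvalidatePage(va1);
		// queues up to PAGE_INVALIDATE_CACHE_SIZE addresses
	map->InvalidatePage(va2);
	map->Unlock();
		// the last unlock calls Flush(), which invalidates either the queued
		// pages or, past the threshold, the whole TLB (also on other CPUs)
}
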

View File

@@ -0,0 +1,423 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "paging/classic/PPCPagingMethodClassic.h"
#include <stdlib.h>
#include <string.h>
#include <AutoDeleter.h>
#include <arch/cpu.h>
#include <arch_mmu.h>
#include <arch_system_info.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include "paging/classic/PPCPagingStructuresClassic.h"
#include "paging/classic/PPCVMTranslationMapClassic.h"
#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
#include "GenericVMPhysicalPageMapper.h"
//#define TRACE_PPC_PAGING_METHOD_CLASSIC
#ifdef TRACE_PPC_PAGING_METHOD_CLASSIC
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
// 64 MB of iospace
#define IOSPACE_SIZE (64*1024*1024)
// We only have small (4 KB) pages. The only reason for choosing greater chunk
// size is to keep the waste of memory limited, since the generic page mapper
// allocates structures per physical/virtual chunk.
// TODO: Implement a page mapper more suitable for small pages!
#define IOSPACE_CHUNK_SIZE (16 * B_PAGE_SIZE)
static addr_t sIOSpaceBase;
static status_t
map_iospace_chunk(addr_t va, phys_addr_t pa, uint32 flags)
{
pa &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
va &= ~(B_PAGE_SIZE - 1); // make sure it's page aligned
if (va < sIOSpaceBase || va >= (sIOSpaceBase + IOSPACE_SIZE))
panic("map_iospace_chunk: passed invalid va 0x%lx\n", va);
// map the pages
return ppc_map_address_range(va, pa, IOSPACE_CHUNK_SIZE);
}
// #pragma mark - PPCPagingMethodClassic
PPCPagingMethodClassic::PPCPagingMethodClassic()
/*
:
fPageHole(NULL),
fPageHolePageDir(NULL),
fKernelPhysicalPageDirectory(0),
fKernelVirtualPageDirectory(NULL),
fPhysicalPageMapper(NULL),
fKernelPhysicalPageMapper(NULL)
*/
{
}
PPCPagingMethodClassic::~PPCPagingMethodClassic()
{
}
status_t
PPCPagingMethodClassic::Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
{
TRACE("PPCPagingMethodClassic::Init(): entry\n");
fPageTable = (page_table_entry_group *)args->arch_args.page_table.start;
fPageTableSize = args->arch_args.page_table.size;
fPageTableHashMask = fPageTableSize / sizeof(page_table_entry_group) - 1;
// init physical page mapper
status_t error = generic_vm_physical_page_mapper_init(args,
map_iospace_chunk, &sIOSpaceBase, IOSPACE_SIZE, IOSPACE_CHUNK_SIZE);
if (error != B_OK)
return error;
new(&fPhysicalPageMapper) GenericVMPhysicalPageMapper;
*_physicalPageMapper = &fPhysicalPageMapper;
return B_OK;
#if 0//X86
fKernelPhysicalPageDirectory = args->arch_args.phys_pgdir;
fKernelVirtualPageDirectory = (page_directory_entry*)(addr_t)
args->arch_args.vir_pgdir;
#ifdef TRACE_PPC_PAGING_METHOD_CLASSIC
TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
TRACE("page dir: %p (physical: %#" B_PRIx32 ")\n",
fKernelVirtualPageDirectory, fKernelPhysicalPageDirectory);
#endif
PPCPagingStructuresClassic::StaticInit();
// create the initial pool for the physical page mapper
PhysicalPageSlotPool* pool
= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
PhysicalPageSlotPool;
status_t error = pool->InitInitial(args);
if (error != B_OK) {
panic("PPCPagingMethodClassic::Init(): Failed to create initial pool "
"for physical page mapper!");
return error;
}
// create physical page mapper
large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
fKernelPhysicalPageMapper);
// TODO: Select the best page mapper!
// enable global page feature if available
if (x86_check_feature(IA32_FEATURE_PGE, FEATURE_COMMON)) {
// this prevents kernel pages from being flushed from TLB on
// context-switch
x86_write_cr4(x86_read_cr4() | IA32_CR4_GLOBAL_PAGES);
}
TRACE("PPCPagingMethodClassic::Init(): done\n");
*_physicalPageMapper = fPhysicalPageMapper;
return B_OK;
#endif
}
status_t
PPCPagingMethodClassic::InitPostArea(kernel_args* args)
{
// If the page table doesn't lie within the kernel address space, we
// remap it.
if (!IS_KERNEL_ADDRESS(fPageTable)) {
addr_t newAddress = (addr_t)fPageTable;
status_t error = ppc_remap_address_range(&newAddress, fPageTableSize,
false);
if (error != B_OK) {
panic("arch_vm_translation_map_init_post_area(): Failed to remap "
"the page table!");
return error;
}
// set the new page table address
addr_t oldVirtualBase = (addr_t)(fPageTable);
fPageTable = (page_table_entry_group*)newAddress;
// unmap the old pages
ppc_unmap_address_range(oldVirtualBase, fPageTableSize);
// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which
// certainly isn't to the liking of the TLB.
}
// create an area to cover the page table
fPageTableArea = create_area("page_table", (void **)&fPageTable, B_EXACT_ADDRESS,
fPageTableSize, B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
// init physical page mapper
status_t error = generic_vm_physical_page_mapper_init_post_area(args);
if (error != B_OK)
return error;
return B_OK;
#if 0//X86
// now that the vm is initialized, create an area that represents
// the page hole
void *temp;
status_t error;
area_id area;
// unmap the page hole hack we were using before
fKernelVirtualPageDirectory[1023] = 0;
fPageHolePageDir = NULL;
fPageHole = NULL;
temp = (void*)fKernelVirtualPageDirectory;
area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return area;
error = PhysicalPageSlotPool::sInitialPhysicalPagePool
.InitInitialPostArea(args);
if (error != B_OK)
return error;
return B_OK;
#endif//X86
}
status_t
PPCPagingMethodClassic::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
PPCVMTranslationMapClassic* map = new(std::nothrow) PPCVMTranslationMapClassic;
if (map == NULL)
return B_NO_MEMORY;
status_t error = map->Init(kernel);
if (error != B_OK) {
delete map;
return error;
}
*_map = map;
return B_OK;
}
status_t
PPCPagingMethodClassic::MapEarly(kernel_args* args, addr_t virtualAddress,
phys_addr_t physicalAddress, uint8 attributes,
page_num_t (*get_free_page)(kernel_args*))
{
uint32 virtualSegmentID = get_sr((void *)virtualAddress) & 0xffffff;
uint32 hash = page_table_entry::PrimaryHash(virtualSegmentID, (uint32)virtualAddress);
page_table_entry_group *group = &fPageTable[hash & fPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
// 8 entries in a group
if (group->entry[i].valid)
continue;
FillPageTableEntry(&group->entry[i], virtualSegmentID,
virtualAddress, physicalAddress, PTE_READ_WRITE, 0, false);
return B_OK;
}
hash = page_table_entry::SecondaryHash(hash);
group = &fPageTable[hash & fPageTableHashMask];
for (int32 i = 0; i < 8; i++) {
if (group->entry[i].valid)
continue;
FillPageTableEntry(&group->entry[i], virtualSegmentID,
virtualAddress, physicalAddress, PTE_READ_WRITE, 0, true);
return B_OK;
}
return B_ERROR;
}
bool
PPCPagingMethodClassic::IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection)
{
// TODO:factor out to baseclass
VMAddressSpace *addressSpace = VMAddressSpace::Kernel();
//XXX:
// PPCVMTranslationMap* map = static_cast<PPCVMTranslationMap*>(
// addressSpace->TranslationMap());
// VMTranslationMap* map = addressSpace->TranslationMap();
PPCVMTranslationMapClassic* map = static_cast<PPCVMTranslationMapClassic*>(
addressSpace->TranslationMap());
phys_addr_t physicalAddress;
uint32 flags;
if (map->Query(virtualAddress, &physicalAddress, &flags) != B_OK)
return false;
if ((flags & PAGE_PRESENT) == 0)
return false;
// present means kernel-readable, so check for writable
return (protection & B_KERNEL_WRITE_AREA) == 0
|| (flags & B_KERNEL_WRITE_AREA) != 0;
}
void
PPCPagingMethodClassic::FillPageTableEntry(page_table_entry *entry,
uint32 virtualSegmentID, addr_t virtualAddress, phys_addr_t physicalAddress,
uint8 protection, uint32 memoryType, bool secondaryHash)
{
// lower 32 bit - set at once
entry->physical_page_number = physicalAddress / B_PAGE_SIZE;
entry->_reserved0 = 0;
entry->referenced = false;
entry->changed = false;
entry->write_through = (memoryType == B_MTR_UC) || (memoryType == B_MTR_WT);
entry->caching_inhibited = (memoryType == B_MTR_UC);
entry->memory_coherent = false;
entry->guarded = false;
entry->_reserved1 = 0;
entry->page_protection = protection & 0x3;
eieio();
// we need to make sure that the lower 32 bit were
// already written when the entry becomes valid
// upper 32 bit
entry->virtual_segment_id = virtualSegmentID;
entry->secondary_hash = secondaryHash;
entry->abbr_page_index = (virtualAddress >> 22) & 0x3f;
entry->valid = true;
ppc_sync();
}
#if 0//X86
/*static*/ void
PPCPagingMethodClassic::PutPageTableInPageDir(page_directory_entry* entry,
phys_addr_t pgtablePhysical, uint32 attributes)
{
*entry = (pgtablePhysical & PPC_PDE_ADDRESS_MASK)
| PPC_PDE_PRESENT
| PPC_PDE_WRITABLE
| PPC_PDE_USER;
// TODO: we ignore the attributes of the page table - for compatibility
// with BeOS we allow having user accessible areas in the kernel address
// space. This is currently being used by some drivers, mainly for the
// frame buffer. Our current real time data implementation makes use of
// this fact, too.
// We might want to get rid of this possibility one day, especially if
// we intend to port it to a platform that does not support this.
}
/*static*/ void
PPCPagingMethodClassic::PutPageTableEntryInTable(page_table_entry* entry,
phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
bool globalPage)
{
page_table_entry page = (physicalAddress & PPC_PTE_ADDRESS_MASK)
| PPC_PTE_PRESENT | (globalPage ? PPC_PTE_GLOBAL : 0)
| MemoryTypeToPageTableEntryFlags(memoryType);
// if the page is user accessible, it's automatically
// accessible in kernel space, too (but with the same
// protection)
if ((attributes & B_USER_PROTECTION) != 0) {
page |= PPC_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
page |= PPC_PTE_WRITABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
page |= PPC_PTE_WRITABLE;
// put it in the page table
*(volatile page_table_entry*)entry = page;
}
/*static*/ void
PPCPagingMethodClassic::_EarlyPreparePageTables(page_table_entry* pageTables,
addr_t address, size_t size)
{
memset(pageTables, 0, B_PAGE_SIZE * (size / (B_PAGE_SIZE * 1024)));
// put the array of pgtables directly into the kernel pagedir
// these will be wired and kept mapped into virtual space to be easy to get
// to
{
addr_t virtualTable = (addr_t)pageTables;
page_directory_entry* pageHolePageDir
= PPCPagingMethodClassic::Method()->PageHolePageDir();
for (size_t i = 0; i < (size / (B_PAGE_SIZE * 1024));
i++, virtualTable += B_PAGE_SIZE) {
phys_addr_t physicalTable = 0;
_EarlyQuery(virtualTable, &physicalTable);
page_directory_entry* entry = &pageHolePageDir[
(address / (B_PAGE_SIZE * 1024)) + i];
PutPageTableInPageDir(entry, physicalTable,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
}
}
}
//! TODO: currently assumes this translation map is active
/*static*/ status_t
PPCPagingMethodClassic::_EarlyQuery(addr_t virtualAddress,
phys_addr_t *_physicalAddress)
{
PPCPagingMethodClassic* method = PPCPagingMethodClassic::Method();
int index = VADDR_TO_PDENT(virtualAddress);
if ((method->PageHolePageDir()[index] & PPC_PDE_PRESENT) == 0) {
// no pagetable here
return B_ERROR;
}
page_table_entry* entry = method->PageHole() + virtualAddress / B_PAGE_SIZE;
if ((*entry & PPC_PTE_PRESENT) == 0) {
// page mapping not valid
return B_ERROR;
}
*_physicalAddress = *entry & PPC_PTE_ADDRESS_MASK;
return B_OK;
}
#endif

View File

@@ -0,0 +1,208 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_PAGING_METHOD_CLASSIC_H
#define KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_PAGING_METHOD_CLASSIC_H
#include <arch_mmu.h>
//#include "paging/classic/paging.h"
#include "paging/PPCPagingMethod.h"
#include "paging/PPCPagingStructures.h"
#include "GenericVMPhysicalPageMapper.h"
class TranslationMapPhysicalPageMapper;
class PPCPagingMethodClassic : public PPCPagingMethod {
public:
PPCPagingMethodClassic();
virtual ~PPCPagingMethodClassic();
virtual status_t Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper);
virtual status_t InitPostArea(kernel_args* args);
virtual status_t CreateTranslationMap(bool kernel,
VMTranslationMap** _map);
virtual status_t MapEarly(kernel_args* args,
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 attributes,
page_num_t (*get_free_page)(kernel_args*));
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection);
#if 0//X86
inline page_table_entry* PageHole() const
{ return fPageHole; }
inline page_directory_entry* PageHolePageDir() const
{ return fPageHolePageDir; }
inline uint32 KernelPhysicalPageDirectory() const
{ return fKernelPhysicalPageDirectory; }
inline page_directory_entry* KernelVirtualPageDirectory() const
{ return fKernelVirtualPageDirectory; }
inline PPCPhysicalPageMapper* PhysicalPageMapper() const
{ return fPhysicalPageMapper; }
inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
{ return fKernelPhysicalPageMapper; }
#endif
inline page_table_entry_group* PageTable() const
{ return fPageTable; }
inline size_t PageTableSize() const
{ return fPageTableSize; }
inline uint32 PageTableHashMask() const
{ return fPageTableHashMask; }
static PPCPagingMethodClassic* Method();
void FillPageTableEntry(page_table_entry *entry,
uint32 virtualSegmentID,
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 protection, uint32 memoryType,
bool secondaryHash);
#if 0//X86
static void PutPageTableInPageDir(
page_directory_entry* entry,
phys_addr_t pgtablePhysical,
uint32 attributes);
static void PutPageTableEntryInTable(
page_table_entry* entry,
phys_addr_t physicalAddress,
uint32 attributes, uint32 memoryType,
bool globalPage);
static page_table_entry SetPageTableEntry(page_table_entry* entry,
page_table_entry newEntry);
static page_table_entry SetPageTableEntryFlags(page_table_entry* entry,
uint32 flags);
static page_table_entry TestAndSetPageTableEntry(
page_table_entry* entry,
page_table_entry newEntry,
page_table_entry oldEntry);
static page_table_entry ClearPageTableEntry(page_table_entry* entry);
static page_table_entry ClearPageTableEntryFlags(
page_table_entry* entry, uint32 flags);
static uint32 MemoryTypeToPageTableEntryFlags(
uint32 memoryType);
#endif
private:
//XXX:x86
struct PhysicalPageSlotPool;
friend struct PhysicalPageSlotPool;
private:
#if 0//X86
static void _EarlyPreparePageTables(
page_table_entry* pageTables,
addr_t address, size_t size);
static status_t _EarlyQuery(addr_t virtualAddress,
phys_addr_t *_physicalAddress);
#endif
private:
struct page_table_entry_group *fPageTable;
size_t fPageTableSize;
uint32 fPageTableHashMask;
area_id fPageTableArea;
GenericVMPhysicalPageMapper fPhysicalPageMapper;
#if 0 //XXX:x86
page_table_entry* fPageHole;
page_directory_entry* fPageHolePageDir;
uint32 fKernelPhysicalPageDirectory;
page_directory_entry* fKernelVirtualPageDirectory;
PPCPhysicalPageMapper* fPhysicalPageMapper;
TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
#endif
};
/*static*/ inline PPCPagingMethodClassic*
PPCPagingMethodClassic::Method()
{
return static_cast<PPCPagingMethodClassic*>(gPPCPagingMethod);
}
#if 0//X86
/*static*/ inline page_table_entry
PPCPagingMethodClassic::SetPageTableEntry(page_table_entry* entry,
page_table_entry newEntry)
{
return atomic_set((int32*)entry, newEntry);
}
/*static*/ inline page_table_entry
PPCPagingMethodClassic::SetPageTableEntryFlags(page_table_entry* entry,
uint32 flags)
{
return atomic_or((int32*)entry, flags);
}
/*static*/ inline page_table_entry
PPCPagingMethodClassic::TestAndSetPageTableEntry(page_table_entry* entry,
page_table_entry newEntry, page_table_entry oldEntry)
{
return atomic_test_and_set((int32*)entry, newEntry, oldEntry);
}
/*static*/ inline page_table_entry
PPCPagingMethodClassic::ClearPageTableEntry(page_table_entry* entry)
{
return SetPageTableEntry(entry, 0);
}
/*static*/ inline page_table_entry
PPCPagingMethodClassic::ClearPageTableEntryFlags(page_table_entry* entry, uint32 flags)
{
return atomic_and((int32*)entry, ~flags);
}
/*static*/ inline uint32
PPCPagingMethodClassic::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
// ATM we only handle the uncacheable and write-through type explicitly. For
// all other types we rely on the MTRRs to be set up correctly. Since we set
// the default memory type to write-back and since the uncacheable type in
// the PTE overrides any MTRR attribute (though, as per the specs, that is
// not recommended for performance reasons), this reduces the work we
// actually *have* to do with the MTRRs to setting the remaining types
// (usually only write-combining for the frame buffer).
switch (memoryType) {
case B_MTR_UC:
return PPC_PTE_CACHING_DISABLED | PPC_PTE_WRITE_THROUGH;
case B_MTR_WC:
// PPC_PTE_WRITE_THROUGH would be closer, but the combination with
// MTRR WC is "implementation defined" for Pentium Pro/II.
return 0;
case B_MTR_WT:
return PPC_PTE_WRITE_THROUGH;
case B_MTR_WP:
case B_MTR_WB:
default:
return 0;
}
}
#endif//X86
#endif // KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_PAGING_METHOD_CLASSIC_H

View File

@@ -0,0 +1,141 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "paging/classic/PPCPagingStructuresClassic.h"
#include <stdlib.h>
#include <heap.h>
#include <util/AutoLock.h>
// Accessor class to reuse the SinglyLinkedListLink of DeferredDeletable for
// PPCPagingStructuresClassic.
struct PagingStructuresGetLink {
private:
typedef SinglyLinkedListLink<PPCPagingStructuresClassic> Link;
public:
inline Link* operator()(PPCPagingStructuresClassic* element) const
{
return (Link*)element->GetSinglyLinkedListLink();
}
inline const Link* operator()(
const PPCPagingStructuresClassic* element) const
{
return (const Link*)element->GetSinglyLinkedListLink();
}
};
typedef SinglyLinkedList<PPCPagingStructuresClassic, PagingStructuresGetLink>
PagingStructuresList;
static PagingStructuresList sPagingStructuresList;
static spinlock sPagingStructuresListLock;
PPCPagingStructuresClassic::PPCPagingStructuresClassic()
/* :
pgdir_virt(NULL)*/
{
}
PPCPagingStructuresClassic::~PPCPagingStructuresClassic()
{
#if 0//X86
// free the page dir
free(pgdir_virt);
#endif
}
void
PPCPagingStructuresClassic::Init(/*page_directory_entry* virtualPageDir,
phys_addr_t physicalPageDir, page_directory_entry* kernelPageDir*/
page_table_entry_group *pageTable)
{
// pgdir_virt = virtualPageDir;
// pgdir_phys = physicalPageDir;
#if 0//X86
// zero out the bottom portion of the new pgdir
memset(pgdir_virt + FIRST_USER_PGDIR_ENT, 0,
NUM_USER_PGDIR_ENTS * sizeof(page_directory_entry));
#endif
// insert this new map into the map list
{
int state = disable_interrupts();
acquire_spinlock(&sPagingStructuresListLock);
#if 0//X86
// copy the top portion of the page dir from the kernel page dir
if (kernelPageDir != NULL) {
memcpy(pgdir_virt + FIRST_KERNEL_PGDIR_ENT,
kernelPageDir + FIRST_KERNEL_PGDIR_ENT,
NUM_KERNEL_PGDIR_ENTS * sizeof(page_directory_entry));
}
#endif
sPagingStructuresList.Add(this);
release_spinlock(&sPagingStructuresListLock);
restore_interrupts(state);
}
}
void
PPCPagingStructuresClassic::Delete()
{
// remove from global list
InterruptsSpinLocker locker(sPagingStructuresListLock);
sPagingStructuresList.Remove(this);
locker.Unlock();
#if 0
// this sanity check can be enabled when corruption due to
// overwriting an active page directory is suspected
uint32 activePageDirectory = x86_read_cr3();
if (activePageDirectory == pgdir_phys)
panic("deleting a still active page directory\n");
#endif
if (are_interrupts_enabled())
delete this;
else
deferred_delete(this);
}
/*static*/ void
PPCPagingStructuresClassic::StaticInit()
{
B_INITIALIZE_SPINLOCK(&sPagingStructuresListLock);
new (&sPagingStructuresList) PagingStructuresList;
}
/*static*/ void
PPCPagingStructuresClassic::UpdateAllPageDirs(int index,
page_table_entry_group entry)
//XXX:page_table_entry?
{
InterruptsSpinLocker locker(sPagingStructuresListLock);
#if 0//X86
PagingStructuresList::Iterator it = sPagingStructuresList.GetIterator();
while (PPCPagingStructuresClassic* info = it.Next())
info->pgdir_virt[index] = entry;
#endif
}

View File

@@ -0,0 +1,33 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_PAGING_STRUCTURES_CLASSIC_H
#define KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_PAGING_STRUCTURES_CLASSIC_H
//#include "paging/classic/paging.h"
#include <arch_mmu.h>
#include "paging/PPCPagingStructures.h"
struct PPCPagingStructuresClassic : PPCPagingStructures {
// page_directory_entry* pgdir_virt;
PPCPagingStructuresClassic();
virtual ~PPCPagingStructuresClassic();
void Init(/*page_directory_entry* virtualPageDir,
phys_addr_t physicalPageDir,
page_directory_entry* kernelPageDir,*/
page_table_entry_group *pageTable);
virtual void Delete();
static void StaticInit();
static void UpdateAllPageDirs(int index,
page_table_entry_group entry);
};
#endif // KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_PAGING_STRUCTURES_CLASSIC_H

File diff suppressed because it is too large

View File

@@ -0,0 +1,81 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_VM_TRANSLATION_MAP_CLASSIC_H
#define KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_VM_TRANSLATION_MAP_CLASSIC_H
#include "paging/PPCVMTranslationMap.h"
#include <arch_mmu.h>
struct PPCPagingStructuresClassic;
struct PPCVMTranslationMapClassic : PPCVMTranslationMap {
PPCVMTranslationMapClassic();
virtual ~PPCVMTranslationMapClassic();
status_t Init(bool kernel);
inline int VSIDBase() const { return fVSIDBase; }
virtual void ChangeASID();
page_table_entry* LookupPageTableEntry(addr_t virtualAddress);
bool RemovePageTableEntry(addr_t virtualAddress);
virtual size_t MaxPagesNeededToMap(addr_t start,
addr_t end) const;
virtual status_t Map(addr_t virtualAddress,
phys_addr_t physicalAddress,
uint32 attributes, uint32 memoryType,
vm_page_reservation* reservation);
virtual status_t Unmap(addr_t start, addr_t end);
virtual status_t RemapAddressRange(addr_t *_virtualAddress,
size_t size, bool unmap);
virtual status_t DebugMarkRangePresent(addr_t start, addr_t end,
bool markPresent);
virtual status_t UnmapPage(VMArea* area, addr_t address,
bool updatePageQueue);
virtual void UnmapPages(VMArea* area, addr_t base,
size_t size, bool updatePageQueue);
virtual void UnmapArea(VMArea* area,
bool deletingAddressSpace,
bool ignoreTopCachePageFlags);
virtual status_t Query(addr_t virtualAddress,
phys_addr_t* _physicalAddress,
uint32* _flags);
virtual status_t QueryInterrupt(addr_t virtualAddress,
phys_addr_t* _physicalAddress,
uint32* _flags);
virtual status_t Protect(addr_t base, addr_t top,
uint32 attributes, uint32 memoryType);
virtual status_t ClearFlags(addr_t virtualAddress,
uint32 flags);
virtual bool ClearAccessedAndModified(
VMArea* area, addr_t address,
bool unmapIfUnaccessed,
bool& _modified);
virtual PPCPagingStructures* PagingStructures() const;
inline PPCPagingStructuresClassic* PagingStructuresClassic() const
{ return fPagingStructures; }
private:
PPCPagingStructuresClassic* fPagingStructures;
//XXX:move to fPagingStructures?
int fVSIDBase;
};
#endif // KERNEL_ARCH_PPC_PAGING_CLASSIC_PPC_VM_TRANSLATION_MAP_CLASSIC_H