Fix building the m68k kernel

* Restructured the vm support code to align with the changes done to other archs.
* Not completely finished, but the kernel loads and panics. I had this sitting on the disk anyway.
* Only 040 is supported for now; 030 will need to be added back.
* This commit is dedicated to Jack Tramiel, who passed away last Sunday:
http://www.forbes.com/sites/davidthier/2012/04/09/computer-legend-and-gaming-pioneer-jack-tramiel-dies-at-age-83/
François Revol 2012-04-13 00:53:09 +02:00
parent abbd8f3516
commit 599f30f93d
23 changed files with 3800 additions and 146 deletions

@ -20,6 +20,8 @@ void m68k_push_iframe(struct iframe_stack *stack, struct iframe *frame);
void m68k_pop_iframe(struct iframe_stack *stack);
struct iframe *m68k_get_user_iframe(void);
uint32 m68k_next_page_directory(Thread *from, Thread *to);
/* as we won't support SMP on m68k (yet?) we can use a global here */
extern Thread *gCurrentThread;

@ -14,39 +14,4 @@
#define PAGE_SHIFT 12
struct m68k_vm_ops {
void *(*m68k_translation_map_get_pgdir)(VMTranslationMap *map);
status_t (*arch_vm_translation_map_init_map)(VMTranslationMap *map, bool kernel);
status_t (*arch_vm_translation_map_init_kernel_map_post_sem)(VMTranslationMap *map);
status_t (*arch_vm_translation_map_init)(kernel_args *args);
status_t (*arch_vm_translation_map_init_post_area)(kernel_args *args);
status_t (*arch_vm_translation_map_init_post_sem)(kernel_args *args);
status_t (*arch_vm_translation_map_early_map)(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
uint8 attributes, addr_t (*get_free_page)(kernel_args *));
status_t (*arch_vm_translation_map_early_query)(addr_t va, addr_t *out_physical);
void (*m68k_set_pgdir)(void *);
#if 0 /* ppc stuff only ? */
status_t (*m68k_map_address_range)(addr_t virtualAddress, addr_t physicalAddress,
size_t size);
void (*m68k_unmap_address_range)(addr_t virtualAddress, size_t size);
status_t (*m68k_remap_address_range)(addr_t *_virtualAddress, size_t size, bool unmap);
#endif
bool (*arch_vm_translation_map_is_kernel_page_accessible)(addr_t virtualAddress, uint32 protection);
};
#ifdef __cplusplus
extern "C" {
#endif
extern struct m68k_vm_ops *get_vm_ops();
extern void *m68k_translation_map_get_pgdir(VMTranslationMap *map);
extern void m68k_set_pgdir(void *rt);
#ifdef __cplusplus
}
#endif
#endif /* ARCH_M68K_VM_H */

@ -4,19 +4,35 @@ SubDirHdrs $(SUBDIR) $(DOTDOT) generic ;
UsePrivateKernelHeaders ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) $(DOTDOT) generic ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging ] ;
#SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 030 ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 040 ] ;
#SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 060 ] ;
# cpu-specific stuff
KernelMergeObject arch_m68k_030.o :
arch_030_cpu.cpp
arch_030_mmu.cpp
#arch_030_mmu.cpp
arch_030_asm.S
# paging/030
#M68KPagingMethod030.cpp
#M68KPagingStructures030.cpp
#M68KVMTranslationMap030.cpp
: $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused -m68030
;
KernelMergeObject arch_m68k_040.o :
arch_040_cpu.cpp
arch_040_mmu.cpp
#arch_040_mmu.cpp
arch_040_asm.S
# paging/040
M68KPagingMethod040.cpp
M68KPagingStructures040.cpp
M68KVMTranslationMap040.cpp
: $(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused -m68040
;
@ -46,8 +62,15 @@ KernelMergeObject kernel_arch_m68k.o :
arch_vm_translation_map.cpp
arch_asm.S
generic_vm_physical_page_mapper.cpp
generic_vm_physical_page_ops.cpp
#generic_vm_physical_page_mapper.cpp
#generic_vm_physical_page_ops.cpp
# paging
m68k_physical_page_mapper.cpp
m68k_physical_page_mapper_large_memory.cpp
M68KPagingMethod.cpp
M68KPagingStructures.cpp
M68KVMTranslationMap.cpp
:
$(TARGET_KERNEL_PIC_CCFLAGS) -Wno-unused

@ -14,6 +14,8 @@
#include <arch_thread.h>
#include <string.h>
#include <arch_cpu.h>
#include <arch/thread.h>
#include <boot/stage2.h>
@ -24,7 +26,10 @@
#include <arch_vm.h>
//#include <arch/vm_translation_map.h>
#include <string.h>
#include "paging/M68KPagingMethod.h"
#include "paging/M68KPagingStructures.h"
#include "paging/M68KVMTranslationMap.h"
#warning M68K: writeme!
// Valid initial arch_thread state. We just memcpy() it when initializing
@ -90,29 +95,20 @@ m68k_get_user_iframe(void)
}
void *
uint32
m68k_next_page_directory(Thread *from, Thread *to)
{
if (from->team->address_space != NULL && to->team->address_space != NULL) {
// they are both user space threads
if (from->team == to->team) {
// dont change the pgdir, same address space
return NULL;
}
// switching to a new address space
return m68k_translation_map_get_pgdir(
to->team->address_space->TranslationMap());
} else if (from->team->address_space == NULL && to->team->address_space == NULL) {
// they must both be kernel space threads
return NULL;
} else if (to->team->address_space == NULL) {
// the one we're switching to is kernel space
return m68k_translation_map_get_pgdir(
VMAddressSpace::Kernel()->TranslationMap());
VMAddressSpace* toAddressSpace = to->team->address_space;
if (from->team->address_space == toAddressSpace) {
// don't change the pgdir, same address space
return 0;
}
return m68k_translation_map_get_pgdir(
to->team->address_space->TranslationMap());
if (toAddressSpace == NULL)
toAddressSpace = VMAddressSpace::Kernel();
return static_cast<M68KVMTranslationMap*>(toAddressSpace->TranslationMap())
->PagingStructures()->pgroot_phys;
}
// #pragma mark -
@ -211,7 +207,10 @@ arch_thread_context_switch(Thread *from, Thread *to)
if ((newPageDirectory % B_PAGE_SIZE) != 0)
panic("arch_thread_context_switch: bad pgdir 0x%lx\n", newPageDirectory);
#warning M68K: export from arch_vm.c
m68k_set_pgdir((void *)newPageDirectory);
//m68k_set_pgdir((void *)newPageDirectory);
gM68KPagingMethod->SetPageRoot(newPageDirectory);
m68k_context_switch(&from->arch_info.sp, to->arch_info.sp);
}

@ -106,7 +106,14 @@ arch_vm_init_post_modules(kernel_args *args)
void
arch_vm_aspace_swap(struct VMAddressSpace *from, struct VMAddressSpace *to)
{
m68k_set_pgdir(m68k_translation_map_get_pgdir(to->TranslationMap()));
// This function is only invoked when a userland thread is in the process
// of dying. It switches to the kernel team and does whatever cleanup is
// necessary (in case it is the team's main thread, it will delete the
// team).
// It is however not necessary to change the page directory. Userland team's
// page directories include all kernel mappings as well. Furthermore our
// arch specific translation map data objects are ref-counted, so they won't
// go away as long as they are still used on any CPU.
}

@ -1,8 +1,7 @@
/*
* Copyright 2007, François Revol, revol@free.fr.
* Distributed under the terms of the MIT License.
*
* Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
* Copyright 2007-2010, François Revol, revol@free.fr.
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001, Travis Geiselbrecht. All rights reserved.
@ -22,86 +21,119 @@
#include <stdlib.h>
#include "generic_vm_physical_page_mapper.h"
//#include "paging/030/M68KPagingMethod030.h"
#include "paging/040/M68KPagingMethod040.h"
//#include "paging/060/M68KPagingMethod060.h"
#define TRACE_VM_TMAP
#ifdef TRACE_VM_TMAP
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
/*
* Each mmu of the m68k family has its own tricks, registers and opcodes...
* so we use a function array to switch to the one we want.
* so they all have a specific paging method class.
*/
#warning M68K: 060: must *not* have pgtables in copyback cachable mem!!!
//extern struct m68k_vm_ops m68851_vm_ops;
extern struct m68k_vm_ops m68030_vm_ops;
extern struct m68k_vm_ops m68040_vm_ops;
static union {
uint64 align;
//char m68851[sizeof(M68KPagingMethod851)];
//char m68030[sizeof(M68KPagingMethod030)];
char m68040[sizeof(M68KPagingMethod040)];
// 060 should be identical to 040 except for copyback issue
//extern struct m68k_vm_ops m68060_vm_ops;
//char m68060[sizeof(M68KPagingMethod060)];
} sPagingMethodBuffer;
#warning M68K: use a static!
m68k_vm_ops *get_vm_ops()
{
int mmu = arch_mmu_type;
switch (mmu) {
case 68551:
panic("Unimplemented yet (mmu)");
//return &m68851_vm_ops;
return NULL;
case 68030:
return &m68030_vm_ops;
case 68040:
return &m68040_vm_ops;
case 68060:
//return &m68060_vm_ops;
panic("Unimplemented yet (mmu)");
return NULL;
default:
panic("Invalid mmu type!");
return NULL;
}
}
#if 0
void *
m68k_translation_map_get_pgdir(VMTranslationMap *map)
{
return get_vm_ops()->m68k_translation_map_get_pgdir(map);
}
#endif
// #pragma mark -
// VM API
status_t
arch_vm_translation_map_init_map(VMTranslationMap *map, bool kernel)
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
return get_vm_ops()->arch_vm_translation_map_init_map(map, kernel);
return gM68KPagingMethod->CreateTranslationMap(kernel, _map);
}
status_t
arch_vm_translation_map_init_kernel_map_post_sem(VMTranslationMap *map)
arch_vm_translation_map_init(kernel_args *args,
VMPhysicalPageMapper** _physicalPageMapper)
{
return get_vm_ops()->arch_vm_translation_map_init_kernel_map_post_sem(map);
}
TRACE("vm_translation_map_init: entry\n");
#ifdef TRACE_VM_TMAP
TRACE("physical memory ranges:\n");
for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
phys_addr_t start = args->physical_memory_range[i].start;
phys_addr_t end = start + args->physical_memory_range[i].size;
TRACE(" %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
end);
}
status_t
arch_vm_translation_map_init(kernel_args *args)
{
return get_vm_ops()->arch_vm_translation_map_init(args);
}
TRACE("allocated physical ranges:\n");
for (uint32 i = 0; i < args->num_physical_allocated_ranges; i++) {
phys_addr_t start = args->physical_allocated_range[i].start;
phys_addr_t end = start + args->physical_allocated_range[i].size;
TRACE(" %#10" B_PRIxPHYSADDR " - %#10" B_PRIxPHYSADDR "\n", start,
end);
}
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
return get_vm_ops()->arch_vm_translation_map_init_post_area(args);
TRACE("allocated virtual ranges:\n");
for (uint32 i = 0; i < args->num_virtual_allocated_ranges; i++) {
addr_t start = args->virtual_allocated_range[i].start;
addr_t end = start + args->virtual_allocated_range[i].size;
TRACE(" %#10" B_PRIxADDR " - %#10" B_PRIxADDR "\n", start, end);
}
#endif
switch (arch_mmu_type) {
/*
case 68030:
gM68KPagingMethod = new(&sPagingMethodBuffer) M68KPagingMethod030;
break;
*/
case 68040:
gM68KPagingMethod = new(&sPagingMethodBuffer) M68KPagingMethod040;
break;
/*
case 68060:
gM68KPagingMethod = new(&sPagingMethodBuffer) M68KPagingMethod060;
break;
*/
default:
break;
}
return gM68KPagingMethod->Init(args, _physicalPageMapper);
}
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
return get_vm_ops()->arch_vm_translation_map_init_post_sem(args);
return B_OK;
}
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
TRACE("vm_translation_map_init_post_area: entry\n");
return gM68KPagingMethod->InitPostArea(args);
}
@ -110,62 +142,33 @@ arch_vm_translation_map_init_post_sem(kernel_args *args)
* It currently ignores the "attributes" parameter and sets all pages
* read/write.
*/
status_t
arch_vm_translation_map_early_map(kernel_args *ka, addr_t virtualAddress, addr_t physicalAddress,
uint8 attributes, addr_t (*get_free_page)(kernel_args *))
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
return get_vm_ops()->arch_vm_translation_map_early_map(ka, virtualAddress, physicalAddress,
attributes, get_free_page);
TRACE("early_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
return gM68KPagingMethod->MapEarly(args, va, pa, attributes, get_free_page);
}
// XXX currently assumes this translation map is active
/*
status_t
arch_vm_translation_map_early_query(addr_t va, addr_t *out_physical)
{
return get_vm_ops()->arch_vm_translation_map_early_query(va, out_physical);
}
*/
// #pragma mark -
void
m68k_set_pgdir(void *rt)
{
return get_vm_ops()->m68k_set_pgdir(rt);
}
#if 0
status_t
m68k_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
size_t size)
{
return get_vm_ops()->m68k_map_address_range(virtualAddress, physicalAddress, size);
}
void
m68k_unmap_address_range(addr_t virtualAddress, size_t size)
{
get_vm_ops()->m68k_unmap_address_range(virtualAddress, size);
}
status_t
m68k_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
return get_vm_ops()->m68k_remap_address_range(_virtualAddress, size, unmap);
}
#endif
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
uint32 protection)
{
return get_vm_ops()->arch_vm_translation_map_is_kernel_page_accessible(virtualAddress,
protection);
return gM68KPagingMethod->IsKernelPageAccessible(virtualAddress, protection);
}

@ -0,0 +1,776 @@
/*
* Copyright 2010-2012, François Revol, revol@free.fr.
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "paging/040/M68KPagingMethod040.h"
#include <stdlib.h>
#include <string.h>
#include <AutoDeleter.h>
#include <arch_system_info.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <thread.h>
#include <vm/vm.h>
#include <vm/VMAddressSpace.h>
#include "paging/040/M68KPagingStructures040.h"
#include "paging/040/M68KVMTranslationMap040.h"
#include "paging/m68k_physical_page_mapper.h"
#include "paging/m68k_physical_page_mapper_large_memory.h"
#define TRACE_M68K_PAGING_METHOD_32_BIT
#ifdef TRACE_M68K_PAGING_METHOD_32_BIT
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
/* Slots per pool for the physical page mapper.
* m68k page tables are smaller than a page, but we allocate them at page
* granularity anyway, so just use one page worth of slots.
*/
#define SLOTS_PER_POOL 1024
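// Rough arithmetic for orientation (PAGE_SHIFT is 12, so 4K pages): each
// pool therefore covers a 1024 * 4K = 4 MB window of kernel virtual address
// space, backed by one page of page table plus the PhysicalPageSlot
// bookkeeping array (see InitInitial() below).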
using M68KLargePhysicalPageMapper::PhysicalPageSlot;
//XXX: make it a class member
//static page_table_entry sQueryDesc __attribute__ (( aligned (4) ));
//XXX:REMOVEME
//static addr_t sIOSpaceBase;
//XXX: stuff it in the class
#warning M68K:REMOVE
static inline void
init_page_root_entry(page_root_entry *entry)
{
// DT_INVALID is 0
*entry = DFL_ROOTENT_VAL;
}
static inline void
update_page_root_entry(page_root_entry *entry, page_root_entry *with)
{
// update page directory entry atomically
*entry = *with;
}
static inline void
init_page_directory_entry(page_directory_entry *entry)
{
*entry = DFL_DIRENT_VAL;
}
static inline void
update_page_directory_entry(page_directory_entry *entry, page_directory_entry *with)
{
// update page directory entry atomically
*entry = *with;
}
static inline void
init_page_table_entry(page_table_entry *entry)
{
*entry = DFL_PAGEENT_VAL;
}
static inline void
update_page_table_entry(page_table_entry *entry, page_table_entry *with)
{
// update page table entry atomically
// XXX: is it ?? (long desc?)
*entry = *with;
}
static inline void
init_page_indirect_entry(page_indirect_entry *entry)
{
#warning M68K: is this correct?
*entry = DFL_PAGEENT_VAL;
}
static inline void
update_page_indirect_entry(page_indirect_entry *entry, page_indirect_entry *with)
{
// update page table entry atomically
// XXX: is it ?? (long desc?)
*entry = *with;
}
// #pragma mark - M68KPagingMethod040::PhysicalPageSlotPool
struct M68KPagingMethod040::PhysicalPageSlotPool
: M68KLargePhysicalPageMapper::PhysicalPageSlotPool {
public:
virtual ~PhysicalPageSlotPool();
status_t InitInitial(kernel_args* args);
status_t InitInitialPostArea(kernel_args* args);
void Init(area_id dataArea, void* data,
area_id virtualArea, addr_t virtualBase);
virtual status_t AllocatePool(
M68KLargePhysicalPageMapper
::PhysicalPageSlotPool*& _pool);
virtual void Map(phys_addr_t physicalAddress,
addr_t virtualAddress);
public:
static PhysicalPageSlotPool sInitialPhysicalPagePool;
private:
area_id fDataArea;
area_id fVirtualArea;
addr_t fVirtualBase;
page_table_entry* fPageTable;
};
M68KPagingMethod040::PhysicalPageSlotPool
M68KPagingMethod040::PhysicalPageSlotPool::sInitialPhysicalPagePool;
M68KPagingMethod040::PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}
status_t
M68KPagingMethod040::PhysicalPageSlotPool::InitInitial(kernel_args* args)
{
// allocate a virtual address range for the pages to be mapped into
addr_t virtualBase = vm_allocate_early(args, SLOTS_PER_POOL * B_PAGE_SIZE,
0, 0, kPageTableAlignment);
if (virtualBase == 0) {
panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
"physical page pool space in virtual address space!");
return B_ERROR;
}
// allocate memory for the page table and data
size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0);
// prepare the page table
_EarlyPreparePageTables(pageTable, virtualBase,
SLOTS_PER_POOL * B_PAGE_SIZE);
// init the pool structure and add the initial pool
Init(-1, pageTable, -1, (addr_t)virtualBase);
return B_OK;
}
status_t
M68KPagingMethod040::PhysicalPageSlotPool::InitInitialPostArea(
kernel_args* args)
{
#warning M68K:WRITEME
// create an area for the (already allocated) data
size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
void* temp = fPageTable;
area_id area = create_area("physical page pool", &temp,
B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK) {
panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
"create area for physical page pool.");
return area;
}
fDataArea = area;
// create an area for the virtual address space
temp = (void*)fVirtualBase;
area = vm_create_null_area(VMAddressSpace::KernelID(),
"physical page pool space", &temp, B_EXACT_ADDRESS,
SLOTS_PER_POOL * B_PAGE_SIZE, 0);
if (area < B_OK) {
panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
"create area for physical page pool space.");
return area;
}
fVirtualArea = area;
return B_OK;
}
void
M68KPagingMethod040::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
area_id virtualArea, addr_t virtualBase)
{
fDataArea = dataArea;
fVirtualArea = virtualArea;
fVirtualBase = virtualBase;
fPageTable = (page_table_entry*)data;
// init slot list
fSlots = (PhysicalPageSlot*)(fPageTable + SLOTS_PER_POOL);
addr_t slotAddress = virtualBase;
for (int32 i = 0; i < SLOTS_PER_POOL; i++, slotAddress += B_PAGE_SIZE) {
PhysicalPageSlot* slot = &fSlots[i];
slot->next = slot + 1;
slot->pool = this;
slot->address = slotAddress;
}
fSlots[SLOTS_PER_POOL - 1].next = NULL;
// terminate the list
}
void
M68KPagingMethod040::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
addr_t virtualAddress)
{
page_table_entry& pte = fPageTable[
(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
pte = TA_TO_PTEA(physicalAddress) | DT_PAGE
| M68K_PTE_SUPERVISOR | M68K_PTE_GLOBAL;
arch_cpu_invalidate_TLB_range(virtualAddress, virtualAddress);
}
status_t
M68KPagingMethod040::PhysicalPageSlotPool::AllocatePool(
M68KLargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
{
// create the pool structure
PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
if (pool == NULL)
return B_NO_MEMORY;
ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);
// create an area that can contain the page table and the slot
// structures
size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[SLOTS_PER_POOL]);
void* data;
virtual_address_restrictions virtualRestrictions = {};
virtualRestrictions.address_specification = B_ANY_KERNEL_ADDRESS;
physical_address_restrictions physicalRestrictions = {};
area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
PAGE_ALIGN(areaSize), B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, CREATE_AREA_DONT_WAIT,
&virtualRestrictions, &physicalRestrictions, &data);
if (dataArea < 0)
return dataArea;
// create the null area for the virtual address space
void* virtualBase;
area_id virtualArea = vm_create_null_area(
VMAddressSpace::KernelID(), "physical page pool space",
&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, SLOTS_PER_POOL * B_PAGE_SIZE,
CREATE_AREA_PRIORITY_VIP);
if (virtualArea < 0) {
delete_area(dataArea);
return virtualArea;
}
// prepare the page table
memset(data, 0, B_PAGE_SIZE);
// get the page table's physical address
phys_addr_t physicalTable;
M68KVMTranslationMap040* map = static_cast<M68KVMTranslationMap040*>(
VMAddressSpace::Kernel()->TranslationMap());
uint32 dummyFlags;
cpu_status state = disable_interrupts();
map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
restore_interrupts(state);
#warning M68K:FIXME: insert *all* page tables!
panic("I'm lazy");
#if 0
// put the page table into the page directory
int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * SLOTS_PER_POOL);
page_directory_entry* entry
= &map->PagingStructures040()->pgdir_virt[index];
PutPageTableInPageDir(entry, physicalTable,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
M68KPagingStructures040::UpdateAllPageDirs(index, *entry);
#endif
// init the pool structure
pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
poolDeleter.Detach();
_pool = pool;
return B_OK;
}
// #pragma mark - M68KPagingMethod040
M68KPagingMethod040::M68KPagingMethod040()
:
//fPageHole(NULL),
//fPageHolePageDir(NULL),
fKernelPhysicalPageRoot(0),
fKernelVirtualPageRoot(NULL),
fPhysicalPageMapper(NULL),
fKernelPhysicalPageMapper(NULL)
{
}
M68KPagingMethod040::~M68KPagingMethod040()
{
}
status_t
M68KPagingMethod040::Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
{
TRACE("M68KPagingMethod040::Init(): entry\n");
#if 0//XXX:We might actually need this trick to support Milan
// page hole set up in stage2
fPageHole = (page_table_entry*)args->arch_args.page_hole;
// calculate where the pgdir would be
fPageHolePageDir = (page_directory_entry*)
(((addr_t)args->arch_args.page_hole)
+ (B_PAGE_SIZE * 1024 - B_PAGE_SIZE));
// clear out the bottom 2 GB, unmap everything
memset(fPageHolePageDir + FIRST_USER_PGDIR_ENT, 0,
sizeof(page_directory_entry) * NUM_USER_PGDIR_ENTS);
#endif
fKernelPhysicalPageRoot = (uint32)args->arch_args.phys_pgroot;
fKernelVirtualPageRoot = (page_root_entry *)args->arch_args.vir_pgroot;
#ifdef TRACE_M68K_PAGING_METHOD_32_BIT
//TRACE("page hole: %p, page dir: %p\n", fPageHole, fPageHolePageDir);
TRACE("page root: %p (physical: %#" B_PRIx32 ")\n",
fKernelVirtualPageRoot, fKernelPhysicalPageRoot);
#endif
//sQueryDesc.type = DT_INVALID;
M68KPagingStructures040::StaticInit();
// create the initial pool for the physical page mapper
PhysicalPageSlotPool* pool
= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
PhysicalPageSlotPool;
status_t error = pool->InitInitial(args);
if (error != B_OK) {
panic("M68KPagingMethod040::Init(): Failed to create initial pool "
"for physical page mapper!");
return error;
}
// create physical page mapper
large_memory_physical_page_ops_init(args, pool, fPhysicalPageMapper,
fKernelPhysicalPageMapper);
// TODO: Select the best page mapper!
TRACE("M68KPagingMethod040::Init(): done\n");
*_physicalPageMapper = fPhysicalPageMapper;
return B_OK;
}
status_t
M68KPagingMethod040::InitPostArea(kernel_args* args)
{
TRACE("M68KPagingMethod040::InitPostArea(): entry\n");
// now that the vm is initialized, create an area that represents
// the page hole
void *temp;
status_t error;
area_id area;
#if 0
// unmap the page hole hack we were using before
fKernelVirtualPageDirectory[1023] = 0;
fPageHolePageDir = NULL;
fPageHole = NULL;
#endif
temp = (void*)fKernelVirtualPageRoot;
area = create_area("kernel_pgdir", &temp, B_EXACT_ADDRESS, B_PAGE_SIZE,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return area;
error = PhysicalPageSlotPool::sInitialPhysicalPagePool
.InitInitialPostArea(args);
if (error != B_OK)
return error;
// this area is used for query_tmap_interrupt()
// TODO: Note, this only works as long as all pages belong to the same
// page table, which is not yet enforced (or even tested)!
// Note we don't support SMP which makes things simpler.
#if 0 //XXX: Do we need this anymore?
area = vm_create_null_area(VMAddressSpace::KernelID(),
"interrupt query pages", (void **)&queryPage, B_ANY_ADDRESS,
B_PAGE_SIZE, 0);
if (area < B_OK)
return area;
// insert the indirect descriptor in the tree so we can map the page we want from it.
//XXX...
#endif
TRACE("M68KPagingMethod040::InitPostArea(): done\n");
return B_OK;
}
status_t
M68KPagingMethod040::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
M68KVMTranslationMap040* map;
map = new(std::nothrow) M68KVMTranslationMap040;
if (map == NULL)
return B_NO_MEMORY;
status_t error = map->Init(kernel);
if (error != B_OK) {
delete map;
return error;
}
*_map = map;
return B_OK;
}
status_t
M68KPagingMethod040::MapEarly(kernel_args* args, addr_t virtualAddress,
phys_addr_t physicalAddress, uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*))
{
// XXX horrible back door to map a page quickly regardless of translation
// map object, etc. Used only during VM setup.
// On x86 this relies on a 'page hole' set up in the stage 2 bootloader;
// here we instead walk the kernel page tables directly, relying on the
// transparent translation (pa == va) that the bootloader leaves enabled
// at this point (see below).
addr_t va = virtualAddress;
phys_addr_t pa = physicalAddress;
page_root_entry *pr = (page_root_entry *)fKernelPhysicalPageRoot;
page_directory_entry *pd;
page_table_entry *pt;
addr_t tbl;
uint32 index;
uint32 i;
TRACE("040::MapEarly: entry pa 0x%lx va 0x%lx\n", pa, va);
// everything much simpler here because pa = va
// thanks to transparent translation which hasn't been disabled yet
index = VADDR_TO_PRENT(va);
if (PRE_TYPE(pr[index]) != DT_ROOT) {
unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
TRACE("missing page root entry %d ai %d\n", index, aindex);
tbl = get_free_page(args) * B_PAGE_SIZE;
if (!tbl)
return ENOMEM;
TRACE("040::MapEarly: asked for free page for pgdir. 0x%lx\n", tbl);
// zero-out
memset((void *)tbl, 0, B_PAGE_SIZE);
// for each pgdir on the allocated page:
for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
PutPageDirInPageRoot(&pr[aindex + i], tbl, attributes);
//TRACE("inserting tbl @ %p as %08x pr[%d] %08x\n", tbl, TA_TO_PREA(tbl), aindex + i, *(uint32 *)apr);
// clear the table
//TRACE("clearing table[%d]\n", i);
pd = (page_directory_entry *)tbl;
for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
pd[j] = DFL_DIRENT_VAL;
tbl += SIZ_DIRTBL;
}
}
pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
index = VADDR_TO_PDENT(va);
if (PDE_TYPE(pd[index]) != DT_DIR) {
unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
TRACE("missing page dir entry %d ai %d\n", index, aindex);
tbl = get_free_page(args) * B_PAGE_SIZE;
if (!tbl)
return ENOMEM;
TRACE("early_map: asked for free page for pgtable. 0x%lx\n", tbl);
// zero-out
memset((void *)tbl, 0, B_PAGE_SIZE);
// for each pgdir on the allocated page:
for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
PutPageTableInPageDir(&pd[aindex + i], tbl, attributes);
// clear the table
//TRACE("clearing table[%d]\n", i);
pt = (page_table_entry *)tbl;
for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
pt[j] = DFL_PAGEENT_VAL;
tbl += SIZ_PAGETBL;
}
}
pt = (page_table_entry *)PDE_TO_TA(pd[index]);
index = VADDR_TO_PTENT(va);
// now, fill in the pentry
PutPageTableEntryInTable(&pt[index],
physicalAddress, attributes, 0, IS_KERNEL_ADDRESS(virtualAddress));
arch_cpu_invalidate_TLB_range(va, va);
return B_OK;
#if 0
// check to see if a page table exists for this range
int index = VADDR_TO_PDENT(virtualAddress);
if ((fPageHolePageDir[index] & M68K_PDE_PRESENT) == 0) {
phys_addr_t pgtable;
page_directory_entry *e;
// we need to allocate a pgtable
pgtable = get_free_page(args);
// pgtable is in pages, convert to physical address
pgtable *= B_PAGE_SIZE;
TRACE("M68KPagingMethod040::MapEarly(): asked for free page for "
"pgtable. %#" B_PRIxPHYSADDR "\n", pgtable);
// put it in the pgdir
e = &fPageHolePageDir[index];
PutPageTableInPageDir(e, pgtable, attributes);
// zero it out in its new mapping
memset((unsigned int*)((addr_t)fPageHole
+ (virtualAddress / B_PAGE_SIZE / 1024) * B_PAGE_SIZE),
0, B_PAGE_SIZE);
}
ASSERT_PRINT(
(fPageHole[virtualAddress / B_PAGE_SIZE] & M68K_PTE_PRESENT) == 0,
"virtual address: %#" B_PRIxADDR ", pde: %#" B_PRIx32
", existing pte: %#" B_PRIx32, virtualAddress, fPageHolePageDir[index],
fPageHole[virtualAddress / B_PAGE_SIZE]);
#endif
return B_OK;
}
bool
M68KPagingMethod040::IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection)
{
#warning M68K: WRITEME
return false;
}
void
M68KPagingMethod040::SetPageRoot(uint32 pageRoot)
{
#warning M68K:TODO:override this for 060
uint32 rp;
rp = pageRoot & ~((1 << 9) - 1);
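// mask off the low 9 bits: the 040 root pointer must be 512-byte aligned
// (the root table holds 128 4-byte descriptors)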
asm volatile( \
"movec %0,%%srp\n" \
"movec %0,%%urp\n" \
: : "d"(rp));
}
/*static*/ void
M68KPagingMethod040::PutPageDirInPageRoot(page_root_entry* entry,
phys_addr_t pgdirPhysical, uint32 attributes)
{
*entry = TA_TO_PREA(pgdirPhysical)
| DT_DIR; // it's a page directory entry
// ToDo: we ignore the attributes of the page table - for compatibility
// with BeOS we allow having user accessible areas in the kernel address
// space. This is currently being used by some drivers, mainly for the
// frame buffer. Our current real time data implementation makes use of
// this fact, too.
// We might want to get rid of this possibility one day, especially if
// we intend to port it to a platform that does not support this.
//table.user = 1;
//table.rw = 1;
}
/*static*/ void
M68KPagingMethod040::PutPageTableInPageDir(page_directory_entry* entry,
phys_addr_t pgtablePhysical, uint32 attributes)
{
*entry = TA_TO_PDEA(pgtablePhysical)
| DT_DIR; // resident descriptor pointing to a page table
}
/*static*/ void
M68KPagingMethod040::PutPageTableEntryInTable(page_table_entry* entry,
phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
bool globalPage)
{
page_table_entry page = TA_TO_PTEA(physicalAddress)
| DT_PAGE
#ifdef PAGE_HAS_GLOBAL_BIT
| (globalPage ? M68K_PTE_GLOBAL : 0)
#endif
| MemoryTypeToPageTableEntryFlags(memoryType);
// if the page is user accessible, it's automatically
// accessible in kernel space, too (but with the same
// protection)
if ((attributes & B_USER_PROTECTION) == 0) {
page |= M68K_PTE_SUPERVISOR;
if ((attributes & B_KERNEL_WRITE_AREA) == 0)
page |= M68K_PTE_READONLY;
} else if ((attributes & B_WRITE_AREA) == 0)
page |= M68K_PTE_READONLY;
// put it in the page table
*(volatile page_table_entry*)entry = page;
}
/*static*/ void
M68KPagingMethod040::_EarlyPreparePageTables(page_table_entry* pageTables,
addr_t address, size_t size)
{
memset(pageTables, 0, B_PAGE_SIZE *
(size / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL * NUM_PAGETBL_PER_PAGE)));
// put the array of pgtables directly into the kernel pagedir
// these will be wired and kept mapped into virtual space to be easy to get
// to
// note the bootloader allocates all page directories for us
// as a contiguous block.
// we also still have transparent translation enabled, va==pa.
{
size_t index;
addr_t virtualTable = (addr_t)pageTables;
page_root_entry *pr
= M68KPagingMethod040::Method()->fKernelVirtualPageRoot;
page_directory_entry *pd;
page_directory_entry *e;
for (size_t i = 0; i < (size / (B_PAGE_SIZE * NUM_PAGEENT_PER_TBL));
i++, virtualTable += SIZ_PAGETBL) {
// early_query handles non-page-aligned addresses
phys_addr_t physicalTable = 0;
_EarlyQuery(virtualTable, &physicalTable);
index = VADDR_TO_PRENT(address) + i / NUM_DIRENT_PER_TBL;
pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
e = &pd[(VADDR_TO_PDENT(address) + i) % NUM_DIRENT_PER_TBL];
PutPageTableInPageDir(e, physicalTable,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
}
}
}
//! TODO: currently assumes this translation map is active
/*static*/ status_t
M68KPagingMethod040::_EarlyQuery(addr_t virtualAddress,
phys_addr_t *_physicalAddress)
{
M68KPagingMethod040* method = M68KPagingMethod040::Method();
page_root_entry *pr = method->fKernelVirtualPageRoot;
page_directory_entry *pd;
page_indirect_entry *pi;
page_table_entry *pt;
addr_t pa;
int32 index;
status_t err = B_ERROR; // no pagetable here
TRACE("%s(%p,)\n", __FUNCTION__, virtualAddress);
// this is used before the vm is fully up, it uses the
// transparent translation of the first 256MB
// as set up by the bootloader.
index = VADDR_TO_PRENT(virtualAddress);
TRACE("%s: pr[%d].type %d\n", __FUNCTION__, index, PRE_TYPE(pr[index]));
if (pr && PRE_TYPE(pr[index]) == DT_ROOT) {
pa = PRE_TO_TA(pr[index]);
// pa == va when in TT
// and no need to fiddle with cache
pd = (page_directory_entry *)pa;
index = VADDR_TO_PDENT(virtualAddress);
TRACE("%s: pd[%d].type %d\n", __FUNCTION__, index,
pd?(PDE_TYPE(pd[index])):-1);
if (pd && PDE_TYPE(pd[index]) == DT_DIR) {
pa = PDE_TO_TA(pd[index]);
pt = (page_table_entry *)pa;
index = VADDR_TO_PTENT(virtualAddress);
TRACE("%s: pt[%d].type %d\n", __FUNCTION__, index,
pt?(PTE_TYPE(pt[index])):-1);
if (pt && PTE_TYPE(pt[index]) == DT_INDIRECT) {
pi = (page_indirect_entry *)pt;
pa = PIE_TO_TA(pi[index]);
pt = (page_table_entry *)pa;
index = 0; // single descriptor
}
if (pt && PIE_TYPE(pt[index]) == DT_PAGE) {
*_physicalAddress = PTE_TO_PA(pt[index]);
// we should only be passed page va, but just in case.
*_physicalAddress += virtualAddress % B_PAGE_SIZE;
err = B_OK;
}
}
}
return err;
#if 0
int index = VADDR_TO_PDENT(virtualAddress);
if ((method->PageHolePageDir()[index] & M68K_PDE_PRESENT) == 0) {
// no pagetable here
return B_ERROR;
}
page_table_entry* entry = method->PageHole() + virtualAddress / B_PAGE_SIZE;
if ((*entry & M68K_PTE_PRESENT) == 0) {
// page mapping not valid
return B_ERROR;
}
*_physicalAddress = *entry & M68K_PTE_ADDRESS_MASK;
return B_OK;
#endif
}

@ -0,0 +1,189 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_M68K_PAGING_040_M68K_PAGING_METHOD_040_H
#define KERNEL_ARCH_M68K_PAGING_040_M68K_PAGING_METHOD_040_H
#include "paging/040/paging.h"
#include "paging/M68KPagingMethod.h"
#include "paging/M68KPagingStructures.h"
class TranslationMapPhysicalPageMapper;
class M68KPhysicalPageMapper;
class M68KPagingMethod040 : public M68KPagingMethod {
public:
M68KPagingMethod040();
virtual ~M68KPagingMethod040();
virtual status_t Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper);
virtual status_t InitPostArea(kernel_args* args);
virtual status_t CreateTranslationMap(bool kernel,
VMTranslationMap** _map);
virtual status_t MapEarly(kernel_args* args,
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*));
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection);
virtual void SetPageRoot(uint32 pageRoot);
#if 0
inline page_table_entry* PageHole() const
{ return fPageHole; }
inline page_directory_entry* PageHolePageDir() const
{ return fPageHolePageDir; }
#endif
inline uint32 KernelPhysicalPageRoot() const
{ return fKernelPhysicalPageRoot; }
inline page_directory_entry* KernelVirtualPageRoot() const
{ return fKernelVirtualPageRoot; }
inline M68KPhysicalPageMapper* PhysicalPageMapper() const
{ return fPhysicalPageMapper; }
inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
{ return fKernelPhysicalPageMapper; }
static M68KPagingMethod040* Method();
static void PutPageDirInPageRoot(
page_root_entry* entry,
phys_addr_t pgdirPhysical,
uint32 attributes);
static void PutPageTableInPageDir(
page_directory_entry* entry,
phys_addr_t pgtablePhysical,
uint32 attributes);
static void PutPageTableEntryInTable(
page_table_entry* entry,
phys_addr_t physicalAddress,
uint32 attributes, uint32 memoryType,
bool globalPage);
#if 1
static page_table_entry SetPageTableEntry(page_table_entry* entry,
page_table_entry newEntry);
static page_table_entry SetPageTableEntryFlags(page_table_entry* entry,
uint32 flags);
static page_table_entry TestAndSetPageTableEntry(
page_table_entry* entry,
page_table_entry newEntry,
page_table_entry oldEntry);
static page_table_entry ClearPageTableEntry(page_table_entry* entry);
static page_table_entry ClearPageTableEntryFlags(
page_table_entry* entry, uint32 flags);
#endif
static uint32 MemoryTypeToPageTableEntryFlags(
uint32 memoryType);
private:
struct PhysicalPageSlotPool;
friend struct PhysicalPageSlotPool;
private:
static void _EarlyPreparePageTables(
page_table_entry* pageTables,
addr_t address, size_t size);
static status_t _EarlyQuery(addr_t virtualAddress,
phys_addr_t *_physicalAddress);
private:
#if 0
page_table_entry* fPageHole;
page_directory_entry* fPageHolePageDir;
#endif
uint32 fKernelPhysicalPageRoot;
page_directory_entry* fKernelVirtualPageRoot;
M68KPhysicalPageMapper* fPhysicalPageMapper;
TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
};
/*static*/ inline M68KPagingMethod040*
M68KPagingMethod040::Method()
{
return static_cast<M68KPagingMethod040*>(gM68KPagingMethod);
}
#if 1
/*static*/ inline page_table_entry
M68KPagingMethod040::SetPageTableEntry(page_table_entry* entry,
page_table_entry newEntry)
{
return atomic_set((int32*)entry, newEntry);
}
/*static*/ inline page_table_entry
M68KPagingMethod040::SetPageTableEntryFlags(page_table_entry* entry,
uint32 flags)
{
return atomic_or((int32*)entry, flags);
}
/*static*/ inline page_table_entry
M68KPagingMethod040::TestAndSetPageTableEntry(page_table_entry* entry,
page_table_entry newEntry, page_table_entry oldEntry)
{
return atomic_test_and_set((int32*)entry, newEntry, oldEntry);
}
/*static*/ inline page_table_entry
M68KPagingMethod040::ClearPageTableEntry(page_table_entry* entry)
{
return SetPageTableEntry(entry, DFL_PAGEENT_VAL);
}
/*static*/ inline page_table_entry
M68KPagingMethod040::ClearPageTableEntryFlags(page_table_entry* entry, uint32 flags)
{
return atomic_and((int32*)entry, ~flags);
}
#endif
/*static*/ inline uint32
M68KPagingMethod040::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
{
// x86:
// ATM we only handle the uncacheable and write-through type explicitly. For
// all other types we rely on the MTRRs to be set up correctly. Since we set
// the default memory type to write-back and since the uncacheable type in
// the PTE overrides any MTRR attribute (though, as per the specs, that is
// not recommended for performance reasons), this reduces the work we
// actually *have* to do with the MTRRs to setting the remaining types
// (usually only write-combining for the frame buffer).
#warning M68K: Check this
switch (memoryType) {
case B_MTR_UC:
return CM_DISABLED_SERIALIZED | CM_CACHABLE_WRITETHROUGH;
case B_MTR_WC:
return 0;
case B_MTR_WT:
return CM_CACHABLE_WRITETHROUGH;
case B_MTR_WP:
case B_MTR_WB:
default:
return 0;
}
}
#endif // KERNEL_ARCH_M68K_PAGING_040_M68K_PAGING_METHOD_040_H

@ -0,0 +1,135 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "paging/040/M68KPagingStructures040.h"
#include <stdlib.h>
#include <heap.h>
#include <util/AutoLock.h>
// Accessor class to reuse the SinglyLinkedListLink of DeferredDeletable for
// M68KPagingStructures040.
struct PagingStructuresGetLink {
private:
typedef SinglyLinkedListLink<M68KPagingStructures040> Link;
public:
inline Link* operator()(M68KPagingStructures040* element) const
{
return (Link*)element->GetSinglyLinkedListLink();
}
inline const Link* operator()(
const M68KPagingStructures040* element) const
{
return (const Link*)element->GetSinglyLinkedListLink();
}
};
typedef SinglyLinkedList<M68KPagingStructures040, PagingStructuresGetLink>
PagingStructuresList;
static PagingStructuresList sPagingStructuresList;
static spinlock sPagingStructuresListLock;
M68KPagingStructures040::M68KPagingStructures040()
:
pgroot_virt(NULL)
{
}
M68KPagingStructures040::~M68KPagingStructures040()
{
// free the page root
free(pgroot_virt);
}
void
M68KPagingStructures040::Init(page_root_entry* virtualPageRoot,
phys_addr_t physicalPageRoot, page_root_entry* kernelPageRoot)
{
pgroot_virt = virtualPageRoot;
pgroot_phys = physicalPageRoot;
// zero out the bottom portion of the new pgroot
memset(pgroot_virt + FIRST_USER_PGROOT_ENT, 0,
NUM_USER_PGROOT_ENTS * sizeof(page_root_entry));
// insert this new map into the map list
{
int state = disable_interrupts();
acquire_spinlock(&sPagingStructuresListLock);
// copy the top portion of the page dir from the kernel page dir
if (kernelPageRoot != NULL) {
memcpy(pgroot_virt + FIRST_KERNEL_PGROOT_ENT,
kernelPageRoot + FIRST_KERNEL_PGROOT_ENT,
NUM_KERNEL_PGROOT_ENTS * sizeof(page_root_entry));
}
sPagingStructuresList.Add(this);
release_spinlock(&sPagingStructuresListLock);
restore_interrupts(state);
}
}
void
M68KPagingStructures040::Delete()
{
// remove from global list
InterruptsSpinLocker locker(sPagingStructuresListLock);
sPagingStructuresList.Remove(this);
locker.Unlock();
#if 0
// this sanity check can be enabled when corruption due to
// overwriting an active page directory is suspected
uint32 activePageDirectory;
read_cr3(activePageDirectory);
if (activePageDirectory == pgdir_phys)
panic("deleting a still active page directory\n");
#endif
if (are_interrupts_enabled())
delete this;
else
deferred_delete(this);
}
/*static*/ void
M68KPagingStructures040::StaticInit()
{
B_INITIALIZE_SPINLOCK(&sPagingStructuresListLock);
new (&sPagingStructuresList) PagingStructuresList;
}
/*static*/ void
M68KPagingStructures040::UpdateAllPageDirs(int index,
page_root_entry entry)
{
#warning M68K: TODO: allocate all kernel pgdirs at boot and remove this (also dont remove them anymore from unmap)
#warning M68K:FIXME
InterruptsSpinLocker locker(sPagingStructuresListLock);
PagingStructuresList::Iterator it = sPagingStructuresList.GetIterator();
while (M68KPagingStructures040* info = it.Next())
info->pgroot_virt[index] = entry;
}

@ -0,0 +1,31 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_M68K_PAGING_040_M68K_PAGING_STRUCTURES_040_H
#define KERNEL_ARCH_M68K_PAGING_040_M68K_PAGING_STRUCTURES_040_H
#include "paging/040/paging.h"
#include "paging/M68KPagingStructures.h"
struct M68KPagingStructures040 : M68KPagingStructures {
page_root_entry* pgroot_virt;
M68KPagingStructures040();
virtual ~M68KPagingStructures040();
void Init(page_root_entry* virtualPageRoot,
phys_addr_t physicalPageRoot,
page_root_entry* kernelPageRoot);
virtual void Delete();
static void StaticInit();
static void UpdateAllPageDirs(int index,
page_root_entry entry);
};
#endif // KERNEL_ARCH_M68K_PAGING_040_M68K_PAGING_STRUCTURES_040_H

File diff suppressed because it is too large.

@ -0,0 +1,68 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_M68K_PAGING_040_M68K_VM_TRANSLATION_MAP_040_H
#define KERNEL_ARCH_M68K_PAGING_040_M68K_VM_TRANSLATION_MAP_040_H
#include "paging/M68KVMTranslationMap.h"
struct M68KPagingStructures040;
struct M68KVMTranslationMap040 : M68KVMTranslationMap {
M68KVMTranslationMap040();
virtual ~M68KVMTranslationMap040();
status_t Init(bool kernel);
virtual size_t MaxPagesNeededToMap(addr_t start,
addr_t end) const;
virtual status_t Map(addr_t virtualAddress,
phys_addr_t physicalAddress,
uint32 attributes, uint32 memoryType,
vm_page_reservation* reservation);
virtual status_t Unmap(addr_t start, addr_t end);
virtual status_t UnmapPage(VMArea* area, addr_t address,
bool updatePageQueue);
virtual void UnmapPages(VMArea* area, addr_t base,
size_t size, bool updatePageQueue);
virtual void UnmapArea(VMArea* area,
bool deletingAddressSpace,
bool ignoreTopCachePageFlags);
virtual status_t Query(addr_t virtualAddress,
phys_addr_t* _physicalAddress,
uint32* _flags);
virtual status_t QueryInterrupt(addr_t virtualAddress,
phys_addr_t* _physicalAddress,
uint32* _flags);
virtual status_t Protect(addr_t base, addr_t top,
uint32 attributes, uint32 memoryType);
virtual status_t ClearFlags(addr_t virtualAddress,
uint32 flags);
virtual bool ClearAccessedAndModified(
VMArea* area, addr_t address,
bool unmapIfUnaccessed,
bool& _modified);
virtual M68KPagingStructures* PagingStructures() const;
inline M68KPagingStructures040* PagingStructures040() const
{ return fPagingStructures; }
void* MapperGetPageTableAt(
phys_addr_t physicalAddress,
bool indirect=false);
private:
M68KPagingStructures040* fPagingStructures;
};
#endif // KERNEL_ARCH_M68K_PAGING_040_M68K_VM_TRANSLATION_MAP_040_H

@ -0,0 +1,127 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2005-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef _KERNEL_ARCH_M68K_PAGING_040_PAGING_H
#define _KERNEL_ARCH_M68K_PAGING_040_PAGING_H
#include <SupportDefs.h>
#include <int.h>
#include <kernel.h>
#include <arch_040_mmu.h>
/* (mmu_man) Implementation details on 68030 and others:
Unlike on x86 we can't just switch the context to another team by just
setting a register to another page directory, since we only have one
page table containing both kernel and user address mappings.
The 030 supports arbitrary layout of the page directory tree, including
a 1-bit first level (2 entries top level table) that would map kernel
and user land at a single place. But 040 and later only support a fixed
splitting of 7/7/6 for 4K pages.
Since 68k SMP hardware is rare enough we don't want to support them, we
can take some shortcuts.
As we don't want a separate user and kernel space, we'll use a single
table. With the 7/7/6 split the 2nd level would require 32KB of tables,
which is small enough to not want to use the list hack from x86.
XXX: we use the hack for now, check later
Since page directories/tables don't fit exactly a page, we stuff more
than one per page, and allocate them all at once, and add them at the
same time to the tree. So we guarantee all higher-level entries modulo
the number of tables/page are either invalid or present.
*/
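/* A sketch of the fixed 7/7/6 split of a 32-bit virtual address for 4K
pages, to make the index macros below easier to follow (the authoritative
VADDR_TO_PRENT/PDENT/PTENT macros are presumably defined in arch_040_mmu.h):

bits 31-25: root index (7 bits, 128 entries)
bits 24-18: directory index (7 bits, 128 entries)
bits 17-12: page table index (6 bits, 64 entries)
bits 11-0: page offset (12 bits, 4K pages)

For example, va 0x80423000 decomposes into root entry 64, directory
entry 16, page table entry 35, offset 0.
*/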
// 4 MB of iospace
////#define IOSPACE_SIZE (4*1024*1024)
//#define IOSPACE_SIZE (16*1024*1024)
// 256K = 2^6*4K
//#define IOSPACE_CHUNK_SIZE (NUM_PAGEENT_PER_TBL*B_PAGE_SIZE)
#define PAGE_INVALIDATE_CACHE_SIZE 64
#define FIRST_USER_PGROOT_ENT (VADDR_TO_PRENT(USER_BASE))
#define FIRST_USER_PGDIR_ENT (VADDR_TO_PDENT(USER_BASE))
#define NUM_USER_PGROOT_ENTS (VADDR_TO_PRENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64 * 128)))
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64)))
#define FIRST_KERNEL_PGROOT_ENT (VADDR_TO_PRENT(KERNEL_BASE))
#define FIRST_KERNEL_PGDIR_ENT (VADDR_TO_PDENT(KERNEL_BASE))
#define NUM_KERNEL_PGROOT_ENTS (VADDR_TO_PRENT(KERNEL_SIZE))
#define NUM_KERNEL_PGDIR_ENTS (VADDR_TO_PDENT(KERNEL_SIZE))
//#define IS_KERNEL_MAP(map) (map->arch_data->rtdir_phys == sKernelPhysicalPageRoot)
// page tables are allocated as groups, so better use them all.
static const size_t kPageTableAlignment = B_PAGE_SIZE
* NUM_PAGETBL_PER_PAGE * NUM_PAGEENT_PER_TBL;
static const size_t kPageDirAlignment = B_PAGE_SIZE
* NUM_PAGEENT_PER_TBL
* NUM_DIRTBL_PER_PAGE * NUM_DIRENT_PER_TBL;
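// Illustrative values, assuming 4-byte 040 descriptors (so 16 page tables
// respectively 8 directory tables fit in one 4K page): kPageTableAlignment
// works out to 4K * 16 * 64 = 4 MB and kPageDirAlignment to
// 4K * 64 * 8 * 128 = 256 MB, i.e. the address range covered by one page
// worth of tables at each level.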
#if 0
#define VADDR_TO_PDENT(va) (((va) / B_PAGE_SIZE) / 1024)
#define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 1024)
// page directory entry bits
#define M68K_PDE_PRESENT 0x00000001
#define M68K_PDE_WRITABLE 0x00000002
#define M68K_PDE_USER 0x00000004
#define M68K_PDE_WRITE_THROUGH 0x00000008
#define M68K_PDE_CACHING_DISABLED 0x00000010
#define M68K_PDE_ACCESSED 0x00000020
#define M68K_PDE_IGNORED1 0x00000040
#define M68K_PDE_RESERVED1 0x00000080
#define M68K_PDE_IGNORED2 0x00000100
#define M68K_PDE_IGNORED3 0x00000200
#define M68K_PDE_IGNORED4 0x00000400
#define M68K_PDE_IGNORED5 0x00000800
#define M68K_PDE_ADDRESS_MASK 0xfffff000
// page table entry bits
#define M68K_PTE_PRESENT 0x00000001
#define M68K_PTE_WRITABLE 0x00000002
#define M68K_PTE_USER 0x00000004
#define M68K_PTE_WRITE_THROUGH 0x00000008
#define M68K_PTE_CACHING_DISABLED 0x00000010
#define M68K_PTE_ACCESSED 0x00000020
#define M68K_PTE_DIRTY 0x00000040
#define M68K_PTE_PAT 0x00000080
#define M68K_PTE_GLOBAL 0x00000100
#define M68K_PTE_IGNORED1 0x00000200
#define M68K_PTE_IGNORED2 0x00000400
#define M68K_PTE_IGNORED3 0x00000800
#define M68K_PTE_ADDRESS_MASK 0xfffff000
#define M68K_PTE_PROTECTION_MASK (M68K_PTE_WRITABLE | M68K_PTE_USER)
#define M68K_PTE_MEMORY_TYPE_MASK (M68K_PTE_WRITE_THROUGH \
| M68K_PTE_CACHING_DISABLED)
#define FIRST_USER_PGDIR_ENT (VADDR_TO_PDENT(USER_BASE))
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, \
B_PAGE_SIZE * 1024)))
#define FIRST_KERNEL_PGDIR_ENT (VADDR_TO_PDENT(KERNEL_BASE))
#define NUM_KERNEL_PGDIR_ENTS (VADDR_TO_PDENT(KERNEL_SIZE))
static const size_t kPageTableAlignment = 1024 * B_PAGE_SIZE;
typedef uint32 page_table_entry;
typedef uint32 page_directory_entry;
#endif // 0
#endif // _KERNEL_ARCH_M68K_PAGING_040_PAGING_H

@ -0,0 +1,15 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/M68KPagingMethod.h"
M68KPagingMethod* gM68KPagingMethod;
M68KPagingMethod::~M68KPagingMethod()
{
}

@ -0,0 +1,46 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_M68K_PAGING_M68K_PAGING_METHOD_H
#define KERNEL_ARCH_M68K_PAGING_M68K_PAGING_METHOD_H
#include <SupportDefs.h>
struct kernel_args;
struct VMPhysicalPageMapper;
struct VMTranslationMap;
class M68KPagingMethod {
public:
virtual ~M68KPagingMethod();
virtual status_t Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
= 0;
virtual status_t InitPostArea(kernel_args* args) = 0;
virtual status_t CreateTranslationMap(bool kernel,
VMTranslationMap** _map) = 0;
virtual status_t MapEarly(kernel_args* args,
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*))
= 0;
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection) = 0;
virtual void SetPageRoot(uint32 pageRoot) = 0;
};
extern M68KPagingMethod* gM68KPagingMethod;
#endif // KERNEL_ARCH_M68K_PAGING_M68K_PAGING_METHOD_H

@ -0,0 +1,20 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/M68KPagingStructures.h"
M68KPagingStructures::M68KPagingStructures()
:
ref_count(1),
active_on_cpus(0)
{
}
M68KPagingStructures::~M68KPagingStructures()
{
}

@ -0,0 +1,49 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2005-2009, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#ifndef KERNEL_ARCH_M68K_PAGING_M68K_PAGING_STRUCTURES_H
#define KERNEL_ARCH_M68K_PAGING_M68K_PAGING_STRUCTURES_H
#include <SupportDefs.h>
#include <heap.h>
struct M68KPagingStructures : DeferredDeletable {
uint32 pgroot_phys;
vint32 ref_count;
vint32 active_on_cpus;
// mask indicating on which CPUs the map is currently used
M68KPagingStructures();
virtual ~M68KPagingStructures();
inline void AddReference();
inline void RemoveReference();
virtual void Delete() = 0;
};
inline void
M68KPagingStructures::AddReference()
{
atomic_add(&ref_count, 1);
}
inline void
M68KPagingStructures::RemoveReference()
{
if (atomic_add(&ref_count, -1) == 1)
Delete();
}
#endif // KERNEL_ARCH_M68K_PAGING_M68K_PAGING_STRUCTURES_H

@ -0,0 +1,147 @@
/*
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Distributed under the terms of the MIT License.
*
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
* Distributed under the terms of the NewOS License.
*/
#include "paging/M68KVMTranslationMap.h"
#include <thread.h>
#include <smp.h>
#include "paging/M68KPagingStructures.h"
//#define TRACE_M68K_VM_TRANSLATION_MAP
#ifdef TRACE_M68K_VM_TRANSLATION_MAP
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
M68KVMTranslationMap::M68KVMTranslationMap()
:
fPageMapper(NULL),
fInvalidPagesCount(0)
{
}
M68KVMTranslationMap::~M68KVMTranslationMap()
{
}
status_t
M68KVMTranslationMap::Init(bool kernel)
{
fIsKernelMap = kernel;
return B_OK;
}
/*! Acquires the map's recursive lock, and resets the invalidate pages counter
in case it's the first locking recursion.
*/
bool
M68KVMTranslationMap::Lock()
{
TRACE("%p->M68KVMTranslationMap::Lock()\n", this);
recursive_lock_lock(&fLock);
if (recursive_lock_get_recursion(&fLock) == 1) {
// we were the first one to grab the lock
TRACE("clearing invalidated page count\n");
fInvalidPagesCount = 0;
}
return true;
}
/*! Unlocks the map and, if we are actually losing the recursive lock,
flushes all pending changes of this map (i.e. flushes TLB caches as
needed).
*/
void
M68KVMTranslationMap::Unlock()
{
TRACE("%p->M68KVMTranslationMap::Unlock()\n", this);
if (recursive_lock_get_recursion(&fLock) == 1) {
// we're about to release it for the last time
Flush();
}
recursive_lock_unlock(&fLock);
}
addr_t
M68KVMTranslationMap::MappedSize() const
{
return fMapCount;
}
void
M68KVMTranslationMap::Flush()
{
if (fInvalidPagesCount <= 0)
return;
Thread* thread = thread_get_current_thread();
thread_pin_to_current_cpu(thread);
if (fInvalidPagesCount > PAGE_INVALIDATE_CACHE_SIZE) {
// invalidate all pages
TRACE("flush_tmap: %d pages to invalidate, invalidate all\n",
fInvalidPagesCount);
if (fIsKernelMap) {
arch_cpu_global_TLB_invalidate();
smp_send_broadcast_ici(SMP_MSG_GLOBAL_INVALIDATE_PAGES, 0, 0, 0,
NULL, SMP_MSG_FLAG_SYNC);
} else {
cpu_status state = disable_interrupts();
arch_cpu_user_TLB_invalidate();
restore_interrupts(state);
int cpu = smp_get_current_cpu();
uint32 cpuMask = PagingStructures()->active_on_cpus
& ~((uint32)1 << cpu);
if (cpuMask != 0) {
smp_send_multicast_ici(cpuMask, SMP_MSG_USER_INVALIDATE_PAGES,
0, 0, 0, NULL, SMP_MSG_FLAG_SYNC);
}
}
} else {
TRACE("flush_tmap: %d pages to invalidate, invalidate list\n",
fInvalidPagesCount);
arch_cpu_invalidate_TLB_list(fInvalidPages, fInvalidPagesCount);
if (fIsKernelMap) {
smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_LIST,
(uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
SMP_MSG_FLAG_SYNC);
} else {
int cpu = smp_get_current_cpu();
uint32 cpuMask = PagingStructures()->active_on_cpus
& ~((uint32)1 << cpu);
if (cpuMask != 0) {
smp_send_multicast_ici(cpuMask, SMP_MSG_INVALIDATE_PAGE_LIST,
(uint32)fInvalidPages, fInvalidPagesCount, 0, NULL,
SMP_MSG_FLAG_SYNC);
}
}
}
fInvalidPagesCount = 0;
thread_unpin_from_current_cpu(thread);
}

@ -0,0 +1,54 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_M68K_M68K_VM_TRANSLATION_MAP_H
#define KERNEL_ARCH_M68K_M68K_VM_TRANSLATION_MAP_H
#include <vm/VMTranslationMap.h>
#define PAGE_INVALIDATE_CACHE_SIZE 64
struct M68KPagingStructures;
class TranslationMapPhysicalPageMapper;
struct M68KVMTranslationMap : VMTranslationMap {
M68KVMTranslationMap();
virtual ~M68KVMTranslationMap();
status_t Init(bool kernel);
virtual bool Lock();
virtual void Unlock();
virtual addr_t MappedSize() const;
virtual void Flush();
virtual M68KPagingStructures* PagingStructures() const = 0;
inline void InvalidatePage(addr_t address);
protected:
TranslationMapPhysicalPageMapper* fPageMapper;
int fInvalidPagesCount;
addr_t fInvalidPages[PAGE_INVALIDATE_CACHE_SIZE];
bool fIsKernelMap;
};
void
M68KVMTranslationMap::InvalidatePage(addr_t address)
{
if (fInvalidPagesCount < PAGE_INVALIDATE_CACHE_SIZE)
fInvalidPages[fInvalidPagesCount] = address;
fInvalidPagesCount++;
}
#endif // KERNEL_ARCH_M68K_M68K_VM_TRANSLATION_MAP_H

@ -0,0 +1,16 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#include "paging/m68k_physical_page_mapper.h"
TranslationMapPhysicalPageMapper::~TranslationMapPhysicalPageMapper()
{
}
M68KPhysicalPageMapper::~M68KPhysicalPageMapper()
{
}

@ -0,0 +1,40 @@
/*
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_M68K_PAGING_M68K_PHYSICAL_PAGE_MAPPER_H
#define KERNEL_ARCH_M68K_PAGING_M68K_PHYSICAL_PAGE_MAPPER_H
#include <vm/VMTranslationMap.h>
struct kernel_args;
struct vm_translation_map_ops;
class TranslationMapPhysicalPageMapper {
public:
virtual ~TranslationMapPhysicalPageMapper();
virtual void Delete() = 0;
virtual void* GetPageTableAt(phys_addr_t physicalAddress) = 0;
// Must be invoked with thread pinned to current CPU.
};
class M68KPhysicalPageMapper : public VMPhysicalPageMapper {
public:
virtual ~M68KPhysicalPageMapper();
virtual status_t CreateTranslationMapPhysicalPageMapper(
TranslationMapPhysicalPageMapper** _mapper)
= 0;
virtual void* InterruptGetPageTableAt(
phys_addr_t physicalAddress) = 0;
};
#endif // KERNEL_ARCH_M68K_PAGING_M68K_PHYSICAL_PAGE_MAPPER_H

@ -0,0 +1,770 @@
/*
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
/*! Implementation of a physical page mapping strategy (PhysicalPageMapper,
TranslationMapPhysicalPageMapper) suitable for machines with a lot of
memory, i.e. more than we can afford to completely map into the kernel
address space.
m68k: WRITEME: we use more than 1 pgtable/page
x86:
We allocate a single page table (one page) that can map 1024 pages and
a corresponding virtual address space region (4 MB). Each of those 1024
slots can map a physical page. We reserve a fixed amount of slots per CPU.
They will be used for physical operations on that CPU (memset()/memcpy()
and {get,put}_physical_page_current_cpu()). A few slots we reserve for each
translation map (TranslationMapPhysicalPageMapper). Those will only be used
with the translation map locked, mapping a page table page. The remaining
slots remain in the global pool and are given out by get_physical_page().
When we run out of slots, we allocate another page table (and virtual
address space region).
*/
#include "paging/m68k_physical_page_mapper_large_memory.h"
#include <new>
#include <AutoDeleter.h>
#include <cpu.h>
#include <lock.h>
#include <smp.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include "paging/m68k_physical_page_mapper.h"
#include "paging/M68KPagingStructures.h"
#include "paging/M68KVMTranslationMap.h"
// The number of slots we reserve per translation map for mapping page tables.
// One slot would suffice, since the map is locked while mapping a page table,
// but we re-use several slots on an LRU basis so that we can keep the mappings
// around a little longer and thus avoid re-mapping.
#define SLOTS_PER_TRANSLATION_MAP 4
#define USER_SLOTS_PER_CPU 16
#define KERNEL_SLOTS_PER_CPU 16
#define TOTAL_SLOTS_PER_CPU (USER_SLOTS_PER_CPU \
+ KERNEL_SLOTS_PER_CPU + 1)
// one slot is for use in interrupts
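// Every CPU thus permanently claims TOTAL_SLOTS_PER_CPU (16 + 16 + 1 = 33)
// slots from the global pool in PhysicalPageOpsCPUData::Init(); the initial
// pool therefore has to provide at least that many free slots per CPU, since
// _GetInitialSlot() uses GetSlot(false, ...) and panics if none is available.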
using M68KLargePhysicalPageMapper::PhysicalPageSlot;
using M68KLargePhysicalPageMapper::PhysicalPageSlotPool;
class PhysicalPageSlotQueue {
public:
PhysicalPageSlotQueue();
inline PhysicalPageSlot* GetSlot();
inline void GetSlots(PhysicalPageSlot*& slot1,
PhysicalPageSlot*& slot2);
inline void PutSlot(PhysicalPageSlot* slot);
inline void PutSlots(PhysicalPageSlot* slot1,
PhysicalPageSlot* slot2);
private:
PhysicalPageSlot* fSlots;
ConditionVariable fFreeSlotCondition;
ConditionVariable fFreeSlotsCondition;
};
struct PhysicalPageOpsCPUData {
PhysicalPageSlotQueue user;
// Used when copying from/to user memory. This can cause a page fault
// which might need to memcpy()/memset() a page when being handled.
PhysicalPageSlotQueue kernel;
	// Used when memset()ing or memcpy()ing non-user memory.
PhysicalPageSlot* interruptSlot;
void Init();
private:
static PhysicalPageSlot* _GetInitialSlot();
};
// #pragma mark -
class LargeMemoryTranslationMapPhysicalPageMapper
: public TranslationMapPhysicalPageMapper {
public:
LargeMemoryTranslationMapPhysicalPageMapper();
virtual ~LargeMemoryTranslationMapPhysicalPageMapper();
status_t Init();
virtual void Delete();
virtual void* GetPageTableAt(phys_addr_t physicalAddress);
private:
struct page_slot {
PhysicalPageSlot* slot;
phys_addr_t physicalAddress;
cpu_mask_t valid;
};
page_slot fSlots[SLOTS_PER_TRANSLATION_MAP];
int32 fSlotCount; // must be a power of 2
int32 fNextSlot;
};
class LargeMemoryPhysicalPageMapper : public M68KPhysicalPageMapper {
public:
LargeMemoryPhysicalPageMapper();
status_t Init(kernel_args* args,
PhysicalPageSlotPool* initialPool,
TranslationMapPhysicalPageMapper*&
_kernelPageMapper);
virtual status_t CreateTranslationMapPhysicalPageMapper(
TranslationMapPhysicalPageMapper** _mapper);
virtual void* InterruptGetPageTableAt(
phys_addr_t physicalAddress);
virtual status_t GetPage(phys_addr_t physicalAddress,
addr_t* virtualAddress, void** handle);
virtual status_t PutPage(addr_t virtualAddress, void* handle);
virtual status_t GetPageCurrentCPU(phys_addr_t physicalAddress,
addr_t* virtualAddress, void** handle);
virtual status_t PutPageCurrentCPU(addr_t virtualAddress,
void* handle);
virtual status_t GetPageDebug(phys_addr_t physicalAddress,
addr_t* virtualAddress, void** handle);
virtual status_t PutPageDebug(addr_t virtualAddress,
void* handle);
virtual status_t MemsetPhysical(phys_addr_t address, int value,
phys_size_t length);
virtual status_t MemcpyFromPhysical(void* to, phys_addr_t from,
size_t length, bool user);
virtual status_t MemcpyToPhysical(phys_addr_t to,
const void* from, size_t length, bool user);
virtual void MemcpyPhysicalPage(phys_addr_t to,
phys_addr_t from);
status_t GetSlot(bool canWait,
PhysicalPageSlot*& slot);
void PutSlot(PhysicalPageSlot* slot);
inline PhysicalPageSlotQueue* GetSlotQueue(int32 cpu, bool user);
private:
typedef DoublyLinkedList<PhysicalPageSlotPool> PoolList;
mutex fLock;
PoolList fEmptyPools;
PoolList fNonEmptyPools;
PhysicalPageSlot* fDebugSlot;
PhysicalPageSlotPool* fInitialPool;
LargeMemoryTranslationMapPhysicalPageMapper fKernelMapper;
PhysicalPageOpsCPUData fPerCPUData[B_MAX_CPU_COUNT];
};
static LargeMemoryPhysicalPageMapper sPhysicalPageMapper;
// #pragma mark - PhysicalPageSlot / PhysicalPageSlotPool
inline void
PhysicalPageSlot::Map(phys_addr_t physicalAddress)
{
pool->Map(physicalAddress, address);
}
PhysicalPageSlotPool::~PhysicalPageSlotPool()
{
}
inline bool
PhysicalPageSlotPool::IsEmpty() const
{
return fSlots == NULL;
}
inline PhysicalPageSlot*
PhysicalPageSlotPool::GetSlot()
{
PhysicalPageSlot* slot = fSlots;
fSlots = slot->next;
return slot;
}
inline void
PhysicalPageSlotPool::PutSlot(PhysicalPageSlot* slot)
{
slot->next = fSlots;
fSlots = slot;
}
// #pragma mark - PhysicalPageSlotQueue
PhysicalPageSlotQueue::PhysicalPageSlotQueue()
:
fSlots(NULL)
{
fFreeSlotCondition.Init(this, "physical page ops slot queue");
fFreeSlotsCondition.Init(this, "physical page ops slots queue");
}
PhysicalPageSlot*
PhysicalPageSlotQueue::GetSlot()
{
InterruptsLocker locker;
// wait for a free slot to turn up
while (fSlots == NULL) {
ConditionVariableEntry entry;
fFreeSlotCondition.Add(&entry);
locker.Unlock();
entry.Wait();
locker.Lock();
}
PhysicalPageSlot* slot = fSlots;
fSlots = slot->next;
return slot;
}
void
PhysicalPageSlotQueue::GetSlots(PhysicalPageSlot*& slot1,
PhysicalPageSlot*& slot2)
{
InterruptsLocker locker;
	// wait for two free slots to turn up
while (fSlots == NULL || fSlots->next == NULL) {
ConditionVariableEntry entry;
fFreeSlotsCondition.Add(&entry);
locker.Unlock();
entry.Wait();
locker.Lock();
}
slot1 = fSlots;
slot2 = slot1->next;
fSlots = slot2->next;
}
void
PhysicalPageSlotQueue::PutSlot(PhysicalPageSlot* slot)
{
InterruptsLocker locker;
slot->next = fSlots;
fSlots = slot;
	if (slot->next == NULL) {
		// the queue was empty before -- wake threads waiting for one slot
		fFreeSlotCondition.NotifyAll();
	} else if (slot->next->next == NULL) {
		// the queue now holds two slots -- wake threads waiting in GetSlots()
		fFreeSlotsCondition.NotifyAll();
	}
}
void
PhysicalPageSlotQueue::PutSlots(PhysicalPageSlot* slot1,
PhysicalPageSlot* slot2)
{
InterruptsLocker locker;
slot1->next = slot2;
slot2->next = fSlots;
fSlots = slot1;
	if (slot2->next == NULL) {
		// the queue was empty before -- wake both kinds of waiters
		fFreeSlotCondition.NotifyAll();
		fFreeSlotsCondition.NotifyAll();
	} else if (slot2->next->next == NULL) {
		// the queue now holds two slots -- wake threads waiting in GetSlots()
		fFreeSlotsCondition.NotifyAll();
	}
}
// #pragma mark - PhysicalPageOpsCPUData
void
PhysicalPageOpsCPUData::Init()
{
for (int32 i = 0; i < USER_SLOTS_PER_CPU; i++)
user.PutSlot(_GetInitialSlot());
for (int32 i = 0; i < KERNEL_SLOTS_PER_CPU; i++)
kernel.PutSlot(_GetInitialSlot());
interruptSlot = _GetInitialSlot();
}
/* static */ PhysicalPageSlot*
PhysicalPageOpsCPUData::_GetInitialSlot()
{
PhysicalPageSlot* slot;
status_t error = sPhysicalPageMapper.GetSlot(false, slot);
if (error != B_OK) {
panic("PhysicalPageOpsCPUData::Init(): Failed to get initial "
"physical page slots! Probably too many CPUs.");
return NULL;
}
return slot;
}
// #pragma mark - LargeMemoryTranslationMapPhysicalPageMapper
LargeMemoryTranslationMapPhysicalPageMapper
::LargeMemoryTranslationMapPhysicalPageMapper()
:
fSlotCount(sizeof(fSlots) / sizeof(page_slot)),
fNextSlot(0)
{
memset(fSlots, 0, sizeof(fSlots));
}
LargeMemoryTranslationMapPhysicalPageMapper
::~LargeMemoryTranslationMapPhysicalPageMapper()
{
// put our slots back to the global pool
for (int32 i = 0; i < fSlotCount; i++) {
if (fSlots[i].slot != NULL)
sPhysicalPageMapper.PutSlot(fSlots[i].slot);
}
}
status_t
LargeMemoryTranslationMapPhysicalPageMapper::Init()
{
// get our slots from the global pool
for (int32 i = 0; i < fSlotCount; i++) {
status_t error = sPhysicalPageMapper.GetSlot(true, fSlots[i].slot);
if (error != B_OK)
return error;
		// set to an invalid physical address, so the slot won't be used
		// accidentally
fSlots[i].physicalAddress = ~(phys_addr_t)0;
}
return B_OK;
}
void
LargeMemoryTranslationMapPhysicalPageMapper::Delete()
{
delete this;
}
void*
LargeMemoryTranslationMapPhysicalPageMapper::GetPageTableAt(
phys_addr_t physicalAddress)
{
ASSERT(physicalAddress % B_PAGE_SIZE == 0);
int32 currentCPU = smp_get_current_cpu();
// maybe the address is already mapped
for (int32 i = 0; i < fSlotCount; i++) {
page_slot& slot = fSlots[i];
if (slot.physicalAddress == physicalAddress) {
fNextSlot = (i + 1) & (fSlotCount - 1);
if ((slot.valid & (1 << currentCPU)) == 0) {
// not valid on this CPU -- invalidate the TLB entry
arch_cpu_invalidate_TLB_range(slot.slot->address,
slot.slot->address);
slot.valid |= 1 << currentCPU;
}
return (void*)slot.slot->address;
}
}
// not found -- need to map a fresh one
page_slot& slot = fSlots[fNextSlot];
fNextSlot = (fNextSlot + 1) & (fSlotCount - 1);
slot.physicalAddress = physicalAddress;
slot.slot->Map(physicalAddress);
slot.valid = 1 << currentCPU;
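	// the mapping is flagged valid only for the current CPU; other CPUs will
	// invalidate their stale TLB entries lazily in the lookup loop above when
	// they find their bit unset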
return (void*)slot.slot->address;
}
// #pragma mark - LargeMemoryPhysicalPageMapper
LargeMemoryPhysicalPageMapper::LargeMemoryPhysicalPageMapper()
:
fInitialPool(NULL)
{
mutex_init(&fLock, "large memory physical page mapper");
}
status_t
LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
PhysicalPageSlotPool* initialPool,
TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
fInitialPool = initialPool;
fNonEmptyPools.Add(fInitialPool);
// get the debug slot
GetSlot(true, fDebugSlot);
// init the kernel translation map physical page mapper
status_t error = fKernelMapper.Init();
if (error != B_OK) {
panic("LargeMemoryPhysicalPageMapper::Init(): Failed to init "
"kernel translation map physical page mapper!");
return error;
}
_kernelPageMapper = &fKernelMapper;
// init the per-CPU data
int32 cpuCount = smp_get_num_cpus();
for (int32 i = 0; i < cpuCount; i++)
fPerCPUData[i].Init();
return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::CreateTranslationMapPhysicalPageMapper(
TranslationMapPhysicalPageMapper** _mapper)
{
LargeMemoryTranslationMapPhysicalPageMapper* mapper
= new(std::nothrow) LargeMemoryTranslationMapPhysicalPageMapper;
if (mapper == NULL)
return B_NO_MEMORY;
status_t error = mapper->Init();
if (error != B_OK) {
delete mapper;
return error;
}
*_mapper = mapper;
return B_OK;
}
void*
LargeMemoryPhysicalPageMapper::InterruptGetPageTableAt(
phys_addr_t physicalAddress)
{
ASSERT(physicalAddress % B_PAGE_SIZE == 0);
PhysicalPageSlot* slot = fPerCPUData[smp_get_current_cpu()].interruptSlot;
slot->Map(physicalAddress);
return (void*)slot->address;
}
status_t
LargeMemoryPhysicalPageMapper::GetPage(phys_addr_t physicalAddress,
addr_t* virtualAddress, void** handle)
{
PhysicalPageSlot* slot;
status_t error = GetSlot(true, slot);
if (error != B_OK)
return error;
slot->Map(physicalAddress);
*handle = slot;
*virtualAddress = slot->address + physicalAddress % B_PAGE_SIZE;
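	// the slot's virtual address may still be cached in other CPUs' TLBs from
	// a previous mapping, so invalidate it everywhere before handing the page
	// out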
smp_send_broadcast_ici(SMP_MSG_INVALIDATE_PAGE_RANGE, *virtualAddress,
*virtualAddress, 0, NULL, SMP_MSG_FLAG_SYNC);
return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::PutPage(addr_t virtualAddress, void* handle)
{
PutSlot((PhysicalPageSlot*)handle);
return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::GetPageCurrentCPU(phys_addr_t physicalAddress,
addr_t* virtualAddress, void** handle)
{
// get a slot from the per-cpu user pool
PhysicalPageSlotQueue& slotQueue
= fPerCPUData[smp_get_current_cpu()].user;
PhysicalPageSlot* slot = slotQueue.GetSlot();
slot->Map(physicalAddress);
*virtualAddress = slot->address + physicalAddress % B_PAGE_SIZE;
*handle = slot;
return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::PutPageCurrentCPU(addr_t virtualAddress,
void* handle)
{
// return the slot to the per-cpu user pool
PhysicalPageSlotQueue& slotQueue
= fPerCPUData[smp_get_current_cpu()].user;
slotQueue.PutSlot((PhysicalPageSlot*)handle);
return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::GetPageDebug(phys_addr_t physicalAddress,
addr_t* virtualAddress, void** handle)
{
fDebugSlot->Map(physicalAddress);
*handle = fDebugSlot;
*virtualAddress = fDebugSlot->address + physicalAddress % B_PAGE_SIZE;
return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::PutPageDebug(addr_t virtualAddress, void* handle)
{
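	// nothing to undo -- the dedicated debug slot simply stays mapped until
	// the next GetPageDebug() call overwrites it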
return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::MemsetPhysical(phys_addr_t address, int value,
phys_size_t length)
{
addr_t pageOffset = address % B_PAGE_SIZE;
Thread* thread = thread_get_current_thread();
ThreadCPUPinner _(thread);
PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num,
false);
PhysicalPageSlot* slot = slotQueue->GetSlot();
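	// remap the single borrowed slot page by page; pageOffset only matters
	// for the first page and is reset afterwards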
while (length > 0) {
slot->Map(address - pageOffset);
size_t toSet = min_c(length, B_PAGE_SIZE - pageOffset);
memset((void*)(slot->address + pageOffset), value, toSet);
length -= toSet;
address += toSet;
pageOffset = 0;
}
slotQueue->PutSlot(slot);
return B_OK;
}
status_t
LargeMemoryPhysicalPageMapper::MemcpyFromPhysical(void* _to, phys_addr_t from,
size_t length, bool user)
{
uint8* to = (uint8*)_to;
addr_t pageOffset = from % B_PAGE_SIZE;
Thread* thread = thread_get_current_thread();
ThreadCPUPinner _(thread);
PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num, user);
PhysicalPageSlot* slot = slotQueue->GetSlot();
status_t error = B_OK;
while (length > 0) {
size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);
slot->Map(from - pageOffset);
if (user) {
error = user_memcpy(to, (void*)(slot->address + pageOffset),
toCopy);
if (error != B_OK)
break;
} else
memcpy(to, (void*)(slot->address + pageOffset), toCopy);
to += toCopy;
from += toCopy;
length -= toCopy;
pageOffset = 0;
}
slotQueue->PutSlot(slot);
return error;
}
status_t
LargeMemoryPhysicalPageMapper::MemcpyToPhysical(phys_addr_t to,
const void* _from, size_t length, bool user)
{
const uint8* from = (const uint8*)_from;
addr_t pageOffset = to % B_PAGE_SIZE;
Thread* thread = thread_get_current_thread();
ThreadCPUPinner _(thread);
PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num, user);
PhysicalPageSlot* slot = slotQueue->GetSlot();
status_t error = B_OK;
while (length > 0) {
size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);
slot->Map(to - pageOffset);
if (user) {
error = user_memcpy((void*)(slot->address + pageOffset), from,
toCopy);
if (error != B_OK)
break;
} else
memcpy((void*)(slot->address + pageOffset), from, toCopy);
to += toCopy;
from += toCopy;
length -= toCopy;
pageOffset = 0;
}
slotQueue->PutSlot(slot);
return error;
}
void
LargeMemoryPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
phys_addr_t from)
{
Thread* thread = thread_get_current_thread();
ThreadCPUPinner _(thread);
PhysicalPageSlotQueue* slotQueue = GetSlotQueue(thread->cpu->cpu_num,
false);
PhysicalPageSlot* fromSlot;
PhysicalPageSlot* toSlot;
slotQueue->GetSlots(fromSlot, toSlot);
fromSlot->Map(from);
toSlot->Map(to);
memcpy((void*)toSlot->address, (void*)fromSlot->address, B_PAGE_SIZE);
slotQueue->PutSlots(fromSlot, toSlot);
}
status_t
LargeMemoryPhysicalPageMapper::GetSlot(bool canWait, PhysicalPageSlot*& slot)
{
MutexLocker locker(fLock);
PhysicalPageSlotPool* pool = fNonEmptyPools.Head();
if (pool == NULL) {
if (!canWait)
return B_WOULD_BLOCK;
// allocate new pool
locker.Unlock();
status_t error = fInitialPool->AllocatePool(pool);
if (error != B_OK)
return error;
locker.Lock();
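		// another thread may have added a pool while the lock was dropped;
		// we add the new one anyway and take whatever is at the head now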
fNonEmptyPools.Add(pool);
pool = fNonEmptyPools.Head();
}
slot = pool->GetSlot();
if (pool->IsEmpty()) {
fNonEmptyPools.Remove(pool);
fEmptyPools.Add(pool);
}
return B_OK;
}
void
LargeMemoryPhysicalPageMapper::PutSlot(PhysicalPageSlot* slot)
{
MutexLocker locker(fLock);
PhysicalPageSlotPool* pool = slot->pool;
if (pool->IsEmpty()) {
fEmptyPools.Remove(pool);
fNonEmptyPools.Add(pool);
}
pool->PutSlot(slot);
}
inline PhysicalPageSlotQueue*
LargeMemoryPhysicalPageMapper::GetSlotQueue(int32 cpu, bool user)
{
return user ? &fPerCPUData[cpu].user : &fPerCPUData[cpu].kernel;
}
// #pragma mark - Initialization
status_t
large_memory_physical_page_ops_init(kernel_args* args,
M68KLargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
M68KPhysicalPageMapper*& _pageMapper,
TranslationMapPhysicalPageMapper*& _kernelPageMapper)
{
new(&sPhysicalPageMapper) LargeMemoryPhysicalPageMapper;
sPhysicalPageMapper.Init(args, initialPool, _kernelPageMapper);
_pageMapper = &sPhysicalPageMapper;
return B_OK;
}
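// A minimal sketch (all names hypothetical, not from this commit) of how a
// paging method's initialization is expected to use this entry point: it
// hands in its concrete PhysicalPageSlotPool instance and receives back the
// global physical page mapper plus the kernel map's per-translation-map page
// mapper.
#if 0
static status_t
init_physical_page_mapper_sketch(kernel_args* args,
	M68KLargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
	M68KPhysicalPageMapper** _physicalPageMapper,
	TranslationMapPhysicalPageMapper** _kernelPageMapper)
{
	M68KPhysicalPageMapper* pageMapper;
	TranslationMapPhysicalPageMapper* kernelPageMapper;
	status_t error = large_memory_physical_page_ops_init(args, initialPool,
		pageMapper, kernelPageMapper);
	if (error != B_OK)
		return error;

	*_physicalPageMapper = pageMapper;
	*_kernelPageMapper = kernelPageMapper;
	return B_OK;
}
#endif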

@ -0,0 +1,61 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_M68K_PAGING_M68K_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H
#define KERNEL_ARCH_M68K_PAGING_M68K_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H
#include <OS.h>
#include <util/DoublyLinkedList.h>
class TranslationMapPhysicalPageMapper;
class M68KPhysicalPageMapper;
struct kernel_args;
namespace M68KLargePhysicalPageMapper {
struct PhysicalPageSlotPool;
struct PhysicalPageSlot {
PhysicalPageSlot* next;
PhysicalPageSlotPool* pool;
addr_t address;
inline void Map(phys_addr_t physicalAddress);
};
struct PhysicalPageSlotPool : DoublyLinkedListLinkImpl<PhysicalPageSlotPool> {
virtual ~PhysicalPageSlotPool();
inline bool IsEmpty() const;
inline PhysicalPageSlot* GetSlot();
inline void PutSlot(PhysicalPageSlot* slot);
virtual status_t AllocatePool(PhysicalPageSlotPool*& _pool) = 0;
virtual void Map(phys_addr_t physicalAddress,
addr_t virtualAddress) = 0;
protected:
PhysicalPageSlot* fSlots;
};
}
status_t large_memory_physical_page_ops_init(kernel_args* args,
M68KLargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
M68KPhysicalPageMapper*& _pageMapper,
TranslationMapPhysicalPageMapper*& _kernelPageMapper);
#endif // KERNEL_ARCH_M68K_PAGING_M68K_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H