- move VADDR_TO_* to headers
- move part of the mmu handling to mmu-dependent files and extend the ops - implemented the 040 mmu init. Doing so I spotted a bug in ARAnyM, which ignores [ID]TT0. Linux likely doesn't use them, but I was too lazy to use temporary page tables. I also noticed the bitfields were in the wrong order; to be fixed.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26527 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 7c730d1ed4, commit 74c5e8bd10
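Note on the log: [ID]TT0 are the 68040's instruction/data transparent translation registers (ITT0/DTT0), which the boot code below programs through set_tt(). As background, a minimal sketch of building and loading such a register, with the layout taken from the M68040 manual; this helper is illustrative and is not the set_tt() implementation from this commit (needs m68k gcc):

    #include <stdint.h>

    /* bits 31-24: address base; bits 23-16: address mask, where each
       set bit widens the range by 16 MB; bit 15: E (enable) */
    static inline void
    load_tt0(uint32_t base, uint32_t size)
    {
        uint32_t mask = (size >> 24) - 1;   /* 32 MB -> 0x01 */
        uint32_t ttr = (base & 0xff000000) | ((mask & 0xff) << 16) | 0x8000;

        asm volatile("movec %0,%%itt0\n"
            "movec %0,%%dtt0\n" : : "d" (ttr));
    }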
@@ -194,4 +194,9 @@ typedef uint64 page_indirect_entry_scalar;
 #define PIE_TO_PO(e) ((((uint32 *)(&(e)))[1]) & ((1<<12)-(1<<2)))
 #define TA_TO_PIEA(a) ((a) >> 2)
 
+/* 7/7/6 split */
+#define VADDR_TO_PRENT(va) (((va) / B_PAGE_SIZE) / (64*128))
+#define VADDR_TO_PDENT(va) ((((va) / B_PAGE_SIZE) / 64) % 128)
+#define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 64)
+
 #endif /* _KERNEL_ARCH_M68K_030_MMU_H */
@@ -195,4 +195,9 @@ typedef uint64 page_indirect_entry_scalar;
 #define PIE_TO_PO(e) ((((uint32 *)(&(e)))[1]) & ((1<<12)-(1<<2)))
 #define TA_TO_PIEA(a) ((a) >> 2)
 
+/* 7/7/6 split */
+#define VADDR_TO_PRENT(va) (((va) / B_PAGE_SIZE) / (64*128))
+#define VADDR_TO_PDENT(va) ((((va) / B_PAGE_SIZE) / 64) % 128)
+#define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 64)
+
 #endif /* _KERNEL_ARCH_M68K_040_MMU_H */
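Both headers now get the same 7/7/6 split: of the 20-bit page number (32-bit address, 4 kB pages), the top 7 bits pick one of 128 root entries (32 MB each), the next 7 bits one of 128 directory entries (256 kB each), and the low 6 bits one of 64 page table entries. A self-contained check of the arithmetic, assuming B_PAGE_SIZE is 4096:

    #include <assert.h>
    #include <stdint.h>

    #define B_PAGE_SIZE 4096
    #define VADDR_TO_PRENT(va) (((va) / B_PAGE_SIZE) / (64*128))
    #define VADDR_TO_PDENT(va) ((((va) / B_PAGE_SIZE) / 64) % 128)
    #define VADDR_TO_PTENT(va) (((va) / B_PAGE_SIZE) % 64)

    int
    main(void)
    {
        uint32_t va = 0x80003000;
        assert(VADDR_TO_PRENT(va) == 64);   /* 0x80000000 / 32 MB */
        assert(VADDR_TO_PDENT(va) == 0);    /* first 256 kB chunk */
        assert(VADDR_TO_PTENT(va) == 3);    /* fourth 4 kB page */
        return 0;
    }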
@@ -37,6 +37,7 @@ panic(const char *format, ...)
     Bconputs(DEV_CONSOLE, buffer);
     // send to the emulator's stdout if available
     nat_feat_debugprintf(buffer);
+    nat_feat_debugprintf("\n");
 
     Bconputs(DEV_CONSOLE, "\nPress key to reboot.");
 
@@ -79,15 +79,17 @@ static const uint32 kDefaultPageTableFlags = 0x07; // present, user, R/W
 static const size_t kMaxKernelSize = 0x100000; // 1 MB for the kernel
 
 // working page directory and page table
-static uint32 *sPageDirectory = 0;
+addr_t gPageRoot = 0;
 
 static addr_t sNextPhysicalAddress = 0x100000;
 static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
-static addr_t sMaxVirtualAddress = KERNEL_BASE + 0x400000;
+static addr_t sMaxVirtualAddress = KERNEL_BASE /*+ 0x400000*/;
 
+#if 0
 static addr_t sNextPageTableAddress = 0x90000;
 static const uint32 kPageTableRegionEnd = 0x9e000;
     // we need to reserve 2 pages for the SMP trampoline code XXX:no
+#endif
 
 static const struct boot_mmu_ops *gMMUOps;
 
@@ -97,6 +99,7 @@ get_next_virtual_address(size_t size)
     addr_t address = sNextVirtualAddress;
     sNextVirtualAddress += size;
 
+    TRACE(("%s(%d): %08x\n", __FUNCTION__, size, address));
     return address;
 }
 
@@ -107,6 +110,7 @@ get_next_physical_address(size_t size)
     addr_t address = sNextPhysicalAddress;
     sNextPhysicalAddress += size;
 
+    TRACE(("%s(%d): %08x\n", __FUNCTION__, size, address));
     return address;
 }
 
@@ -114,6 +118,7 @@ get_next_physical_address(size_t size)
 
 static addr_t
 get_next_virtual_page()
 {
+    TRACE(("%s\n", __FUNCTION__));
     return get_next_virtual_address(B_PAGE_SIZE);
 }
 
@@ -121,14 +126,17 @@ get_next_virtual_page()
 static addr_t
 get_next_physical_page()
 {
+    TRACE(("%s\n", __FUNCTION__));
     return get_next_physical_address(B_PAGE_SIZE);
 }
 
 
-static uint32 *
-get_next_page_table()
+// allocate a page worth of page dir or tables
+extern "C" addr_t
+mmu_get_next_page_tables()
 {
-    TRACE(("get_next_page_table, sNextPageTableAddress %p, kPageTableRegionEnd %p\n",
+#if 0
+    TRACE(("mmu_get_next_page_tables, sNextPageTableAddress %p, kPageTableRegionEnd %p\n",
         sNextPageTableAddress, kPageTableRegionEnd));
 
     addr_t address = sNextPageTableAddress;
@@ -137,9 +145,20 @@ get_next_page_table()
 
     sNextPageTableAddress += B_PAGE_SIZE;
     return (uint32 *)address;
+#endif
+    addr_t tbl = get_next_physical_page();
+    if (!tbl)
+        return tbl;
+#if 0
+    // clear them
+    uint32 *p = (uint32 *)tbl;
+    for (int32 i = 0; i < 1024; i++)
+        p[i] = 0;
+#endif
+    return tbl;
 }
 
 
+#if 0
 /** Adds a new page table for the specified base address */
 
 static void
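mmu_get_next_page_tables() hands out a whole physical page even though a single table is smaller, and callers slice it with SIZ_DIRTBL/SIZ_PAGETBL. Under the 7/7/6 split with 4-byte descriptors the packing works out as below; the names mirror the NUM_*/SIZ_* constants used later in this diff, but the concrete values are an assumption:

    #define B_PAGE_SIZE           4096
    #define NUM_DIRENT_PER_TBL    128   /* 7-bit index */
    #define NUM_PAGEENT_PER_TBL   64    /* 6-bit index */
    #define SIZ_DIRTBL            (NUM_DIRENT_PER_TBL * 4)    /* 512 bytes */
    #define SIZ_PAGETBL           (NUM_PAGEENT_PER_TBL * 4)   /* 256 bytes */
    #define NUM_DIRTBL_PER_PAGE   (B_PAGE_SIZE / SIZ_DIRTBL)  /* 8 per page */
    #define NUM_PAGETBL_PER_PAGE  (B_PAGE_SIZE / SIZ_PAGETBL) /* 16 per page */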
@@ -149,7 +168,7 @@ add_page_table(addr_t base)
 #if 0
 
     // Get new page table and clear it out
-    uint32 *pageTable = get_next_page_table();
+    uint32 *pageTable = mmu_get_next_page_tables();
     if (pageTable > (uint32 *)(8 * 1024 * 1024))
         panic("tried to add page table beyond the indentity mapped 8 MB region\n");
 
@@ -159,27 +178,16 @@ add_page_table(addr_t base)
         pageTable[i] = 0;
 
     // put the new page table into the page directory
-    sPageDirectory[base/(4*1024*1024)] = (uint32)pageTable | kDefaultPageTableFlags;
+    gPageRoot[base/(4*1024*1024)] = (uint32)pageTable | kDefaultPageTableFlags;
 #endif
 }
+#endif
 
 
 static void
 unmap_page(addr_t virtualAddress)
 {
     TRACE(("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));
 
     if (virtualAddress < KERNEL_BASE)
         panic("unmap_page: asked to unmap invalid page %p!\n", (void *)virtualAddress);
+#if 0
 
     // unmap the page from the correct page table
     uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
         / (B_PAGE_SIZE * 1024)] & 0xfffff000);
     pageTable[(virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE] = 0;
 
     asm volatile("invlpg (%0)" : : "r" (virtualAddress));
+#endif
+    gMMUOps->unmap_page(virtualAddress);
 }
 
 
@@ -196,33 +204,26 @@ map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
 
     if (virtualAddress < KERNEL_BASE)
         panic("map_page: asked to map invalid page %p!\n", (void *)virtualAddress);
-#if 0
 
+    // slow but I'm too lazy to fix the code below
+    gMMUOps->add_page_table(virtualAddress);
+#if 0
     if (virtualAddress >= sMaxVirtualAddress) {
         // we need to add a new page table
 
-        add_page_table(sMaxVirtualAddress);
-        sMaxVirtualAddress += B_PAGE_SIZE * 1024;
+        gMMUOps->add_page_table(sMaxVirtualAddress);
+        // 64 pages / page table
+        sMaxVirtualAddress += B_PAGE_SIZE * 64;
 
         if (virtualAddress >= sMaxVirtualAddress)
             panic("map_page: asked to map a page to %p\n", (void *)virtualAddress);
     }
+#endif
 
     physicalAddress &= ~(B_PAGE_SIZE - 1);
 
-    // map the page to the correct page table
-    uint32 *pageTable = (uint32 *)(sPageDirectory[virtualAddress
-        / (B_PAGE_SIZE * 1024)] & 0xfffff000);
-    uint32 tableEntry = (virtualAddress % (B_PAGE_SIZE * 1024)) / B_PAGE_SIZE;
-
-    TRACE(("map_page: inserting pageTable %p, tableEntry %ld, physicalAddress %p\n",
-        pageTable, tableEntry, physicalAddress));
-
-    pageTable[tableEntry] = physicalAddress | flags;
-
-    asm volatile("invlpg (%0)" : : "r" (virtualAddress));
-#endif
     TRACE(("map_page: done\n"));
+    gMMUOps->map_page(virtualAddress, physicalAddress, flags);
 }
 
 
@@ -247,58 +248,26 @@ sort_addr_range(addr_range *range, int count)
 }
 
 
+#if 0
 static uint32
 get_memory_map(extended_memory **_extendedMemory)
 {
     extended_memory *block = (extended_memory *)kExtraSegmentScratch;
     bios_regs regs = { 0, 0, sizeof(extended_memory), 0, 0, (uint32)block, 0, 0};
     uint32 count = 0;
 
     TRACE(("get_memory_map()\n"));
 
     do {
         regs.eax = 0xe820;
         regs.edx = 'SMAP';
 
         call_bios(0x15, &regs);
         if (regs.flags & CARRY_FLAG)
             return 0;
 
         regs.edi += sizeof(extended_memory);
         count++;
     } while (regs.ebx != 0);
 
     *_extendedMemory = block;
 
 #ifdef TRACE_MMU
     dprintf("extended memory info (from 0xe820):\n");
     for (uint32 i = 0; i < count; i++) {
         dprintf("    base 0x%Lx, len 0x%Lx, type %lu\n",
             block[i].base_addr, block[i].length, block[i].type);
     }
 #endif
 
     return count;
 }
+#endif
 
 
 static void
 init_page_directory(void)
 {
     TRACE(("init_page_directory\n"));
 
-    gMMUOps->load_rp(NULL);
+    // allocate a new pg root dir
+    gPageRoot = get_next_physical_page();
+    gKernelArgs.arch_args.phys_pgroot = (uint32)gPageRoot;
+
+    // set the root pointers
+    gMMUOps->load_rp(gPageRoot);
+    // enable mmu translation
+    gMMUOps->enable_paging();
+
 #if 0
     // allocate a new pgdir
     sPageDirectory = (uint32 *)get_next_physical_page();
     gKernelArgs.arch_args.phys_pgdir = (uint32)sPageDirectory;
 
     // clear out the pgdir
     for (int32 i = 0; i < 1024; i++) {
-        sPageDirectory[i] = 0;
+        gPageRoot[i] = 0;
     }
 
     // Identity map the first 8 MB of memory so that their
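init_page_directory() now does the generic sequence: allocate the root table, record its physical address in gKernelArgs, point the MMU at it, then enable translation. On the 040 the load_rp() op presumably reduces to loading both root pointer registers; a sketch under that assumption (not the commit's implementation, needs m68k gcc):

    #include <stdint.h>

    static int
    load_rp_sketch(uint32_t pa)
    {
        /* pa: physical address of the 512-byte-aligned root table */
        asm volatile("movec %0,%%srp\n"   /* supervisor root pointer */
            "movec %0,%%urp\n"            /* user root pointer */
            : : "d" (pa));
        return 0;
    }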
@@ -306,29 +275,29 @@ init_page_directory(void)
     // These page tables won't be taken over into the kernel.
 
     // make the first page table at the first free spot
-    uint32 *pageTable = get_next_page_table();
+    uint32 *pageTable = mmu_get_next_page_tables();
 
     for (int32 i = 0; i < 1024; i++) {
         pageTable[i] = (i * 0x1000) | kDefaultPageFlags;
     }
 
-    sPageDirectory[0] = (uint32)pageTable | kDefaultPageFlags;
+    gPageRoot[0] = (uint32)pageTable | kDefaultPageFlags;
 
     // make the second page table
-    pageTable = get_next_page_table();
+    pageTable = mmu_get_next_page_tables();
 
     for (int32 i = 0; i < 1024; i++) {
         pageTable[i] = (i * 0x1000 + 0x400000) | kDefaultPageFlags;
     }
 
-    sPageDirectory[1] = (uint32)pageTable | kDefaultPageFlags;
+    gPageRoot[1] = (uint32)pageTable | kDefaultPageFlags;
 
     gKernelArgs.arch_args.num_pgtables = 0;
     add_page_table(KERNEL_BASE);
 
     // switch to the new pgdir and enable paging
     asm("movl %0, %%eax;"
-        "movl %%eax, %%cr3;" : : "m" (sPageDirectory) : "eax");
+        "movl %%eax, %%cr3;" : : "m" (gPageRoot) : "eax");
     // Important. Make sure supervisor threads can fault on read only pages...
     asm("movl %%eax, %%cr0" : : "a" ((1 << 31) | (1 << 16) | (1 << 5) | 1));
 #endif
@@ -587,6 +556,8 @@ mmu_init(void)
             panic("unknown mmu type %d\n", gKernelArgs.arch_args.mmu_type);
     }
 
+    gMMUOps->initialize();
+
     addr_t fastram_top = 0;
     if (*TOSVARramvalid == TOSVARramvalid_MAGIC)
         fastram_top = *TOSVARramtop;
@@ -603,7 +574,7 @@ mmu_init(void)
     // enable transparent translation of the first 32 MB
     gMMUOps->set_tt(0, ATARI_CHIPRAM_BASE, 0x02000000, 0);
     // enable transparent translation of the 16MB ST shadow range for I/O
-    gMMUOps->set_tt(0, ATARI_SHADOW_BASE, 0x01000000, 0);
+    gMMUOps->set_tt(1, ATARI_SHADOW_BASE, 0x01000000, 0);
 
     init_page_directory();
 #if 0//XXX:HOLE
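The one-line change above is the fix implied by the log entry about [ID]TT0: both transparent translation ranges were programmed into slot 0, so the second set_tt() silently replaced the 32 MB RAM mapping. A toy model of the clobber (register values and base addresses are illustrative):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t tt_regs[2];   /* model of TT0/TT1 */

    static void
    set_tt_model(int index, uint32_t base)
    {
        tt_regs[index] = base | 0x8000;   /* E bit */
    }

    int
    main(void)
    {
        set_tt_model(0, 0x00000000);   /* 32 MB of RAM on TT0 */
        set_tt_model(0, 0xff000000);   /* old code: shadow also on TT0 */
        assert(tt_regs[0] == (0xff000000 | 0x8000));   /* RAM mapping lost */

        set_tt_model(0, 0x00000000);
        set_tt_model(1, 0xff000000);   /* fixed: shadow range on TT1 */
        assert(tt_regs[0] == 0x8000);  /* both mappings survive */
        return 0;
    }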
@@ -612,12 +583,12 @@ mmu_init(void)
     // this enables a mmu trick where the 4 MB region that this pgdir entry
     // represents now maps the 4MB of potential pagetables that the pgdir
     // points to. Thrown away later in VM bringup, but useful for now.
-    sPageDirectory[1023] = (uint32)sPageDirectory | kDefaultPageFlags;
+    gPageRoot[1023] = (uint32)gPageRoot | kDefaultPageFlags;
 #endif
 
     // also map it on the next vpage
     gKernelArgs.arch_args.vir_pgroot = get_next_virtual_page();
-    map_page(gKernelArgs.arch_args.vir_pgroot, (uint32)sPageDirectory, kDefaultPageFlags);
+    map_page(gKernelArgs.arch_args.vir_pgroot, (uint32)gPageRoot, kDefaultPageFlags);
 
     // map in a kernel stack
     gKernelArgs.cpu_kstack[0].start = (addr_t)mmu_allocate(NULL, KERNEL_STACK_SIZE);
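The disabled "mmu trick" comment refers to the classic x86 recursive page directory: pgdir[1023] pointing at the pgdir itself makes the top 4 MB of the address space a window onto every page table. Background sketch only (32-bit x86, 4 kB pages); the m68k path does not use it:

    #include <stdint.h>

    /* with pgdir[1023] = (uint32_t)pgdir | flags, the PTE governing
       any virtual address becomes visible at: */
    static inline uint32_t *
    pte_address(uintptr_t va)
    {
        return (uint32_t *)(0xffc00000 + (va >> 12) * 4);
    }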
@@ -22,6 +22,9 @@ extern addr_t mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint3
 extern void *mmu_allocate(void *virtualAddress, size_t size);
 extern void mmu_free(void *virtualAddress, size_t size);
 
+extern addr_t mmu_get_next_page_tables();
+
+
 struct boot_mmu_ops {
     void (*initialize)(void);
     /* len=0 to disable */
@@ -29,7 +32,9 @@ struct boot_mmu_ops {
     /* load root pointers */
     status_t (*load_rp)(addr_t pa);
     status_t (*enable_paging)(void);
 
+    status_t (*add_page_table)(addr_t virtualAddress);
+    void (*unmap_page)(addr_t virtualAddress);
+    void (*map_page)(addr_t virtualAddress, addr_t pa, uint32 flags);
 };
 
 extern const struct boot_mmu_ops k030MMUOps;
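With the three new hooks, the whole boot-time paging path dispatches through gMMUOps. The table is presumably picked in mmu_init() along these lines (fragment; only the ops tables and the "unknown mmu type" panic are visible in this diff, the mmu_type values are an assumption):

    switch (gKernelArgs.arch_args.mmu_type) {
        case 68030:
            gMMUOps = &k030MMUOps;
            break;
        case 68040:
            gMMUOps = &k040MMUOps;
            break;
        case 68060:
            gMMUOps = &k060MMUOps;
            break;
        default:
            panic("unknown mmu type %d\n", gKernelArgs.arch_args.mmu_type);
    }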
@@ -37,6 +42,8 @@ extern const struct boot_mmu_ops k040MMUOps;
 extern const struct boot_mmu_ops k060MMUOps;
 
 
+
+
 #ifdef __cplusplus
 }
 #endif
@@ -31,8 +31,7 @@
 #endif
 
 
-static uint32 *sPageDirectory = 0;
-
+extern page_root_entry *gPageRoot;
 
 
 static void
@@ -31,14 +31,21 @@
 #endif
 
 
-static uint32 *sPageDirectory = 0;
-
+extern page_root_entry *gPageRoot;
 
 
 static void
 initialize(void)
 {
     TRACE(("mmu_040:initialize\n"));
+    //XXX TESTING for bitfield order
+    long_page_directory_entry ent;
+    *(uint64 *)&ent = 0LL;
+    TRACE(("ent: %llx [0] %02x\n", ent, *(uint8 *)&ent));
+    ent.type=3;
+    TRACE(("ent: %llx [0] %02x\n", ent, *(uint8 *)&ent));
+    ent.addr = 0x0aaaaaaa;
+    TRACE(("ent: %llx [0] %02x\n", ent, *(uint8 *)&ent));
 }
 
 
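The TESTING block above is how the "bitfields were in wrong order" remark from the log was diagnosed: set the type field and watch which end of the 64-bit entry changes. A sketch of the same check with a hypothetical field layout (not the real entry declaration):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
        uint64_t addr : 60;   /* hypothetical split */
        uint64_t type : 4;
    } long_page_directory_entry_sketch;

    int
    main(void)
    {
        long_page_directory_entry_sketch ent = {0};
        ent.type = 3;
        /* on big-endian m68k gcc packs the FIRST declared field at the
           most significant end; with this order byte 0 prints 00, but
           if type were declared first it would print 30, which is how
           a wrong declaration order shows up in the TRACE output */
        printf("%02x\n", ((uint8_t *)&ent)[0]);
        return 0;
    }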
@@ -107,7 +114,7 @@ static status_t
 enable_paging(void)
 {
     TRACE(("mmu_040:enable_paging\n"));
-    uint16 tcr = 0x80; // Enable, 4K page size
+    uint16 tcr = 0x8000; // Enable, 4K page size
     asm volatile( \
         "pflusha\n" \
         "movec %0,%%tcr\n" \
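This is the actual 040 enable fix: in the 68040 TCR the enable bit is bit 15, so the old value 0x80 never switched translation on. The two bits involved, per the M68040 manual (macro names are illustrative, not from the tree):

    #include <stdint.h>

    #define M68040_TCR_ENABLE    0x8000   /* bit 15: E, translation enable */
    #define M68040_TCR_PAGE_8K   0x4000   /* bit 14: P, clear = 4 kB pages */

    static const uint16_t kBootTcr = M68040_TCR_ENABLE;   /* enable, 4K */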
@@ -117,9 +124,162 @@ enable_paging(void)
 }
 
 
+static status_t
+add_page_table(addr_t virtualAddress)
+{
+    page_root_entry *pr = gPageRoot;
+    page_directory_entry *pd;
+    page_table_entry *pt;
+    addr_t tbl;
+    uint32 index;
+    uint32 i;
+
+    TRACE(("mmu->add_page_table(base = %p)\n", (void *)virtualAddress));
+
+    // everything much simpler here because pa = va
+    // thanks to transparent translation
+
+    index = VADDR_TO_PRENT(virtualAddress);
+    if (pr[index].type != DT_ROOT) {
+        unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
+        TRACE(("missing page root entry %d ai %d\n", index, aindex));
+        tbl = mmu_get_next_page_tables();
+        if (!tbl)
+            return ENOMEM;
+        // for each pgdir on the allocated page:
+        for (i = 0; i < NUM_DIRTBL_PER_PAGE; i++) {
+            page_root_entry *apr = &pr[aindex + i];
+            apr->addr = TA_TO_PREA(tbl);
+            apr->type = DT_ROOT;
+            TRACE(("inserting tbl @ %p as %08x entry %08x\n", tbl, TA_TO_PREA(tbl), *(uint32 *)apr));
+            // clear the table
+            TRACE(("clearing table[%d]\n", i));
+            pd = (page_directory_entry *)tbl;
+            for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
+                *(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
+            tbl += SIZ_DIRTBL;
+        }
+    }
+    TRACE(("B %08lx\n", PRE_TO_TA(pr[index])));
+    pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
+    TRACE(("C\n"));
+
+    index = VADDR_TO_PDENT(virtualAddress);
+    TRACE(("checking pgdir@%p[%d]\n", pd, index));
+    if (pd[index].type != DT_DIR) {
+        unsigned aindex = index & ~(NUM_PAGETBL_PER_PAGE-1); /* aligned */
+        TRACE(("missing page dir entry %d ai %d\n", index, aindex));
+        tbl = mmu_get_next_page_tables();
+        if (!tbl)
+            return ENOMEM;
+        // for each pgdir on the allocated page:
+        for (i = 0; i < NUM_PAGETBL_PER_PAGE; i++) {
+            page_directory_entry *apd = &pd[aindex + i];
+            apd->addr = TA_TO_PDEA(tbl);
+            apd->type = DT_DIR;
+            // clear the table
+            TRACE(("clearing table[%d]\n", i));
+            pt = (page_table_entry *)tbl;
+            for (int32 j = 0; j < NUM_PAGEENT_PER_TBL; j++)
+                *(page_table_entry_scalar *)(&pt[j]) = DFL_PAGEENT_VAL;
+            tbl += SIZ_PAGETBL;
+        }
+    }
+#if 0
+    pt = PDE_TO_TA(pd[index]);
+
+    index = VADDR_TO_PTENT(virtualAddress);
+    pt[index].addr = TA_TO_PTEA(0xdeadb00b);
+    pt[index].supervisor = 1;
+    pt[index].type = DT_PAGE;
+#endif
+    return B_OK;
+}
+
+
+static page_table_entry *
+lookup_pte(addr_t virtualAddress)
+{
+    page_root_entry *pr = gPageRoot;
+    page_directory_entry *pd;
+    page_table_entry *pt;
+    uint32 index;
+
+    index = VADDR_TO_PRENT(virtualAddress);
+    if (pr[index].type != DT_ROOT)
+        panic("lookup_pte: invalid page root entry %d", index);
+    pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
+
+    index = VADDR_TO_PDENT(virtualAddress);
+    if (pd[index].type != DT_DIR)
+        panic("lookup_pte: invalid page directory entry %d", index);
+    pt = (page_table_entry *)PDE_TO_TA(pd[index]);
+
+    index = VADDR_TO_PTENT(virtualAddress);
+    if (pt[index].type != DT_PAGE)
+        panic("lookup_pte: invalid page table entry %d", index);
+
+    return (&pt[index]);
+}
+
+
+static void
+unmap_page(addr_t virtualAddress)
+{
+    page_table_entry *pt;
+
+    TRACE(("mmu->unmap_page(virtualAddress = %p)\n", (void *)virtualAddress));
+
+    if (virtualAddress < KERNEL_BASE)
+        panic("unmap_page: asked to unmap invalid page %p!\n", (void *)virtualAddress);
+
+    // unmap the page from the correct page table
+    pt = lookup_pte(virtualAddress);
+
+    pt->addr = TA_TO_PTEA(0xdeadb00b);
+    pt->type = DT_INVALID;
+
+    asm volatile("pflush (%0)" : : "a" (virtualAddress));
+}
+
+
+/** insert the physical address into existing page table */
+static void
+map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
+{
+    page_table_entry *pt;
+
+    TRACE(("mmu->map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress, physicalAddress));
+
+    physicalAddress &= ~(B_PAGE_SIZE - 1);
+
+    // map the page to the correct page table
+    pt = lookup_pte(virtualAddress);
+
+    TRACE(("map_page: inserting pageTableEntry %p, physicalAddress %p\n",
+        pt, physicalAddress));
+
+    pt->addr = TA_TO_PTEA(physicalAddress);
+    pt->supervisor = 1;
+    pt->type = DT_PAGE;
+    // XXX: are flags needed ? ro ? global ?
+
+    asm volatile("pflush (%0)" : : "a" (virtualAddress));
+    TRACE(("mmu->map_page: done\n"));
+}
+
+
 const struct boot_mmu_ops k040MMUOps = {
     &initialize,
     &set_tt,
     &load_rp,
-    &enable_paging
+    &enable_paging,
+    &add_page_table,
+    &unmap_page,
+    &map_page
 };
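Worth noting in add_page_table() above: since one physical page backs several tables, the entry index is aligned down first (aindex) and every sibling entry served by that page is installed in a single pass. A mini-model of the arithmetic, assuming 8 directory tables per page as sketched earlier:

    #include <assert.h>

    enum { kDirTblPerPage = 8 };

    int
    main(void)
    {
        unsigned index = 13;
        unsigned aindex = index & ~(kDirTblPerPage - 1);
        /* one page provides the tables for root slots 8..15 */
        assert(aindex == 8 && aindex + kDirTblPerPage - 1 == 15);
        return 0;
    }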