Map the page directory before turning on the MMU.
And actually use the virtual address for it later on. This wasn't problematic because the virtual and physical addresses are identity-mapped, but it seems more correct to do it in this order.
This commit is contained in:
parent
0deac574bd
commit
3d4175bfe1
|
@ -325,6 +325,10 @@ init_page_directory()
|
|||
}
|
||||
}
|
||||
|
||||
// Map the page directory itself.
|
||||
addr_t virtualPageDirectory = mmu_map_physical_memory(
|
||||
(addr_t)sPageDirectory, ARM_MMU_L1_TABLE_SIZE, kDefaultPageFlags);
|
||||
|
||||
mmu_flush_TLB();
|
||||
|
||||
/* set up the translation table base */
|
||||
|
@ -337,6 +341,10 @@ init_page_directory()
|
|||
|
||||
/* turn on the mmu */
|
||||
mmu_write_C1(mmu_read_C1() | 0x1);
|
||||
|
||||
// Use the mapped page directory from now on.
|
||||
sPageDirectory = (uint32 *)virtualPageDirectory;
|
||||
gKernelArgs.arch_args.vir_pgdir = virtualPageDirectory;
|
||||
}
|
||||
|
||||
|
||||
|
@ -629,10 +637,6 @@ mmu_init(void)
|
|||
|
||||
init_page_directory();
|
||||
|
||||
// map the page directory on the next vpage
|
||||
gKernelArgs.arch_args.vir_pgdir = mmu_map_physical_memory(
|
||||
(addr_t)sPageDirectory, MMU_L1_TABLE_SIZE, kDefaultPageFlags);
|
||||
|
||||
// map in a kernel stack
|
||||
gKernelArgs.cpu_kstack[0].size = KERNEL_STACK_SIZE
|
||||
+ KERNEL_STACK_GUARD_PAGES * B_PAGE_SIZE;
|
||||
|
|
Loading…
Reference in New Issue