Allocate all kernel page directories at boot; this will simplify later code a lot.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26721 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
François Revol 2008-08-01 16:01:02 +00:00
parent 06b4c67fa5
commit 7a1fd4cbf8
4 changed files with 60 additions and 0 deletions

View File

@ -270,8 +270,12 @@ init_page_directory(void)
// set the root pointers
gMMUOps->load_rp(gPageRoot);
// allocate second level tables for kernel space
// this will simplify mmu code a lot, and only wastes 32KB
gMMUOps->allocate_kernel_pgdirs();
// enable mmu translation
gMMUOps->enable_paging();
//XXX: check for errors
//gKernelArgs.arch_args.num_pgtables = 0;
gMMUOps->add_page_table(KERNEL_BASE);

View File

@ -31,6 +31,7 @@ struct boot_mmu_ops {
status_t (*set_tt)(int which, addr_t pa, size_t len, uint32 perms);
/* load root pointers */
status_t (*load_rp)(addr_t pa);
status_t (*allocate_kernel_pgdirs)(void);
status_t (*enable_paging)(void);
status_t (*add_page_table)(addr_t virtualAddress);
void (*unmap_page)(addr_t virtualAddress);

View File

@ -103,6 +103,30 @@ load_rp(addr_t pa)
}
/*
 * Pre-allocate second-level (directory) tables for the whole kernel half
 * of the root table, so later mapping code never needs to allocate one.
 * Each physical page obtained from mmu_get_next_page_tables() holds
 * NUM_DIRTBL_PER_PAGE directory tables of SIZ_DIRTBL bytes each; a fresh
 * page is fetched whenever the previous one is exhausted.  Every entry of
 * each new directory table is set to DFL_DIRENT_VAL (invalid), so nothing
 * is mapped until add_page_table() fills it in.
 * Always returns B_OK (allocation failure panics inside the mmu helpers —
 * TODO confirm against mmu_get_next_page_tables()).
 */
static status_t
allocate_kernel_pgdirs(void)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	addr_t tbl = 0;
	int i;

	// we'll fill in the 2nd half with ready made page dirs
	for (i = NUM_ROOTENT_PER_TBL / 2; i < NUM_ROOTENT_PER_TBL; i++) {
		// Key the "need a new page" test on the offset from the loop
		// start, not on the absolute index: the original form read the
		// uninitialized 'tbl' on the first iteration whenever
		// NUM_ROOTENT_PER_TBL/2 is not a multiple of
		// NUM_DIRTBL_PER_PAGE (undefined behavior).
		if ((i - NUM_ROOTENT_PER_TBL / 2) % NUM_DIRTBL_PER_PAGE == 0) {
			// start of a fresh page worth of directory tables
			tbl = mmu_get_next_page_tables();
		} else {
			// next directory table within the current page
			tbl += SIZ_DIRTBL;
		}
		pr[i].addr = TA_TO_PREA(tbl);
		pr[i].type = DT_ROOT;

		// invalidate every entry of the new directory table
		pd = (page_directory_entry *)tbl;
		for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
			*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
	}
	return B_OK;
}
static status_t
enable_paging(void)
{
@ -115,5 +139,6 @@ const struct boot_mmu_ops k030MMUOps = {
&initialize,
&set_tt,
&load_rp,
&allocate_kernel_pgdirs,
&enable_paging
};

View File

@ -107,6 +107,30 @@ load_rp(addr_t pa)
}
/*
 * Pre-allocate second-level (directory) tables for the whole kernel half
 * of the root table, so later mapping code never needs to allocate one.
 * Each physical page obtained from mmu_get_next_page_tables() holds
 * NUM_DIRTBL_PER_PAGE directory tables of SIZ_DIRTBL bytes each; a fresh
 * page is fetched whenever the previous one is exhausted.  Every entry of
 * each new directory table is set to DFL_DIRENT_VAL (invalid), so nothing
 * is mapped until add_page_table() fills it in.
 * Always returns B_OK (allocation failure panics inside the mmu helpers —
 * TODO confirm against mmu_get_next_page_tables()).
 */
static status_t
allocate_kernel_pgdirs(void)
{
	page_root_entry *pr = gPageRoot;
	page_directory_entry *pd;
	addr_t tbl = 0;
	int i;

	// we'll fill in the 2nd half with ready made page dirs
	for (i = NUM_ROOTENT_PER_TBL / 2; i < NUM_ROOTENT_PER_TBL; i++) {
		// Key the "need a new page" test on the offset from the loop
		// start, not on the absolute index: the original form read the
		// uninitialized 'tbl' on the first iteration whenever
		// NUM_ROOTENT_PER_TBL/2 is not a multiple of
		// NUM_DIRTBL_PER_PAGE (undefined behavior).
		if ((i - NUM_ROOTENT_PER_TBL / 2) % NUM_DIRTBL_PER_PAGE == 0) {
			// start of a fresh page worth of directory tables
			tbl = mmu_get_next_page_tables();
		} else {
			// next directory table within the current page
			tbl += SIZ_DIRTBL;
		}
		pr[i].addr = TA_TO_PREA(tbl);
		pr[i].type = DT_ROOT;

		// invalidate every entry of the new directory table
		pd = (page_directory_entry *)tbl;
		for (int32 j = 0; j < NUM_DIRENT_PER_TBL; j++)
			*(page_directory_entry_scalar *)(&pd[j]) = DFL_DIRENT_VAL;
	}
	return B_OK;
}
static status_t
enable_paging(void)
{
@ -137,6 +161,10 @@ add_page_table(addr_t virtualAddress)
// thanks to transparent translation
index = VADDR_TO_PRENT(virtualAddress);
if (pr[index].type != DT_ROOT)
panic("invalid page root entry %d\n", index);
#if 0
// not needed anymore
if (pr[index].type != DT_ROOT) {
unsigned aindex = index & ~(NUM_DIRTBL_PER_PAGE-1); /* aligned */
//TRACE(("missing page root entry %d ai %d\n", index, aindex));
@ -157,6 +185,7 @@ add_page_table(addr_t virtualAddress)
tbl += SIZ_DIRTBL;
}
}
#endif
pd = (page_directory_entry *)PRE_TO_TA(pr[index]);
index = VADDR_TO_PDENT(virtualAddress);
@ -290,6 +319,7 @@ const struct boot_mmu_ops k040MMUOps = {
&initialize,
&set_tt,
&load_rp,
&allocate_kernel_pgdirs,
&enable_paging,
&add_page_table,
&unmap_page,