* Refactored arch_vm_translation_map_init_post_area() a bit: Pulled out
  the remapping stuff into separate functions and made them available to
  others.
* Remap the exception handler space in arch_int_init_post_vm() into the
  kernel address space (same issue as with the page table).


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15783 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2006-01-02 03:30:58 +00:00
parent 835fb10b16
commit c8cd524c67
3 changed files with 112 additions and 44 deletions

View File

@ -8,8 +8,19 @@
#include <arch/vm_translation_map.h>
#ifdef __cplusplus
extern "C"
extern "C" {
#endif
void ppc_translation_map_change_asid(vm_translation_map *map);
status_t ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
size_t size);
void ppc_unmap_address_range(addr_t virtualAddress, size_t size);
status_t ppc_remap_address_range(addr_t *virtualAddress, size_t size,
bool unmap);
#ifdef __cplusplus
}
#endif
#endif /* _KERNEL_ARCH_PPC_VM_TRANSLATION_MAP_H */

View File

@ -6,14 +6,15 @@
* Distributed under the terms of the NewOS License.
*/
#include <int.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <kscheduler.h>
#include <thread.h>
#include <timer.h>
#include <vm.h>
#include <vm_priv.h>
#include <timer.h>
#include <thread.h>
#include <string.h>
@ -164,10 +165,23 @@ status_t
// Post-VM interrupt setup: ensures the exception handler area lies within
// the kernel address space (remapping it if necessary), creates an area
// over it, and copies the interrupt vector code into place.
// NOTE(review): this span is a diff view that has lost its +/- markers, so
// lines from both the old and the new revision appear interleaved (e.g. the
// two declarations of "handlers" below); the text is not compilable as-is.
arch_int_init_post_vm(kernel_args *args)
{
area_id exceptionArea;
void *handlers;
// new-revision line: initialize from the boot loader's handler address
void *handlers = (void *)args->arch_args.exception_handlers.start;
// We may need to remap the exception handler area into the kernel address
// space.
if (!IS_KERNEL_ADDRESS(handlers)) {
addr_t address = (addr_t)handlers;
// remap into kernel space; "true" also unmaps the old range
status_t error = ppc_remap_address_range(&address,
args->arch_args.exception_handlers.size, true);
if (error != B_OK) {
panic("arch_int_init_post_vm(): Failed to remap the exception "
"handler area!");
return error;
}
handlers = (void*)(address);
}
// create a region to map the irq vector code into (physical address 0x0)
// NOTE(review): the next line is the old revision's assignment, retained by
// the diff view; the new revision initializes "handlers" at declaration.
handlers = (void *)args->arch_args.exception_handlers.start;
exceptionArea = create_area("exception_handlers",
&handlers, B_EXACT_ADDRESS, args->arch_args.exception_handlers.size,
B_ALREADY_WIRED, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
@ -178,7 +192,7 @@ arch_int_init_post_vm(kernel_args *args)
// copy the handlers into this area
memcpy(handlers, &__irqvec_start, args->arch_args.exception_handlers.size);
// NOTE(review): old (address 0) and new ("handlers") sync calls both appear
// here because of the diff view; only the second belongs to this revision.
arch_cpu_sync_icache(0, 0x1000);
arch_cpu_sync_icache(handlers, 0x1000);
return B_OK;
}

View File

@ -549,58 +549,101 @@ arch_vm_translation_map_init_post_sem(kernel_args *args)
}
status_t
ppc_map_address_range(addr_t virtualAddress, addr_t physicalAddress,
	size_t size)
{
	// Maps the given physically contiguous range into the kernel address
	// space, one page at a time, with kernel read/write protection.
	// Returns B_OK, or the first error map_tmap() reports.
	addr_t endAddress = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);
	physicalAddress = ROUNDOWN(physicalAddress, B_PAGE_SIZE);

	vm_address_space *kernelSpace = vm_kernel_address_space();

	// walk the range page by page and enter each mapping
	while (virtualAddress < endAddress) {
		status_t status = map_tmap(&kernelSpace->translation_map,
			virtualAddress, physicalAddress,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
		if (status != B_OK)
			return status;

		virtualAddress += B_PAGE_SIZE;
		physicalAddress += B_PAGE_SIZE;
	}

	return B_OK;
}
void
ppc_unmap_address_range(addr_t virtualAddress, size_t size)
{
	// Removes the kernel translation map entries covering the given virtual
	// range. The range is expanded to whole page boundaries first; there is
	// no return value.
	addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
	virtualAddress = ROUNDOWN(virtualAddress, B_PAGE_SIZE);

	vm_address_space *addressSpace = vm_kernel_address_space();

	// fixed: the loop's init clause was the meaningless expression "0";
	// an empty init clause states the intent clearly
	for (; virtualAddress < virtualEnd; virtualAddress += B_PAGE_SIZE)
		remove_page_table_entry(&addressSpace->translation_map, virtualAddress);
}
status_t
ppc_remap_address_range(addr_t *_virtualAddress, size_t size, bool unmap)
{
	// Remaps an already-mapped kernel range to a freshly reserved spot in
	// the kernel address space. On success *_virtualAddress is updated to
	// the new base address; if "unmap" is true the old mapping is removed.
	// Assumes the backing physical pages are contiguous, since only the
	// first page's physical address is looked up — TODO confirm callers
	// guarantee this.
	addr_t virtualAddress = ROUNDOWN(*_virtualAddress, B_PAGE_SIZE);
	size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);

	vm_address_space *addressSpace = vm_kernel_address_space();

	// reserve space in the address space
	void *newAddress = NULL;
	status_t error = vm_reserve_address_range(addressSpace->id, &newAddress,
		B_ANY_KERNEL_ADDRESS, size, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
	if (error != B_OK)
		return error;

	// get the area's first physical page
	// NOTE(review): the reserved address range is not released on this or
	// the following error path — looks like a reservation leak; confirm
	// whether the reservation should be undone before returning.
	page_table_entry *entry = lookup_page_table_entry(
		&addressSpace->translation_map, virtualAddress);
	if (!entry)
		return B_ERROR;

	// physical page number shifted by the 4 KiB page size (1 << 12)
	addr_t physicalBase = entry->physical_page_number << 12;

	// map the pages
	error = ppc_map_address_range((addr_t)newAddress, physicalBase, size);
	if (error != B_OK)
		return error;

	*_virtualAddress = (addr_t)newAddress;

	// unmap the old pages
	if (unmap)
		ppc_unmap_address_range(virtualAddress, size);

	return B_OK;
}
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
// If the page table doesn't lie within the kernel address space, we
// remap it.
if (!IS_KERNEL_ADDRESS(sPageTable)) {
vm_address_space *addressSpace = vm_kernel_address_space();
// reserve space in the address space
void *newAddress = NULL;
status_t error = vm_reserve_address_range(addressSpace->id,
&newAddress, B_ANY_KERNEL_ADDRESS, sPageTableSize,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
addr_t newAddress = (addr_t)sPageTable;
status_t error = ppc_remap_address_range(&newAddress, sPageTableSize,
false);
if (error != B_OK) {
panic("arch_vm_translation_map_init_post_area(): Failed to reserve "
"space for the page table!");
panic("arch_vm_translation_map_init_post_area(): Failed to remap "
"the page table!");
return error;
}
// get the table page's first physical page
page_table_entry *entry = lookup_page_table_entry(
&addressSpace->translation_map, (addr_t)sPageTable);
if (!entry) {
panic("arch_vm_translation_map_init_post_area(): Couldn't find "
"the physical address of the page table!");
return B_ERROR;
}
addr_t physicalBase = entry->physical_page_number << 12;
// map the pages
for (addr_t i = 0; i < sPageTableSize; i += B_PAGE_SIZE) {
addr_t virtualAddress = (addr_t)newAddress + i;
addr_t physicalAddress = physicalBase + i;
error = map_tmap(&addressSpace->translation_map, virtualAddress,
physicalAddress, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (error != B_OK) {
panic("arch_vm_translation_map_init_post_area(): Failed to "
"remap the page table!");
return error;
}
}
// set the new page table address
addr_t oldVirtualBase = (addr_t)(sPageTable);
sPageTable = (page_table_entry_group*)newAddress;
// unmap the old pages
for (addr_t i = 0; i < sPageTableSize; i += B_PAGE_SIZE) {
remove_page_table_entry(&addressSpace->translation_map,
oldVirtualBase + i);
}
ppc_unmap_address_range(oldVirtualBase, sPageTableSize);
// TODO: We should probably map the page table via BAT. It is relatively large,
// and due to being a hash table the access patterns might look sporadic, which