ppc: Keep memory mappings set up by OpenFirmware

Revert r36886 and fix compilation of insert_virtual_range_to_keep().
The use of void* vs. addr_t matches the surrounding boot loader code
but should probably be revised in favour of addr_t.
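
For reference, the fixed helper as it ends up in the first hunk below; the
(addr_t) cast bridges the existing void* parameter to insert_address_range():

static status_t
insert_virtual_range_to_keep(void *start, uint32 size)
{
	// Record the range in the kernel args so the kernel keeps it mapped.
	return insert_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
}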

create_area() in the kernel wrongly assumed a RAM-backed address range,
which was destined to fail since ranges below the kernel address space
were ignored anyway. Use vm_map_physical_memory() instead.
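
In short, instead of create_area() with B_ALREADY_WIRED, the kernel now looks
up the physical page behind each kept mapping and maps it explicitly; condensed
from the second file's hunk below:

phys_addr_t physicalAddress;
void *address = (void*)range.start;

// Find out which physical address OF mapped at range.start.
if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
		&physicalAddress) != B_OK)
	panic("arch_vm_init_end(): No page mapping for %p\n", address);

// Map that physical range; unlike create_area() this also covers non-RAM
// ranges such as the frame buffer or memory-mapped hardware registers.
area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
	"boot loader reserved area", &address, B_EXACT_ADDRESS, range.size,
	B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, physicalAddress, true);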

This fixes a hang once the frame buffer and other resources used by OF
get unmapped. Closes ticket #5193 again.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@38291 a95241bf-73f2-0310-859d-f6bbb57e9c96
Author: Andreas Färber
Date: 2010-08-20 22:22:38 +00:00
Commit: 31bce16715
Parent: 8e192dad09
2 changed files with 13 additions and 15 deletions

File 1 of 2 (boot loader side):

@@ -40,15 +40,13 @@ extern "C" uint8 __text_begin;
 extern "C" uint8 _end;
-#if 0
 static status_t
 insert_virtual_range_to_keep(void *start, uint32 size)
 {
-	return insert_memory_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
-		gKernelArgs.arch_args.num_virtual_ranges_to_keep,
-		MAX_VIRTUAL_RANGES_TO_KEEP, start, size);
+	return insert_address_range(gKernelArgs.arch_args.virtual_ranges_to_keep,
+		&gKernelArgs.arch_args.num_virtual_ranges_to_keep,
+		MAX_VIRTUAL_RANGES_TO_KEEP, (addr_t)start, size);
 }
-#endif
 static status_t
@@ -297,12 +295,6 @@ find_allocated_ranges(void *oldPageTable, void *pageTable,
 		// insert range in virtual ranges to keep
-		// TODO: ATM keeping the ranges doesn't make much sense. The OF usually identity
-		// maps stuff, which means that RAM will most likely be mapped < 2 GB, which we
-		// cannot preserve, since that doesn't lie in the kernel address space. Mappings
-		// >= 2 GB are probably memory mapped hardware registers or the frame buffer
-		// (i.e. non-RAM), which we don't handle correctly ATM.
-#if 0
 		if (keepRange) {
 			if (insert_virtual_range_to_keep(map->virtual_address,
 					map->length) != B_OK) {
@@ -310,7 +302,6 @@ find_allocated_ranges(void *oldPageTable, void *pageTable,
 					gKernelArgs.num_virtual_allocated_ranges);
 			}
 		}
-#endif
 		total += map->length;
}

File 2 of 2 (kernel side):

@@ -12,6 +12,7 @@
 #include <boot/kernel_args.h>
 #include <vm/vm.h>
+#include <vm/VMAddressSpace.h>
 #include <arch/vm.h>
 #include <arch_mmu.h>
@@ -117,10 +118,16 @@ arch_vm_init_end(kernel_args *args)
 			continue;
 		}
+		phys_addr_t physicalAddress;
 		void *address = (void*)range.start;
-		area_id area = create_area("boot loader reserved area", &address,
-			B_EXACT_ADDRESS, range.size, B_ALREADY_WIRED,
-			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+		if (vm_get_page_mapping(VMAddressSpace::KernelID(), range.start,
+				&physicalAddress) != B_OK)
+			panic("arch_vm_init_end(): No page mapping for %p\n", address);
+		area_id area = vm_map_physical_memory(VMAddressSpace::KernelID(),
+			"boot loader reserved area", &address,
+			B_EXACT_ADDRESS, range.size,
+			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
+			physicalAddress, true);
 		if (area < 0) {
 			panic("arch_vm_init_end(): Failed to create area for boot loader "
 				"reserved area: %p - %p\n", (void*)range.start,