* platform_allocate_region() has a new boolean parameter "exactAddress",
  specifying whether only the exact supplied address is acceptable. If
  false, the address is considered a hint only: it is used if available;
  otherwise the next free address above it is tried, and, as a last
  resort, any free address (see the first sketch below). This feature is
  only implemented for PPC. It is needed since the preferred kernel text
  base address 0x80000000 might not be available (and actually isn't on
  my Mac mini).
* Fixed a bug in the PPC memory management code:
  is_{virtual,physical}_allocated() checked whether the given range was
  completely contained by a single existing range instead of checking
  for intersection (see the second sketch below). As a consequence we
  could (and did) allocate a range intersecting already allocated
  ranges; the kernel segment thus overwrote OF memory, for instance.
* The ELF loader now makes sure that it got both the text and the data
  segment of the image to be loaded (see the third sketch below).
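
First sketch: how a caller might use the new parameter. This is
hypothetical code, not part of this change; "textSize" and the
try-exact-then-hint policy are illustrative only:

	// try to get the preferred kernel text base at its exact address
	void *base = (void *)0x80000000;
	if (platform_allocate_region(&base, textSize,
			B_READ_AREA | B_WRITE_AREA, true) != B_OK) {
		// the exact base is taken; pass the address as a hint instead,
		// so the platform picks the next free address (or any, if needed)
		base = (void *)0x80000000;
		if (platform_allocate_region(&base, textSize,
				B_READ_AREA | B_WRITE_AREA, false) != B_OK)
			panic("unable to allocate a kernel text region");
	}
	// on success "base" contains the actually allocated virtual address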
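
Second sketch: the difference between the old containment test and the
new intersection test, as standalone helpers (the real code checks
against the gKernelArgs allocation ranges):

	static bool
	contains(addr_t rangeStart, addr_t rangeSize, addr_t start, addr_t size)
	{
		// old logic: only a range lying completely within one
		// allocated range was reported as allocated
		return start >= rangeStart && start + size <= rangeStart + rangeSize;
	}

	static bool
	intersects(addr_t rangeStart, addr_t rangeSize, addr_t start, addr_t size)
	{
		// new logic: any overlap at all counts as allocated
		addr_t rangeEnd = rangeStart + rangeSize;
		addr_t end = start + size;
		return (start >= rangeStart && start < rangeEnd)
			|| (rangeStart >= start && rangeStart < end);
	}

	// allocated range [0x1000, 0x2000), request [0x1800, 0x2800):
	// contains()   -> false: the request looked free and clobbered the
	//                 tail of the allocated range
	// intersects() -> true:  the overlap is detected and a different
	//                 address gets picked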
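
Third sketch: the segment classification the new ELF loader check
relies on (simplified, using standard ELF32 names; elfHeader,
programHeaders and region are assumed context, not actual code):

	for (int32 i = 0; i < elfHeader.e_phnum; i++) {
		Elf32_Phdr *header = &programHeaders[i];
		if (header->p_type != PT_LOAD)
			continue;
		if ((header->p_flags & PF_W) != 0)
			region = &image->data_region;	// writable -> data
		else
			region = &image->text_region;	// read/execute -> text
		// ... fill in region->start/region->size from the header ...
	}
	// if either image->text_region.size or image->data_region.size is
	// still 0 afterwards, loading now fails with B_BAD_DATA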

The PPC boot loader successfully loads kernel and modules now. Next
comes the hard part, I'm afraid.



git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@15708 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold, 2005-12-29 01:50:23 +00:00
commit c83d9dad1c (parent b566a43ec8)
9 changed files with 91 additions and 28 deletions


@@ -25,7 +25,8 @@ extern void platform_release_heap(struct stage2_args *args, void *base);
 extern status_t platform_init_heap(struct stage2_args *args, void **_base, void **_top);

 /* MMU/memory functions */
-extern status_t platform_allocate_region(void **_virtualAddress, size_t size, uint8 protection);
+extern status_t platform_allocate_region(void **_virtualAddress, size_t size,
+	uint8 protection, bool exactAddress);
 extern status_t platform_free_region(void *address, size_t size);

 /* boot options */


@@ -14,7 +14,8 @@ extern "C" {
 #endif

 extern status_t arch_set_callback(void);

-extern void *arch_mmu_allocate(void *address, size_t size, uint8 protection);
+extern void *arch_mmu_allocate(void *address, size_t size, uint8 protection,
+	bool exactAddress);
 extern status_t arch_mmu_free(void *address, size_t size);
 extern status_t arch_mmu_init(void);


@@ -213,6 +213,13 @@ elf_load_image(int fd, preloaded_image *image)
 			region->start, region->size, region->delta));
 	}

+	// found both, text and data?
+	if (image->data_region.size == 0 || image->text_region.size == 0) {
+		dprintf("Couldn't find both text and data segment!\n");
+		status = B_BAD_DATA;
+		goto error1;
+	}
+
 	// get the segment order
 	elf_region *firstRegion;
 	elf_region *secondRegion;
@@ -233,10 +240,11 @@ elf_load_image(int fd, preloaded_image *image)
 		goto error1;
 	}

-	// if image->text_region.start == NULL (image is relocatable),
-	// platform_allocate_region() automatically allocates an address
+	// The kernel and the modules are relocatable, thus
+	// platform_allocate_region() can automatically allocate an address,
+	// but shall prefer the specified base address.
 	if (platform_allocate_region((void **)&firstRegion->start, totalSize,
-			B_READ_AREA | B_WRITE_AREA) < B_OK) {
+			B_READ_AREA | B_WRITE_AREA, false) < B_OK) {
 		status = B_NO_MEMORY;
 		goto error1;
 	}


@@ -558,7 +558,7 @@ TarFS::Volume::Init(boot::Partition *partition)
 		return B_BAD_DATA;

 	if (platform_allocate_region((void **)&out, kTarRegionSize,
-			B_READ_AREA | B_WRITE_AREA) != B_OK) {
+			B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
 		TRACE(("tarfs: allocating region failed!\n"));
 		return B_NO_MEMORY;
 	}


@@ -153,8 +153,10 @@ kernel_args_malloc(size_t size)
 	if (size > kChunkSize / 2 && sFree < size) {
 		// the block is so large, we'll allocate a new block for it
 		void *block = NULL;
-		if (platform_allocate_region(&block, size, B_READ_AREA | B_WRITE_AREA) != B_OK)
+		if (platform_allocate_region(&block, size, B_READ_AREA | B_WRITE_AREA,
+				false) != B_OK) {
 			return NULL;
+		}

 		if (add_kernel_args_range(block, size) != B_OK)
 			panic("kernel_args max range to low!\n");
@@ -163,8 +165,10 @@ kernel_args_malloc(size_t size)
 		// just allocate a new block and "close" the old one
 		void *block = NULL;
-		if (platform_allocate_region(&block, kChunkSize, B_READ_AREA | B_WRITE_AREA) != B_OK)
+		if (platform_allocate_region(&block, kChunkSize,
+				B_READ_AREA | B_WRITE_AREA, false) != B_OK) {
 			return NULL;
+		}

 		sFirstFree = (void *)((addr_t)block + size);
 		sLast = block;


@@ -593,7 +593,8 @@ mmu_init(void)


 extern "C" status_t
-platform_allocate_region(void **_address, size_t size, uint8 protection)
+platform_allocate_region(void **_address, size_t size, uint8 protection,
+	bool /*exactAddress*/)
 {
 	void *address = mmu_allocate(*_address, size);
 	if (address == NULL)


@@ -175,6 +175,10 @@ find_physical_memory_ranges(size_t &total)
 static bool
 is_in_range(addr_range *ranges, uint32 numRanges, void *address, size_t size)
 {
+	// TODO: This function returns whether any single allocated range
+	// completely contains the given range. If the given range crosses
+	// allocated range boundaries, but is nevertheless covered completely, the
+	// function returns false!
 	addr_t start = (addr_t)address;
 	addr_t end = start + size;

@@ -190,10 +194,30 @@ is_in_range(addr_range *ranges, uint32 numRanges, void *address, size_t size)
 }


+static bool
+intersects_ranges(addr_range *ranges, uint32 numRanges, void *address,
+	size_t size)
+{
+	addr_t start = (addr_t)address;
+	addr_t end = start + size;
+
+	for (uint32 i = 0; i < numRanges; i++) {
+		addr_t rangeStart = ranges[i].start;
+		addr_t rangeEnd = rangeStart + ranges[i].size;
+		if ((start >= rangeStart && start < rangeEnd)
+			|| (rangeStart >= start && rangeStart < end)) {
+			return true;
+		}
+	}
+
+	return false;
+}
+
+
 static bool
 is_virtual_allocated(void *address, size_t size)
 {
-	return is_in_range(gKernelArgs.virtual_allocated_range,
+	return intersects_ranges(gKernelArgs.virtual_allocated_range,
 		gKernelArgs.num_virtual_allocated_ranges,
 		address, size);
 }
@@ -202,7 +226,7 @@ is_virtual_allocated(void *address, size_t size)
 static bool
 is_physical_allocated(void *address, size_t size)
 {
-	return is_in_range(gKernelArgs.physical_allocated_range,
+	return intersects_ranges(gKernelArgs.physical_allocated_range,
 		gKernelArgs.num_physical_allocated_ranges,
 		address, size);
 }
@@ -328,6 +352,7 @@ find_allocated_ranges(void *pageTable, page_table_entry_group **_physicalPageTab
 	for (int i = 0; i < length; i++) {
 		struct translation_map *map = &translations[i];
 //printf("%i: map: %p, length %d -> physical: %p, mode %d\n", i, map->virtual_address, map->length, map->physical_address, map->mode);
+printf("%i: map: %p, length %d -> physical: %p, mode %d\n", i, map->virtual_address, map->length, map->physical_address, map->mode);

 		// insert range in physical allocated, if it points to physical memory
@@ -425,19 +450,34 @@ find_free_physical_range(size_t size)
 static void *
-find_free_virtual_range(size_t size)
+find_free_virtual_range(void *base, size_t size)
 {
+	if (base && !is_virtual_allocated(base, size))
+		return base;
+
+	void *firstFound = NULL;
+	void *firstBaseFound = NULL;
 	for (uint32 i = 0; i < gKernelArgs.num_virtual_allocated_ranges; i++) {
 		void *address = (void *)(gKernelArgs.virtual_allocated_range[i].start + gKernelArgs.virtual_allocated_range[i].size);
-		if (!is_virtual_allocated(address, size))
-			return address;
+		if (!is_virtual_allocated(address, size)) {
+			if (!base)
+				return address;
+			if (firstFound == NULL)
+				firstFound = address;
+			if (address >= base
+				&& (firstBaseFound == NULL || address < firstBaseFound)) {
+				firstBaseFound = address;
+			}
+		}
 	}

-	return NULL;
+	return (firstBaseFound ? firstBaseFound : firstFound);
 }


 extern "C" void *
-arch_mmu_allocate(void *virtualAddress, size_t size, uint8 protection)
+arch_mmu_allocate(void *_virtualAddress, size_t size, uint8 protection,
+	bool exactAddress)
 {
 	// we only know page sizes
 	size = ROUNDUP(size, B_PAGE_SIZE);
@@ -452,14 +492,17 @@ arch_mmu_allocate(void *virtualAddress, size_t size, uint8 protection)
 	else
 		protection = 0x21;

-	if (virtualAddress == NULL) {
-		// find free address large enough to hold "size"
-		virtualAddress = find_free_virtual_range(size);
-		if (virtualAddress == NULL)
-			return NULL;
-	} else {
-		if (is_virtual_allocated(virtualAddress, size))
-			return NULL;
-	}
+	// find free address large enough to hold "size"
+	void *virtualAddress = find_free_virtual_range(_virtualAddress, size);
+	if (virtualAddress == NULL)
+		return NULL;
+
+	// fail if the exact address was requested, but is not free
+	if (exactAddress && _virtualAddress && virtualAddress != _virtualAddress) {
+		dprintf("arch_mmu_allocate(): exact address requested, but virtual "
+			"range (base: %p, size: %lu) is not free.\n",
+			_virtualAddress, size);
+		return NULL;
+	}

 	// we have a free virtual range for the allocation, now
@@ -468,8 +511,11 @@ arch_mmu_allocate(void *virtualAddress, size_t size, uint8 protection)
 	// so that we don't have to optimize for these cases :)

 	void *physicalAddress = find_free_physical_range(size);
-	if (physicalAddress == NULL)
+	if (physicalAddress == NULL) {
+		dprintf("arch_mmu_allocate(base: %p, size: %lu) no free physical "
+			"address\n", virtualAddress, size);
 		return NULL;
+	}

 	// everything went fine, so lets mark the space as used.


@@ -26,7 +26,7 @@ platform_init_heap(stage2_args *args, void **_base, void **_top)
 	*_base = NULL;
 	status_t error = platform_allocate_region(_base, args->heap_size,
-		B_READ_AREA | B_WRITE_AREA);
+		B_READ_AREA | B_WRITE_AREA, false);
 	if (error != B_OK)
 		return error;


@@ -13,12 +13,14 @@
 status_t
-platform_allocate_region(void **_address, size_t size, uint8 protection)
+platform_allocate_region(void **_address, size_t size, uint8 protection,
+	bool exactAddress)
 {
 	if (size == 0)
 		return B_BAD_VALUE;

-	void *address = arch_mmu_allocate(*_address, size, protection);
+	void *address = arch_mmu_allocate(*_address, size, protection,
+		exactAddress);
 	if (address == NULL)
 		return B_NO_MEMORY;