Rewrite allocated regions, remove mmu_free

mmu_free was only used for ACPI and only set a released flag
in the region. This released flag was never checked anywhere.

platform_free_region removes region from linked list now.
There are a lot of calls to it, so this should save some memory and make
the memory layout cleaner.

Compiles and boots on AMD64.
Compiles on ARM and loads the kernel, but can't start it, since we haven't
set up the mmap with proper virtual addresses (together with some other
fixes to the ld-script and the ELF32 loader EFI code).

Change-Id: Icfe871fa400b49f19e7ca1dbb9e1561309b21a22
Reviewed-on: https://review.haiku-os.org/c/haiku/+/3403
Reviewed-by: Rene Gollent <rene@gollent.com>
This commit is contained in:
Fredrik Holmqvist 2020-11-19 18:41:40 +01:00
parent 7b9ad03e70
commit e6e0db9ef4
2 changed files with 60 additions and 93 deletions

View File

@ -1,5 +1,5 @@
/*
* Copyright 2016 Haiku, Inc. All rights reserved.
* Copyright 2016-2020 Haiku, Inc. All rights reserved.
* Copyright 2014, Jessica Hamilton, jessica.l.hamilton@gmail.com.
* Copyright 2014, Henry Harrington, henry.harrington@gmail.com.
* Distributed under the terms of the MIT License.
@ -25,12 +25,20 @@
#endif
struct allocated_memory_region {
allocated_memory_region *next;
struct memory_region {
memory_region *next;
addr_t vaddr;
phys_addr_t paddr;
size_t size;
bool released;
void dprint(const char * msg) {
dprintf("%s memory_region v: %#lx p: %#lx size: %lu\n", msg, vaddr,
paddr, size);
}
bool matches(phys_addr_t expected_paddr, size_t expected_size) {
return paddr == expected_paddr && size == expected_size;
}
};
@ -43,7 +51,7 @@ static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + 32 * 1024 * 1024;
#endif
static allocated_memory_region *allocated_memory_regions = NULL;
static memory_region *allocated_regions = NULL;
extern "C" phys_addr_t
@ -77,7 +85,6 @@ extern "C" addr_t
get_current_virtual_address()
{
TRACE("%s: called\n", __func__);
return sNextVirtualAddress;
}
@ -103,18 +110,11 @@ platform_allocate_region(void **_address, size_t size, uint8 /* protection */,
return B_NO_MEMORY;
efi_physical_addr addr;
size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);
allocated_memory_region *region = new(std::nothrow) allocated_memory_region;
if (region == NULL)
return B_NO_MEMORY;
size_t pages = ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE;
efi_status status = kBootServices->AllocatePages(AllocateAnyPages,
EfiLoaderData, aligned_size / B_PAGE_SIZE, &addr);
if (status != EFI_SUCCESS) {
delete region;
EfiLoaderData, pages, &addr);
if (status != EFI_SUCCESS)
return B_NO_MEMORY;
}
// Addresses above 512GB not supported.
// Memory map regions above 512GB can be ignored, but if EFI returns pages
@ -122,21 +122,20 @@ platform_allocate_region(void **_address, size_t size, uint8 /* protection */,
if (addr + size > (512ull * 1024 * 1024 * 1024))
panic("Can't currently support more than 512GB of RAM!");
region->next = allocated_memory_regions;
allocated_memory_regions = region;
region->vaddr = 0;
region->paddr = addr;
region->size = size;
region->released = false;
memory_region *region = new(std::nothrow) memory_region {
next: allocated_regions,
vaddr: *_address == NULL ? 0 : (addr_t)*_address,
paddr: addr,
size: size
};
if (*_address != NULL) {
region->vaddr = (addr_t)*_address;
if (region == NULL) {
kBootServices->FreePages(addr, pages);
return B_NO_MEMORY;
}
//dprintf("Allocated region %#lx (requested %p) %#lx %lu\n", region->vaddr, *_address, region->paddr, region->size);
//region->dprint("Allocated");
allocated_regions = region;
*_address = (void *)region->paddr;
return B_OK;
}
@ -158,52 +157,13 @@ mmu_map_physical_memory(addr_t physicalAddress, size_t size, uint32 flags)
size += pageOffset;
if (insert_physical_allocated_range(physicalAddress,
ROUNDUP(size, B_PAGE_SIZE)) != B_OK) {
ROUNDUP(size, B_PAGE_SIZE)) != B_OK)
return B_NO_MEMORY;
}
return physicalAddress + pageOffset;
}
// Mark a previously allocated region as released. Note: this only sets the
// region's `released` flag; the pages are NOT returned to EFI boot services
// here, and nothing in this file ever reads the flag back (which is why this
// commit removes the function entirely).
extern "C" void
mmu_free(void *virtualAddress, size_t size)
{
TRACE("%s: called\n", __func__);
// The boot loader runs with identity mapping, so the "virtual" address
// handed in is in fact a physical address.
addr_t physicalAddress = (addr_t)virtualAddress;
// Round the start down to a page boundary and grow the size accordingly,
// so the lookup below compares whole-page extents.
addr_t pageOffset = physicalAddress & (B_PAGE_SIZE - 1);
physicalAddress -= pageOffset;
size += pageOffset;
size_t aligned_size = ROUNDUP(size, B_PAGE_SIZE);
// Linear scan of the singly linked region list for an exact
// (start, aligned size) match; silently does nothing on a miss.
for (allocated_memory_region *region = allocated_memory_regions; region;
region = region->next) {
if (region->paddr == physicalAddress && region->size == aligned_size) {
region->released = true;
return;
}
}
}
// Find the tracked allocation whose physical start address and size exactly
// match the given (address, size) pair. Returns NULL (written as 0 here) when
// no region matches; the caller keeps ownership of the returned node — the
// list still links it.
static allocated_memory_region *
get_region(void *address, size_t size)
{
TRACE("%s: called\n", __func__);
// Exact-match linear scan; note this compares the ORIGINAL requested size,
// not a page-rounded one, so callers must free with the same size they
// allocated with.
for (allocated_memory_region *region = allocated_memory_regions; region;
region = region->next) {
if (region->paddr == (phys_addr_t)address && region->size == size) {
return region;
}
}
return 0;
}
static void
convert_physical_ranges()
{
@ -213,24 +173,23 @@ convert_physical_ranges()
uint32 num_ranges = gKernelArgs.num_physical_allocated_ranges;
for (uint32 i = 0; i < num_ranges; ++i) {
allocated_memory_region *region
= new(std::nothrow) allocated_memory_region;
if (!region)
panic("Couldn't add allocated region");
// Addresses above 512GB not supported.
// Memory map regions above 512GB can be ignored, but if EFI returns
// pages above that there's nothing that can be done to fix it.
if (range[i].start + range[i].size > (512ull * 1024 * 1024 * 1024))
panic("Can't currently support more than 512GB of RAM!");
region->next = allocated_memory_regions;
allocated_memory_regions = region;
region->vaddr = 0;
region->paddr = range[i].start;
region->size = range[i].size;
region->released = false;
memory_region *region = new(std::nothrow) memory_region {
next: allocated_regions,
vaddr: 0,
paddr: range[i].start,
size: range[i].size
};
if (!region)
panic("Couldn't add allocated region");
allocated_regions = region;
// Clear out the allocated range
range[i].start = 0;
@ -250,8 +209,8 @@ platform_bootloader_address_to_kernel_address(void *address, addr_t *_result)
phys_addr_t addr = (phys_addr_t)address;
for (allocated_memory_region *region = allocated_memory_regions; region;
region = region->next) {
for (memory_region *region = allocated_regions; region;
region = region->next) {
if (region->paddr <= addr && addr < region->paddr + region->size) {
// Lazily allocate virtual memory.
if (region->vaddr == 0) {
@ -273,8 +232,10 @@ platform_kernel_address_to_bootloader_address(addr_t address, void **_result)
{
TRACE("%s: called\n", __func__);
for (allocated_memory_region *region = allocated_memory_regions; region; region = region->next) {
if (region->vaddr != 0 && region->vaddr <= address && address < region->vaddr + region->size) {
for (memory_region *region = allocated_regions; region;
region = region->next) {
if (region->vaddr != 0 && region->vaddr <= address
&& address < region->vaddr + region->size) {
*_result = (void *)(region->paddr + (address - region->vaddr));
//dprintf("Converted kernel address %#lx in region %#lx-%#lx to %p\n",
// address, region->vaddr, region->vaddr + region->size, *_result);
@ -292,11 +253,19 @@ platform_free_region(void *address, size_t size)
TRACE("%s: called to release region %p (%" B_PRIuSIZE ")\n", __func__,
address, size);
allocated_memory_region *region = get_region(address, size);
if (!region)
panic("Unknown region??");
kBootServices->FreePages((efi_physical_addr)address, ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE);
return B_OK;
for (memory_region **ref = &allocated_regions; *ref;
ref = &(*ref)->next) {
if ((*ref)->matches((phys_addr_t)address, size)) {
kBootServices->FreePages((efi_physical_addr)address,
ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE);
memory_region* old = *ref;
//pointer to current allocated_memory_region* now points to next
*ref = (*ref)->next;
old->dprint("Freeing");
delete old;
return B_OK;
}
}
panic("platform_free_region: Unknown region to free??");
return B_ERROR; // NOT Reached
}

View File

@ -39,8 +39,6 @@ extern phys_addr_t mmu_allocate_page();
extern addr_t mmu_map_physical_memory(addr_t physicalAddress, size_t size,
uint32 flags);
extern void mmu_free(void *virtualAddress, size_t size);
extern status_t platform_kernel_address_to_bootloader_address(addr_t address,
void **_result);