A few improvements suggested by Ingo.

Alex Smith 2012-07-06 14:34:50 +01:00
parent 84bf29f97b
commit 15feb60325
7 changed files with 37 additions and 30 deletions

View File

@@ -18,23 +18,22 @@
// 32-bit and 64-bit kernel load addresses.
#define KERNEL_BASE 0x80000000
#define KERNEL_BASE_64BIT 0xffffffff80000000ll
#define KERNEL_LOAD_BASE 0x80000000
#define KERNEL_LOAD_BASE_64BIT 0xffffffff80000000ll
#elif defined(__x86_64__)
// Base of the kernel address space.
// When compiling the bootloader, KERNEL_BASE is set to the x86 base address,
// KERNEL_BASE_64BIT is set to where the kernel is loaded to.
// For the kernel, KERNEL_BASE is the base of the kernel address space. This is
// NOT the address where the kernel is loaded to: the kernel is loaded in the
// top 2GB of the virtual address space as required by GCC's kernel code model.
// The whole kernel address space is the top 512GB of the address space.
// KERNEL_BASE is the base of the kernel address space. This differs from the
// address where the kernel is loaded to: the kernel is loaded in the top 2GB
// of the virtual address space as required by GCC's kernel code model. The
// whole kernel address space is the top 512GB of the address space.
#define KERNEL_BASE 0xffffff8000000000
#define KERNEL_SIZE 0x8000000000
#define KERNEL_TOP (KERNEL_BASE + (KERNEL_SIZE - 1))
#define KERNEL_LOAD_BASE 0xffffffff80000000
// Kernel physical memory map area.
#define KERNEL_PMAP_BASE 0xffffff0000000000
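A quick way to see how these constants relate: the following standalone sketch (not part of the commit) restates the layout described above as compile-time checks, using the values from this hunk.

// Sketch: compile-time checks of the x86_64 layout described above.
#define KERNEL_BASE          0xffffff8000000000ull
#define KERNEL_SIZE          0x8000000000ull
#define KERNEL_TOP           (KERNEL_BASE + (KERNEL_SIZE - 1))
#define KERNEL_LOAD_BASE     0xffffffff80000000ull

// The kernel address space is the top 512GB of the virtual address space.
static_assert(KERNEL_SIZE == 512ull * 1024 * 1024 * 1024, "512GB");
static_assert(KERNEL_TOP == 0xffffffffffffffffull, "reaches the very top");

// The load address lies inside the kernel address space, exactly 2GB below
// the top, as required by GCC's kernel code model (-mcmodel=kernel).
static_assert(KERNEL_LOAD_BASE >= KERNEL_BASE, "inside kernel space");
static_assert(0ull - KERNEL_LOAD_BASE == 0x80000000ull, "top 2GB");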

View File

@@ -15,6 +15,10 @@
#include <arch_config.h>
#ifndef KERNEL_LOAD_BASE
# define KERNEL_LOAD_BASE KERNEL_BASE
#endif
// Macro to check whether an address is in the kernel address space (avoids
// an always-true lower-bound check when KERNEL_BASE is 0)
#if KERNEL_BASE == 0
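For illustration, a sketch of how such a guard can look. The macro name IS_KERNEL_ADDRESS and its exact form are assumptions here, not text from this commit: when KERNEL_BASE is 0, an unsigned "address >= KERNEL_BASE" comparison is always true (and draws compiler warnings), so the lower bound is only tested when it is nonzero.

// Sketch (assumed form of the guard described above).
#if KERNEL_BASE == 0
#	define IS_KERNEL_ADDRESS(x) \
		((addr_t)(x) <= KERNEL_TOP)
#else
#	define IS_KERNEL_ADDRESS(x) \
		((addr_t)(x) >= KERNEL_BASE && (addr_t)(x) <= KERNEL_TOP)
#endif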

View File

@@ -116,8 +116,8 @@ struct ELF64Class {
AllocateRegion(AddrType* _address, AddrType size, uint8 protection,
void **_mappedAddress)
{
// Assume the real 64-bit base address is KERNEL_BASE_64BIT and the
// mappings in the loader address space are at KERNEL_BASE.
// Assume the real 64-bit base address is KERNEL_LOAD_BASE_64BIT and
// the mappings in the loader address space are at KERNEL_LOAD_BASE.
void* address = (void*)(addr_t)(*_address & 0xffffffff);
@@ -127,14 +127,16 @@ struct ELF64Class {
return status;
*_mappedAddress = address;
*_address = (AddrType)(addr_t)address + KERNEL_BASE_64BIT - KERNEL_BASE;
*_address = (AddrType)(addr_t)address + KERNEL_LOAD_BASE_64BIT
- KERNEL_LOAD_BASE;
return B_OK;
}
static inline void*
Map(AddrType address)
{
return (void*)(addr_t)(address - KERNEL_BASE_64BIT + KERNEL_BASE);
return (void*)(addr_t)(address - KERNEL_LOAD_BASE_64BIT
+ KERNEL_LOAD_BASE);
}
};
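The translation in AllocateRegion() and Map() is a fixed offset applied in opposite directions. A minimal standalone sketch of the round trip, with the constants from arch_kernel.h and helper names invented for illustration:

#include <stdint.h>

typedef uint32_t addr_t;    // the loader runs with 32-bit addresses
typedef uint64_t AddrType;

static const AddrType kLoadBase64 = 0xffffffff80000000ull; // KERNEL_LOAD_BASE_64BIT
static const AddrType kLoadBase32 = 0x80000000ull;         // KERNEL_LOAD_BASE

// Loader mapping -> final 64-bit kernel address, as in AllocateRegion().
static AddrType to_kernel(addr_t loaderAddress)
{
	return (AddrType)loaderAddress + kLoadBase64 - kLoadBase32;
}

// Final 64-bit kernel address -> loader mapping, as in Map().
static addr_t to_loader(AddrType kernelAddress)
{
	return (addr_t)(kernelAddress - kLoadBase64 + kLoadBase32);
}

// Round trip: to_loader(to_kernel(a)) == a for any loader address a.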

View File

@@ -36,7 +36,7 @@ static const uint64 kPageMappingFlags = 0x103;
static inline uint64
fix_address(uint64 address)
{
return address - KERNEL_BASE + KERNEL_BASE_64BIT;
return address - KERNEL_LOAD_BASE + KERNEL_LOAD_BASE_64BIT;
}
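A worked example with the values from arch_kernel.h: fix_address() shifts an address by the constant difference KERNEL_LOAD_BASE_64BIT - KERNEL_LOAD_BASE = 0xffffffff00000000.

#include <assert.h>
#include <stdint.h>

int main()
{
	uint64_t address = 0x80001000ull;         // a loader virtual address
	uint64_t fixed = address
		- 0x80000000ull                       // KERNEL_LOAD_BASE
		+ 0xffffffff80000000ull;              // KERNEL_LOAD_BASE_64BIT
	assert(fixed == 0xffffffff80001000ull);   // low 32 bits preserved
	return 0;
}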
@@ -157,7 +157,7 @@ long_mmu_init()
pdpt[510] = physicalAddress | kTableMappingFlags;
// Store the virtual memory usage information.
gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE_64BIT;
gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE_64BIT;
gKernelArgs.virtual_allocated_range[0].size = mmu_get_virtual_usage();
gKernelArgs.num_virtual_allocated_ranges = 1;
@@ -176,14 +176,14 @@ long_mmu_init()
}
// Get the physical address to map.
if (!mmu_get_virtual_mapping(KERNEL_BASE + (i * B_PAGE_SIZE),
if (!mmu_get_virtual_mapping(KERNEL_LOAD_BASE + (i * B_PAGE_SIZE),
&physicalAddress))
continue;
pageTable[i % 512] = physicalAddress | kPageMappingFlags;
}
gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_BASE_64BIT
gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_LOAD_BASE_64BIT
+ gKernelArgs.virtual_allocated_range[0].size, 0x200000);
// Sort the address ranges.
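The pdpt[510] slot and the pageTable[i % 512] stride both come from x86_64's 9-bits-per-level page tables. A small sketch (not from the commit) that decomposes a virtual address into the four table indices:

#include <stdint.h>

// Each x86_64 paging level indexes 512 (2^9) entries; pages are 4KB (2^12).
struct PageIndices {
	unsigned pml4, pdpt, pageDir, pageTable;
};

static PageIndices
decompose(uint64_t virtualAddress)
{
	PageIndices indices;
	indices.pml4      = (virtualAddress >> 39) & 511;
	indices.pdpt      = (virtualAddress >> 30) & 511;
	indices.pageDir   = (virtualAddress >> 21) & 511;
	indices.pageTable = (virtualAddress >> 12) & 511;
	return indices;
}

// For KERNEL_LOAD_BASE_64BIT = 0xffffffff80000000 this yields pml4 = 511 and
// pdpt = 510 -- the slot written above -- with pageDir and pageTable starting
// at 0, which is why the loop can simply step through pageTable[i % 512].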

View File

@@ -78,7 +78,7 @@ static uint32 *sPageDirectory = 0;
#ifdef _PXE_ENV
static addr_t sNextPhysicalAddress = 0x112000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;
static addr_t sNextPageTableAddress = 0x7d000;
static const uint32 kPageTableRegionEnd = 0x8b000;
@@ -87,7 +87,7 @@ static const uint32 kPageTableRegionEnd = 0x8b000;
#else
static addr_t sNextPhysicalAddress = 0x100000;
static addr_t sNextVirtualAddress = KERNEL_BASE + kMaxKernelSize;
static addr_t sNextVirtualAddress = KERNEL_LOAD_BASE + kMaxKernelSize;
static addr_t sNextPageTableAddress = 0x90000;
static const uint32 kPageTableRegionEnd = 0x9e000;
@@ -195,7 +195,7 @@ unmap_page(addr_t virtualAddress)
{
TRACE("unmap_page(virtualAddress = %p)\n", (void *)virtualAddress);
if (virtualAddress < KERNEL_BASE) {
if (virtualAddress < KERNEL_LOAD_BASE) {
panic("unmap_page: asked to unmap invalid page %p!\n",
(void *)virtualAddress);
}
@@ -220,7 +220,7 @@ map_page(addr_t virtualAddress, addr_t physicalAddress, uint32 flags)
TRACE("map_page: vaddr 0x%lx, paddr 0x%lx\n", virtualAddress,
physicalAddress);
if (virtualAddress < KERNEL_BASE) {
if (virtualAddress < KERNEL_LOAD_BASE) {
panic("map_page: asked to map invalid page %p!\n",
(void *)virtualAddress);
}
@@ -397,8 +397,8 @@ mmu_allocate(void *virtualAddress, size_t size)
addr_t address = (addr_t)virtualAddress;
// is the address within the valid range?
if (address < KERNEL_BASE
|| address + size >= KERNEL_BASE + kMaxKernelSize)
if (address < KERNEL_LOAD_BASE
|| address + size >= KERNEL_LOAD_BASE + kMaxKernelSize)
return NULL;
for (uint32 i = 0; i < size; i++) {
@@ -479,7 +479,7 @@ mmu_free(void *virtualAddress, size_t size)
size = (size + pageOffset + B_PAGE_SIZE - 1) / B_PAGE_SIZE * B_PAGE_SIZE;
// is the address within the valid range?
if (address < KERNEL_BASE || address + size > sNextVirtualAddress) {
if (address < KERNEL_LOAD_BASE || address + size > sNextVirtualAddress) {
panic("mmu_free: asked to unmap out of range region (%p, size %lx)\n",
(void *)address, size);
}
@@ -500,14 +500,14 @@ mmu_free(void *virtualAddress, size_t size)
size_t
mmu_get_virtual_usage()
{
return sNextVirtualAddress - KERNEL_BASE;
return sNextVirtualAddress - KERNEL_LOAD_BASE;
}
bool
mmu_get_virtual_mapping(addr_t virtualAddress, addr_t *_physicalAddress)
{
if (virtualAddress < KERNEL_BASE) {
if (virtualAddress < KERNEL_LOAD_BASE) {
panic("mmu_get_virtual_mapping: asked to lookup invalid page %p!\n",
(void *)virtualAddress);
}
@@ -607,9 +607,9 @@ mmu_init_for_kernel(void)
// Save the memory we've virtually allocated (for the kernel and other
// stuff)
gKernelArgs.virtual_allocated_range[0].start = KERNEL_BASE;
gKernelArgs.virtual_allocated_range[0].start = KERNEL_LOAD_BASE;
gKernelArgs.virtual_allocated_range[0].size
= sNextVirtualAddress - KERNEL_BASE;
= sNextVirtualAddress - KERNEL_LOAD_BASE;
gKernelArgs.num_virtual_allocated_ranges = 1;
// sort the address ranges
@@ -654,7 +654,7 @@ mmu_init(void)
{
TRACE("mmu_init\n");
gKernelArgs.arch_args.virtual_end = KERNEL_BASE;
gKernelArgs.arch_args.virtual_end = KERNEL_LOAD_BASE;
gKernelArgs.physical_allocated_range[0].start = sNextPhysicalAddress;
gKernelArgs.physical_allocated_range[0].size = 0;
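The pattern behind these hunks is a simple bump allocator: sNextVirtualAddress only ever moves forward, so mmu_get_virtual_usage() is just its distance from KERNEL_LOAD_BASE. A reduced sketch of the idea (kMaxKernelSize here is a placeholder value, not the commit's):

#include <stdint.h>

typedef uint64_t addr_t;

static const addr_t kLoadBase = 0x80000000;      // KERNEL_LOAD_BASE
static const addr_t kMaxKernelSize = 0x1000000;  // placeholder, 16MB
static addr_t sNextVirtualAddress = kLoadBase + kMaxKernelSize;

// Hand out the next "size" bytes of kernel virtual address space.
static addr_t
allocate_virtual(addr_t size)
{
	addr_t address = sNextVirtualAddress;
	sNextVirtualAddress += size;
	return address;
}

// Everything handed out so far, cf. mmu_get_virtual_usage().
static addr_t
virtual_usage()
{
	return sNextVirtualAddress - kLoadBase;
}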

View File

@@ -94,7 +94,8 @@ static bool
is_iframe(addr_t frame)
{
addr_t previousFrame = *(addr_t*)frame;
return ((previousFrame & ~IFRAME_TYPE_MASK) == 0 && previousFrame != 0);
return ((previousFrame & ~(addr_t)IFRAME_TYPE_MASK) == 0
&& previousFrame != 0);
}

View File

@@ -469,7 +469,8 @@ is_iframe(Thread* thread, addr_t frame)
return false;
addr_t previousFrame = *(addr_t*)frame;
return ((previousFrame & ~IFRAME_TYPE_MASK) == 0 && previousFrame != 0);
return ((previousFrame & ~(addr_t)IFRAME_TYPE_MASK) == 0
&& previousFrame != 0);
}
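The (addr_t) cast added in these two hunks widens the mask before it is complemented. That matters if IFRAME_TYPE_MASK expands to an unsigned 32-bit constant: ~mask is then computed at 32-bit width and zero-extends to 64 bits, so the test silently ignores the upper half of the frame pointer. A minimal sketch of the difference (the mask value is assumed for illustration):

#include <assert.h>
#include <stdint.h>

typedef uint64_t addr_t;

#define IFRAME_TYPE_MASK 0x3u   // assumed unsigned 32-bit value

int main()
{
	// A plausible 64-bit frame pointer whose low 32 bits happen to be 0.
	addr_t previousFrame = 0xffff800000000000ull;

	// Without the cast: ~IFRAME_TYPE_MASK is the 32-bit value 0xfffffffc,
	// which zero-extends to 0x00000000fffffffc; the upper bits vanish and
	// the frame is misclassified as an iframe marker.
	assert((previousFrame & ~IFRAME_TYPE_MASK) == 0);

	// With the cast, the complement is taken at 64-bit width and the
	// check correctly rejects the address.
	assert((previousFrame & ~(addr_t)IFRAME_TYPE_MASK) != 0);
	return 0;
}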