Begun work on VMTranslationMap implementation for x86_64.

* Added empty source files for all the 64-bit paging method code, and a
  stub implementation of X86PagingMethod64Bit.
* arch_vm_translation_map.cpp has been modified to use X86PagingMethod64Bit
  on x86_64.
Alex Smith 2012-07-04 17:02:58 +01:00
parent 4988ca58a0
commit 950b24e32d
15 changed files with 304 additions and 64 deletions

View File

@@ -36,6 +36,10 @@
#define KERNEL_SIZE 0x8000000000
#define KERNEL_TOP (KERNEL_BASE + (KERNEL_SIZE - 1))
// Kernel physical memory map area.
#define KERNEL_PMAP_BASE 0xffffff0000000000
#define KERNEL_PMAP_SIZE 0x8000000000
// Userspace address space layout.
#define USER_BASE 0x0
#define USER_BASE_ANY 0x100000
@@ -51,9 +55,9 @@
// memory layout
#define KERNEL_BASE 0x80000000
#define KERNEL_SIZE 0x80000000
#define KERNEL_TOP (KERNEL_BASE + (KERNEL_SIZE - 1))
/* User space layout is a little special:
* The user space does not completely cover the space not covered by the
@@ -65,10 +69,10 @@
* TODO: introduce the 1Mb lower barrier again - it's only used for vm86 mode,
* and this should be moved into the kernel (and address space) completely.
*/
#define USER_BASE 0x00
#define USER_BASE_ANY 0x100000
#define USER_SIZE (KERNEL_BASE - 0x10000)
#define USER_TOP (USER_BASE + USER_SIZE)
#define KERNEL_USER_DATA_BASE 0x6fff0000
#define USER_STACK_REGION 0x70000000
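
The new KERNEL_PMAP_BASE/KERNEL_PMAP_SIZE constants in the x86_64 section above reserve a 512 GiB window in which all of physical memory can be mapped permanently. As a rough illustration of what that enables, here is a hypothetical helper (not part of this commit) that turns kernel physical-to-virtual translation into plain arithmetic:

#include <stdint.h>

typedef uint64_t phys_addr_t;
typedef uintptr_t addr_t;

#define KERNEL_PMAP_BASE 0xffffff0000000000ull
#define KERNEL_PMAP_SIZE 0x8000000000ull

// Hypothetical helper (x86_64 only): with every physical page mapped at a
// fixed offset, a physical address becomes a usable kernel pointer by
// adding the base of the physical map area.
static inline void*
phys_to_pmap(phys_addr_t physicalAddress)
{
	// Only meaningful while physicalAddress < KERNEL_PMAP_SIZE and the
	// range is actually mapped.
	return (void*)(addr_t)(KERNEL_PMAP_BASE + physicalAddress);
}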

View File

@@ -178,7 +178,6 @@ arch_int_init(kernel_args* args)
// Set up the legacy PIC.
pic_init();
panic("not implemented\n");
return B_OK;
}

View File

@@ -479,51 +479,6 @@ x86_init_user_debug()
}
status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
return B_ERROR;
}
status_t
arch_vm_translation_map_init(kernel_args *args,
VMPhysicalPageMapper** _physicalPageMapper)
{
return B_OK;
}
status_t
arch_vm_translation_map_init_post_sem(kernel_args *args)
{
return B_OK;
}
status_t
arch_vm_translation_map_init_post_area(kernel_args *args)
{
return B_OK;
}
status_t
arch_vm_translation_map_early_map(kernel_args *args, addr_t va, phys_addr_t pa,
uint8 attributes, phys_addr_t (*get_free_page)(kernel_args *))
{
return B_ERROR;
}
bool
arch_vm_translation_map_is_kernel_page_accessible(addr_t virtualAddress,
uint32 protection)
{
return true;
}
// Currently got generic elf.cpp #ifdef'd out for x86_64, define stub versions here.
status_t

View File

@@ -11,13 +11,12 @@ UsePrivateKernelHeaders ;
UsePrivateHeaders shared ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) timers ] ;
local archSpecificSources ;
if $(TARGET_ARCH) = x86_64 {
SEARCH_SOURCE += [ FDirName $(SUBDIR) 64 ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 64bit ] ;
archSpecificSources =
arch.S
@@ -25,9 +24,19 @@ if $(TARGET_ARCH) = x86_64 {
int.cpp
interrupts.S
stubs.cpp
# paging
#x86_physical_page_mapper_mapped.cpp
# paging/64bit
X86PagingMethod64Bit.cpp
X86PagingStructures64Bit.cpp
X86VMTranslationMap64Bit.cpp
;
} else {
SEARCH_SOURCE += [ FDirName $(SUBDIR) 32 ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging 32bit ] ;
SEARCH_SOURCE += [ FDirName $(SUBDIR) paging pae ] ;
archSpecificSources =
arch.S
@@ -43,7 +52,6 @@ if $(TARGET_ARCH) = x86_64 {
arch_smp.cpp
arch_thread.cpp
arch_timer.cpp
arch_system_info.cpp
arch_user_debugger.cpp
apic.cpp
@@ -59,11 +67,7 @@ if $(TARGET_ARCH) = x86_64 {
x86_syscalls.cpp
# paging
x86_physical_page_mapper_large_memory.cpp
# paging/32bit
X86PagingMethod32Bit.cpp
@@ -87,7 +91,14 @@ local archGenericSources =
arch_debug_console.cpp
arch_int.cpp
arch_vm.cpp
arch_vm_translation_map.cpp
pic.cpp
# paging
x86_physical_page_mapper.cpp
X86PagingMethod.cpp
X86PagingStructures.cpp
X86VMTranslationMap.cpp
;
KernelMergeObject kernel_arch_x86.o :

View File

@@ -12,8 +12,12 @@
#include <boot/kernel_args.h>
#include "paging/32bit/X86PagingMethod32Bit.h"
#include "paging/pae/X86PagingMethodPAE.h"
#ifdef __x86_64__
# include "paging/64bit/X86PagingMethod64Bit.h"
#else
# include "paging/32bit/X86PagingMethod32Bit.h"
# include "paging/pae/X86PagingMethodPAE.h"
#endif
//#define TRACE_VM_TMAP
@@ -26,10 +30,14 @@
static union {
uint64 align;
#ifdef __x86_64__
char sixty_four[sizeof(X86PagingMethod64Bit)];
#else
char thirty_two[sizeof(X86PagingMethod32Bit)];
#if B_HAIKU_PHYSICAL_BITS == 64
char pae[sizeof(X86PagingMethodPAE)];
#endif
#endif
} sPagingMethodBuffer;
@@ -74,7 +82,9 @@ arch_vm_translation_map_init(kernel_args *args,
}
#endif
#ifdef __x86_64__
gX86PagingMethod = new(&sPagingMethodBuffer) X86PagingMethod64Bit;
#elif B_HAIKU_PHYSICAL_BITS == 64
bool paeAvailable = x86_check_feature(IA32_FEATURE_PAE, FEATURE_COMMON);
bool paeNeeded = false;
for (uint32 i = 0; i < args->num_physical_memory_ranges; i++) {
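
The sPagingMethodBuffer union above provides statically allocated storage large enough for any of the paging method classes, and arch_vm_translation_map_init() constructs the selected one into it with placement new, so the choice can be made before any allocator exists. A self-contained sketch of that pattern (simplified classes, not the Haiku sources):

#include <new>
#include <cstdio>

// Simplified stand-ins for the paging method classes.
struct PagingMethod {
	virtual ~PagingMethod() {}
	virtual const char* Name() const = 0;
};

struct PagingMethod64Bit : PagingMethod {
	virtual const char* Name() const { return "64-bit"; }
};

struct PagingMethod32Bit : PagingMethod {
	virtual const char* Name() const { return "32-bit"; }
};

// Static storage sized (and aligned via the integer member) for any candidate.
static union {
	unsigned long long align;
	char sixtyFour[sizeof(PagingMethod64Bit)];
	char thirtyTwo[sizeof(PagingMethod32Bit)];
} sBuffer;

static PagingMethod* sMethod;

int main()
{
	// Placement new constructs the chosen method inside the static buffer;
	// the object is intentionally never destroyed, mirroring kernel usage.
	sMethod = new(&sBuffer) PagingMethod64Bit;
	std::printf("selected %s paging\n", sMethod->Name());
	return 0;
}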

View File

@@ -0,0 +1,82 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#include "paging/64bit/X86PagingMethod64Bit.h"
#include <stdlib.h>
#include <string.h>
#include <boot/kernel_args.h>
#include <util/AutoLock.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
//#include "paging/64bit/X86PagingStructures64Bit.h"
//#include "paging/64bit/X86VMTranslationMap64Bit.h"
#include "paging/x86_physical_page_mapper.h"
#include "paging/x86_physical_page_mapper_mapped.h"
#define TRACE_X86_PAGING_METHOD_64BIT
#ifdef TRACE_X86_PAGING_METHOD_64BIT
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
// #pragma mark - X86PagingMethod64Bit
X86PagingMethod64Bit::X86PagingMethod64Bit()
{
}
X86PagingMethod64Bit::~X86PagingMethod64Bit()
{
}
status_t
X86PagingMethod64Bit::Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper)
{
return B_ERROR;
}
status_t
X86PagingMethod64Bit::InitPostArea(kernel_args* args)
{
return B_ERROR;
}
status_t
X86PagingMethod64Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
{
return B_ERROR;
}
status_t
X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
phys_addr_t physicalAddress, uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*))
{
return B_ERROR;
}
bool
X86PagingMethod64Bit::IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection)
{
return true;
}
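
Every method above is a placeholder for now, returning B_ERROR (or true); the point of the class is to satisfy the X86PagingMethod interface so the shared x86 translation-map code can dispatch through the global method pointer. A hedged, user-space sketch of that call path, with stand-in types replacing the kernel headers:

#include <cstdio>

// Stand-in declarations; the real kernel_args, VMTranslationMap and status_t
// come from the kernel headers.
struct kernel_args;
class VMTranslationMap;
typedef int status_t;
const status_t B_OK = 0;
const status_t B_ERROR = -1;

class X86PagingMethod {
public:
	virtual ~X86PagingMethod() {}
	virtual status_t CreateTranslationMap(bool kernel,
		VMTranslationMap** _map) = 0;
};

// Same behaviour as the stub X86PagingMethod64Bit above: not implemented yet.
class PagingMethod64BitStub : public X86PagingMethod {
public:
	virtual status_t CreateTranslationMap(bool, VMTranslationMap**)
	{
		return B_ERROR;
	}
};

static X86PagingMethod* gX86PagingMethod;

// Presumed shape of the shared wrapper: it simply forwards to whichever
// paging method was selected during initialization.
static status_t
arch_vm_translation_map_create_map(bool kernel, VMTranslationMap** _map)
{
	return gX86PagingMethod->CreateTranslationMap(kernel, _map);
}

int main()
{
	PagingMethod64BitStub stub;
	gX86PagingMethod = &stub;

	VMTranslationMap* map = NULL;
	status_t result = arch_vm_translation_map_create_map(true, &map);
	std::printf("CreateTranslationMap returned %d\n", (int)result);
	return 0;
}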

View File

@@ -0,0 +1,47 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
#include <KernelExport.h>
#include <lock.h>
#include <vm/vm_types.h>
#include "paging/64bit/paging.h"
#include "paging/X86PagingMethod.h"
#include "paging/X86PagingStructures.h"
class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;
class X86PagingMethod64Bit : public X86PagingMethod {
public:
X86PagingMethod64Bit();
virtual ~X86PagingMethod64Bit();
virtual status_t Init(kernel_args* args,
VMPhysicalPageMapper** _physicalPageMapper);
virtual status_t InitPostArea(kernel_args* args);
virtual status_t CreateTranslationMap(bool kernel,
VMTranslationMap** _map);
virtual status_t MapEarly(kernel_args* args,
addr_t virtualAddress,
phys_addr_t physicalAddress,
uint8 attributes,
phys_addr_t (*get_free_page)(kernel_args*));
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
uint32 protection);
};
#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H

View File

@@ -0,0 +1,17 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#include "paging/64bit/X86PagingStructures64Bit.h"
#include <stdlib.h>
#include <string.h>
#include <KernelExport.h>
#include <int.h>
#include "paging/64bit/X86PagingMethod64Bit.h"

View File

@@ -0,0 +1,16 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_STRUCTURES_64BIT_H
#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_STRUCTURES_64BIT_H
#include "paging/pae/paging.h"
#include "paging/X86PagingStructures.h"
#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_STRUCTURES_64BIT_H
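
X86PagingStructures64Bit is still an empty shell here. For x86_64's four-level page tables, a structures object of this kind eventually has to track at least the top-level table (the PML4 whose physical address is loaded into CR3). The struct below is purely illustrative of that sort of state and is not taken from Haiku:

#include <stdint.h>

typedef uint64_t phys_addr_t;

// Illustrative only: the kind of bookkeeping a 64-bit paging-structures
// object typically needs for 4-level (PML4) page tables.
struct PagingStructures64BitSketch {
	phys_addr_t	pml4Physical;	// physical address of the PML4 (goes into CR3)
	uint64_t*	virtualPML4;	// kernel-visible mapping of the same table
	int32_t		refCount;		// how many users share these tables
};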

View File

@@ -0,0 +1,20 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#include "paging/64bit/X86VMTranslationMap64Bit.h"
#include <int.h>
#include <slab/Slab.h>
#include <thread.h>
#include <util/AutoLock.h>
#include <vm/vm_page.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include "paging/64bit/X86PagingMethod64Bit.h"
#include "paging/64bit/X86PagingStructures64Bit.h"
#include "paging/x86_physical_page_mapper.h"

View File

@@ -0,0 +1,15 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_VM_TRANSLATION_MAP_64BIT_H
#define KERNEL_ARCH_X86_PAGING_64BIT_X86_VM_TRANSLATION_MAP_64BIT_H
#include "paging/X86VMTranslationMap.h"
#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_VM_TRANSLATION_MAP_64BIT_H

View File

@@ -0,0 +1,12 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_PAE_PAGING_H
#define KERNEL_ARCH_X86_PAGING_PAE_PAGING_H
#include <OS.h>
#endif // KERNEL_ARCH_X86_PAGING_PAE_PAGING_H

View File

@@ -10,7 +10,6 @@
struct kernel_args;
struct vm_translation_map_ops;
class TranslationMapPhysicalPageMapper {

View File

@@ -0,0 +1,29 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
/*! Physical page mapper implementation for use where the whole of physical
memory is permanently mapped into the kernel address space.
This is used on x86_64 where the virtual address space is likely a great
deal larger than the amount of physical memory in the machine, so it can
all be mapped in permanently, which is faster and makes life much easier.
*/
#include "paging/x86_physical_page_mapper_mapped.h"
#include <new>
#include <cpu.h>
#include <smp.h>
#include <vm/vm.h>
#include <vm/vm_types.h>
#include <vm/VMAddressSpace.h>
#include "paging/x86_physical_page_mapper.h"
#include "paging/X86PagingStructures.h"
#include "paging/X86VMTranslationMap.h"

View File

@@ -0,0 +1,24 @@
/*
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
* Distributed under the terms of the MIT License.
*/
#ifndef KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H
#define KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H
#include <OS.h>
#include <util/DoublyLinkedList.h>
class TranslationMapPhysicalPageMapper;
class X86PhysicalPageMapper;
struct kernel_args;
status_t mapped_physical_page_ops_init(kernel_args* args,
X86PhysicalPageMapper*& _pageMapper,
TranslationMapPhysicalPageMapper*& _kernelPageMapper);
#endif // KERNEL_ARCH_X86_PAGING_X86_PHYSICAL_PAGE_MAPPER_MAPPED_H