Finished implementation of x86_64 paging.

* vm_init now runs up until create_preloaded_image_areas(), which needs
  fixing to handle ELF64.
* Not completely tested. I know Map(), Unmap() and Query() work fine; the
  other methods have not been tested, as the kernel doesn't boot far enough
  for any of them to be called yet. As far as I know they're correct, though.
* Not yet implemented the destructor for X86VMTranslationMap64Bit or Init()
  for a user address space.
parent 11d35d1b9b
commit e276cc0457
@ -107,15 +107,18 @@ print_iframe(struct iframe* frame)
	kprintf("%s iframe at %p (end = %p)\n", isUser ? "user" : "kernel", frame,
		isUser ? (uint64*)(frame + 1) : &frame->user_rsp);

	kprintf(" rax 0x%-16lx rbx 0x%-16lx rcx 0x%-16lx rdx 0x%lx\n",
		frame->rax, frame->rbx, frame->rcx, frame->rdx);
	kprintf(" rsi 0x%-16lx rdi 0x%-16lx rbp 0x%-16lx r8 0x%lx\n",
		frame->rsi, frame->rdi, frame->rbp, frame->r8);
	kprintf(" r9 0x%-16lx r10 0x%-16lx r11 0x%-16lx r12 0x%lx\n",
		frame->r9, frame->r10, frame->r11, frame->r12);
	kprintf(" r13 0x%-16lx r14 0x%-16lx r15 0x%-16lx\n",
		frame->r13, frame->r14, frame->r15);
	kprintf(" rax 0x%-16lx rbx 0x%-16lx rcx 0x%lx\n", frame->rax,
		frame->rbx, frame->rcx);
	kprintf(" rdx 0x%-16lx rsi 0x%-16lx rdi 0x%lx\n", frame->rdx,
		frame->rsi, frame->rdi);
	kprintf(" rbp 0x%-16lx r8 0x%-16lx r9 0x%lx\n", frame->rbp,
		frame->r8, frame->r9);
	kprintf(" r10 0x%-16lx r11 0x%-16lx r12 0x%lx\n", frame->r10,
		frame->r11, frame->r12);
	kprintf(" r13 0x%-16lx r14 0x%-16lx r15 0x%lx\n", frame->r13,
		frame->r14, frame->r15);
	kprintf(" rip 0x%-16lx rflags 0x%-16lx", frame->rip, frame->flags);

	if (isUser) {
		// from user space
		kprintf("user rsp 0x%lx", frame->user_rsp);

@ -853,12 +853,9 @@ arch_cpu_init_post_vm(kernel_args* args)
		&virtualRestrictions, &physicalRestrictions,
		(void**)&sDoubleFaultStacks);

	// TODO x86_64
#ifndef __x86_64__
	X86PagingStructures* kernelPagingStructures
		= static_cast<X86VMTranslationMap*>(
			VMAddressSpace::Kernel()->TranslationMap())->PagingStructures();
#endif

	// setup task-state segments
	for (i = 0; i < args->num_cpus; i++) {
@ -877,12 +874,9 @@ arch_cpu_init_post_vm(kernel_args* args)
		// initialize the double fault tss
		init_double_fault(i);

		// TODO x86_64
#ifndef __x86_64__
		// init active translation map
		gCPU[i].arch.active_paging_structures = kernelPagingStructures;
		kernelPagingStructures->AddReference();
#endif
	}

	// set the current hardware task on cpu 0
@ -367,7 +367,7 @@ X86PagingMethod32Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
|
||||
status_t
|
||||
X86PagingMethod32Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
|
||||
phys_addr_t physicalAddress, uint8 attributes,
|
||||
phys_addr_t (*get_free_page)(kernel_args*))
|
||||
page_num_t (*get_free_page)(kernel_args*))
|
||||
{
|
||||
// XXX horrible back door to map a page quickly regardless of translation
|
||||
// map object, etc. used only during VM setup.
|
||||
|
@ -31,7 +31,7 @@ public:
|
||||
addr_t virtualAddress,
|
||||
phys_addr_t physicalAddress,
|
||||
uint8 attributes,
|
||||
phys_addr_t (*get_free_page)(kernel_args*));
|
||||
page_num_t (*get_free_page)(kernel_args*));
|
||||
|
||||
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
|
||||
uint32 protection);
|
||||
|
@ -1,6 +1,11 @@
|
||||
/*
|
||||
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
|
||||
* Copyright 2008-2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2002-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
|
||||
* Distributed under the terms of the NewOS License.
|
||||
*/
|
||||
|
||||
|
||||
@ -15,13 +20,13 @@
|
||||
#include <vm/vm_page.h>
|
||||
#include <vm/VMAddressSpace.h>
|
||||
|
||||
//#include "paging/64bit/X86PagingStructures64Bit.h"
|
||||
//#include "paging/64bit/X86VMTranslationMap64Bit.h"
|
||||
#include "paging/64bit/X86PagingStructures64Bit.h"
|
||||
#include "paging/64bit/X86VMTranslationMap64Bit.h"
|
||||
#include "paging/x86_physical_page_mapper.h"
|
||||
#include "paging/x86_physical_page_mapper_mapped.h"
|
||||
|
||||
|
||||
#define TRACE_X86_PAGING_METHOD_64BIT
|
||||
//#define TRACE_X86_PAGING_METHOD_64BIT
|
||||
#ifdef TRACE_X86_PAGING_METHOD_64BIT
|
||||
# define TRACE(x...) dprintf(x)
|
||||
#else
|
||||
@ -33,6 +38,11 @@
|
||||
|
||||
|
||||
X86PagingMethod64Bit::X86PagingMethod64Bit()
|
||||
:
|
||||
fKernelPhysicalPML4(0),
|
||||
fKernelVirtualPML4(NULL),
|
||||
fPhysicalPageMapper(NULL),
|
||||
fKernelPhysicalPageMapper(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
@ -46,6 +56,19 @@ status_t
|
||||
X86PagingMethod64Bit::Init(kernel_args* args,
|
||||
VMPhysicalPageMapper** _physicalPageMapper)
|
||||
{
|
||||
fKernelPhysicalPML4 = args->arch_args.phys_pgdir;
|
||||
fKernelVirtualPML4 = (uint64*)(addr_t)args->arch_args.vir_pgdir;
|
||||
|
||||
// Ensure that the user half of the address space is clear. This removes
|
||||
// the temporary identity mapping made by the boot loader.
|
||||
memset(fKernelVirtualPML4, 0, sizeof(uint64) * 256);
|
||||
arch_cpu_global_TLB_invalidate();
|
||||
|
||||
// Create the physical page mapper.
|
||||
mapped_physical_page_ops_init(args, fPhysicalPageMapper,
|
||||
fKernelPhysicalPageMapper);
|
||||
|
||||
*_physicalPageMapper = fPhysicalPageMapper;
|
||||
return B_ERROR;
|
||||
}
|
||||
|
||||
@ -53,23 +76,107 @@ X86PagingMethod64Bit::Init(kernel_args* args,
|
||||
status_t
|
||||
X86PagingMethod64Bit::InitPostArea(kernel_args* args)
|
||||
{
|
||||
return B_ERROR;
|
||||
// Create an area to represent the kernel PML4.
|
||||
area_id area = create_area("kernel pml4", (void**)&fKernelVirtualPML4,
|
||||
B_EXACT_ADDRESS, B_PAGE_SIZE, B_ALREADY_WIRED,
|
||||
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
|
||||
if (area < B_OK)
|
||||
return area;
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86PagingMethod64Bit::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
|
||||
{
|
||||
return B_ERROR;
|
||||
X86VMTranslationMap64Bit* map = new(std::nothrow) X86VMTranslationMap64Bit;
|
||||
if (map == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
status_t error = map->Init(kernel);
|
||||
if (error != B_OK) {
|
||||
delete map;
|
||||
return error;
|
||||
}
|
||||
|
||||
*_map = map;
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86PagingMethod64Bit::MapEarly(kernel_args* args, addr_t virtualAddress,
|
||||
phys_addr_t physicalAddress, uint8 attributes,
|
||||
phys_addr_t (*get_free_page)(kernel_args*))
|
||||
page_num_t (*get_free_page)(kernel_args*))
|
||||
{
|
||||
return B_ERROR;
|
||||
TRACE("X86PagingMethod64Bit::MapEarly(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
|
||||
", %#" B_PRIx8 ")\n", virtualAddress, physicalAddress, attributes);
|
||||
|
||||
// Get the PDPT. We should be mapping on an existing PDPT at this stage.
|
||||
uint64* pml4e = &fKernelVirtualPML4[VADDR_TO_PML4E(virtualAddress)];
|
||||
ASSERT((*pml4e & X86_64_PML4E_PRESENT) != 0);
|
||||
uint64* virtualPDPT = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
|
||||
*pml4e & X86_64_PML4E_ADDRESS_MASK);
|
||||
|
||||
// Get the page directory.
|
||||
uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
|
||||
uint64* virtualPageDir;
|
||||
if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
|
||||
phys_addr_t physicalPageDir = get_free_page(args) * B_PAGE_SIZE;
|
||||
|
||||
TRACE("X86PagingMethod64Bit::MapEarly(): creating page directory for va"
|
||||
" %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
|
||||
physicalPageDir);
|
||||
|
||||
SetTableEntry(pdpte, (physicalPageDir & X86_64_PDPTE_ADDRESS_MASK)
|
||||
| X86_64_PDPTE_PRESENT
|
||||
| X86_64_PDPTE_WRITABLE);
|
||||
|
||||
// Map it and zero it.
|
||||
virtualPageDir = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
|
||||
physicalPageDir);
|
||||
memset(virtualPageDir, 0, B_PAGE_SIZE);
|
||||
} else {
|
||||
virtualPageDir = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
|
||||
*pdpte & X86_64_PDPTE_ADDRESS_MASK);
|
||||
}
|
||||
|
||||
// Get the page table.
|
||||
uint64* pde = &virtualPageDir[VADDR_TO_PDE(virtualAddress)];
|
||||
uint64* virtualPageTable;
|
||||
if ((*pde & X86_64_PDE_PRESENT) == 0) {
|
||||
phys_addr_t physicalPageTable = get_free_page(args) * B_PAGE_SIZE;
|
||||
|
||||
TRACE("X86PagingMethod64Bit::MapEarly(): creating page table for va"
|
||||
" %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
|
||||
physicalPageTable);
|
||||
|
||||
SetTableEntry(pde, (physicalPageTable & X86_64_PDE_ADDRESS_MASK)
|
||||
| X86_64_PDE_PRESENT
|
||||
| X86_64_PDE_WRITABLE);
|
||||
|
||||
// Map it and zero it.
|
||||
virtualPageTable = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
|
||||
physicalPageTable);
|
||||
memset(virtualPageTable, 0, B_PAGE_SIZE);
|
||||
} else {
|
||||
virtualPageTable = (uint64*)fKernelPhysicalPageMapper->GetPageTableAt(
|
||||
*pde & X86_64_PDE_ADDRESS_MASK);
|
||||
}
|
||||
|
||||
// The page table entry must not already be mapped.
|
||||
uint64* pte = &virtualPageTable[VADDR_TO_PTE(virtualAddress)];
|
||||
ASSERT_PRINT(
|
||||
(*pte & X86_64_PTE_PRESENT) == 0,
|
||||
"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
|
||||
virtualAddress, *pte);
|
||||
|
||||
// Fill in the table entry.
|
||||
PutPageTableEntryInTable(pte, physicalAddress, attributes, 0,
|
||||
IS_KERNEL_ADDRESS(virtualAddress));
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
@ -80,3 +187,145 @@ X86PagingMethod64Bit::IsKernelPageAccessible(addr_t virtualAddress,
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
/*! Traverses down the paging structure hierarchy to find the page table for a
|
||||
virtual address, allocating new tables if required.
|
||||
*/
|
||||
/*static*/ uint64*
|
||||
X86PagingMethod64Bit::PageTableForAddress(uint64* virtualPML4,
|
||||
addr_t virtualAddress, bool isKernel, bool allocateTables,
|
||||
vm_page_reservation* reservation,
|
||||
TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
|
||||
{
|
||||
TRACE("X86PagingMethod64Bit::PageTableEntryForAddress(%#" B_PRIxADDR ", "
|
||||
"%d)\n", virtualAddress, allocateTables);
|
||||
|
||||
// Get the PDPT.
|
||||
uint64* pml4e = &virtualPML4[VADDR_TO_PML4E(virtualAddress)];
|
||||
if ((*pml4e & X86_64_PML4E_PRESENT) == 0) {
|
||||
if (!allocateTables)
|
||||
return NULL;
|
||||
|
||||
// Allocate a new PDPT.
|
||||
vm_page* page = vm_page_allocate_page(reservation,
|
||||
PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
|
||||
|
||||
DEBUG_PAGE_ACCESS_END(page);
|
||||
|
||||
phys_addr_t physicalPDPT
|
||||
= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
|
||||
|
||||
TRACE("X86PagingMethod64Bit::PageTableEntryForAddress(): creating PDPT "
|
||||
"for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n", virtualAddress,
|
||||
physicalPDPT);
|
||||
|
||||
SetTableEntry(pml4e, (physicalPDPT & X86_64_PML4E_ADDRESS_MASK)
|
||||
| X86_64_PML4E_PRESENT
|
||||
| X86_64_PML4E_WRITABLE
|
||||
| (isKernel ? 0 : X86_64_PML4E_USER));
|
||||
|
||||
mapCount++;
|
||||
}
|
||||
|
||||
uint64* virtualPDPT = (uint64*)pageMapper->GetPageTableAt(
|
||||
*pml4e & X86_64_PML4E_ADDRESS_MASK);
|
||||
|
||||
// Get the page directory.
|
||||
uint64* pdpte = &virtualPDPT[VADDR_TO_PDPTE(virtualAddress)];
|
||||
if ((*pdpte & X86_64_PDPTE_PRESENT) == 0) {
|
||||
if (!allocateTables)
|
||||
return NULL;
|
||||
|
||||
// Allocate a new page directory.
|
||||
vm_page* page = vm_page_allocate_page(reservation,
|
||||
PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
|
||||
|
||||
DEBUG_PAGE_ACCESS_END(page);
|
||||
|
||||
phys_addr_t physicalPageDir
|
||||
= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
|
||||
|
||||
TRACE("X86PagingMethod64Bit::PageTableEntryForAddress(): creating page "
|
||||
"directory for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
|
||||
virtualAddress, physicalPageDir);
|
||||
|
||||
SetTableEntry(pdpte, (physicalPageDir & X86_64_PDPTE_ADDRESS_MASK)
|
||||
| X86_64_PDPTE_PRESENT
|
||||
| X86_64_PDPTE_WRITABLE
|
||||
| (isKernel ? 0 : X86_64_PDPTE_USER));
|
||||
|
||||
mapCount++;
|
||||
}
|
||||
|
||||
uint64* virtualPageDir = (uint64*)pageMapper->GetPageTableAt(
|
||||
*pdpte & X86_64_PDPTE_ADDRESS_MASK);
|
||||
|
||||
// Get the page table.
|
||||
uint64* pde = &virtualPageDir[VADDR_TO_PDE(virtualAddress)];
|
||||
if ((*pde & X86_64_PDE_PRESENT) == 0) {
|
||||
if (!allocateTables)
|
||||
return NULL;
|
||||
|
||||
// Allocate a new page table.
|
||||
vm_page* page = vm_page_allocate_page(reservation,
|
||||
PAGE_STATE_WIRED | VM_PAGE_ALLOC_CLEAR);
|
||||
|
||||
DEBUG_PAGE_ACCESS_END(page);
|
||||
|
||||
phys_addr_t physicalPageTable
|
||||
= (phys_addr_t)page->physical_page_number * B_PAGE_SIZE;
|
||||
|
||||
TRACE("X86PagingMethod64Bit::PageTableEntryForAddress(): creating page "
|
||||
"table for va %#" B_PRIxADDR " at %#" B_PRIxPHYSADDR "\n",
|
||||
virtualAddress, physicalPageTable);
|
||||
|
||||
SetTableEntry(pde, (physicalPageTable & X86_64_PDE_ADDRESS_MASK)
|
||||
| X86_64_PDE_PRESENT
|
||||
| X86_64_PDE_WRITABLE
|
||||
| (isKernel ? 0 : X86_64_PDE_USER));
|
||||
|
||||
mapCount++;
|
||||
}
|
||||
|
||||
return (uint64*)pageMapper->GetPageTableAt(*pde & X86_64_PDE_ADDRESS_MASK);
|
||||
}
|
||||
|
||||
|
||||
/*static*/ uint64*
|
||||
X86PagingMethod64Bit::PageTableEntryForAddress(uint64* virtualPML4,
|
||||
addr_t virtualAddress, bool isKernel, bool allocateTables,
|
||||
vm_page_reservation* reservation,
|
||||
TranslationMapPhysicalPageMapper* pageMapper, int32& mapCount)
|
||||
{
|
||||
uint64* virtualPageTable = PageTableForAddress(virtualPML4, virtualAddress,
|
||||
isKernel, allocateTables, reservation, pageMapper, mapCount);
|
||||
if (virtualPageTable == NULL)
|
||||
return NULL;
|
||||
|
||||
return &virtualPageTable[VADDR_TO_PTE(virtualAddress)];
|
||||
}
|
||||
|
||||
|
||||
/*static*/ void
|
||||
X86PagingMethod64Bit::PutPageTableEntryInTable(uint64* entry,
|
||||
phys_addr_t physicalAddress, uint32 attributes, uint32 memoryType,
|
||||
bool globalPage)
|
||||
{
|
||||
uint64 page = (physicalAddress & X86_64_PTE_ADDRESS_MASK)
|
||||
| X86_64_PTE_PRESENT | (globalPage ? X86_64_PTE_GLOBAL : 0)
|
||||
| MemoryTypeToPageTableEntryFlags(memoryType);
|
||||
|
||||
// if the page is user accessible, it's automatically
|
||||
// accessible in kernel space, too (but with the same
|
||||
// protection)
|
||||
if ((attributes & B_USER_PROTECTION) != 0) {
|
||||
page |= X86_64_PTE_USER;
|
||||
if ((attributes & B_WRITE_AREA) != 0)
|
||||
page |= X86_64_PTE_WRITABLE;
|
||||
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
|
||||
page |= X86_64_PTE_WRITABLE;
|
||||
|
||||
// put it in the page table
|
||||
SetTableEntry(entry, page);
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,6 @@
|
||||
/*
|
||||
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
|
||||
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*/
|
||||
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
|
||||
@ -18,6 +19,7 @@
|
||||
|
||||
class TranslationMapPhysicalPageMapper;
|
||||
class X86PhysicalPageMapper;
|
||||
struct vm_page_reservation;
|
||||
|
||||
|
||||
class X86PagingMethod64Bit : public X86PagingMethod {
|
||||
@ -36,12 +38,131 @@ public:
|
||||
addr_t virtualAddress,
|
||||
phys_addr_t physicalAddress,
|
||||
uint8 attributes,
|
||||
phys_addr_t (*get_free_page)(kernel_args*));
|
||||
page_num_t (*get_free_page)(kernel_args*));
|
||||
|
||||
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
|
||||
uint32 protection);
|
||||
|
||||
inline X86PhysicalPageMapper* PhysicalPageMapper() const
|
||||
{ return fPhysicalPageMapper; }
|
||||
inline TranslationMapPhysicalPageMapper* KernelPhysicalPageMapper() const
|
||||
{ return fKernelPhysicalPageMapper; }
|
||||
|
||||
inline uint64* KernelVirtualPML4() const
|
||||
{ return fKernelVirtualPML4; }
|
||||
inline phys_addr_t KernelPhysicalPML4() const
|
||||
{ return fKernelPhysicalPML4; }
|
||||
|
||||
static X86PagingMethod64Bit* Method();
|
||||
|
||||
static uint64* PageTableForAddress(uint64* virtualPML4,
|
||||
addr_t virtualAddress, bool isKernel,
|
||||
bool allocateTables,
|
||||
vm_page_reservation* reservation,
|
||||
TranslationMapPhysicalPageMapper*
|
||||
pageMapper, int32& mapCount);
|
||||
static uint64* PageTableEntryForAddress(uint64* virtualPML4,
|
||||
addr_t virtualAddress, bool isKernel,
|
||||
bool allocateTables,
|
||||
vm_page_reservation* reservation,
|
||||
TranslationMapPhysicalPageMapper*
|
||||
pageMapper, int32& mapCount);
|
||||
|
||||
static void PutPageTableEntryInTable(
|
||||
uint64* entry, phys_addr_t physicalAddress,
|
||||
uint32 attributes, uint32 memoryType,
|
||||
bool globalPage);
|
||||
static uint64 SetTableEntry(uint64* entry, uint64 newEntry);
|
||||
static uint64 SetTableEntryFlags(uint64* entry, uint64 flags);
|
||||
static uint64 TestAndSetTableEntry(uint64* entry,
|
||||
uint64 newEntry, uint64 oldEntry);
|
||||
static uint64 ClearTableEntry(uint64* entry);
|
||||
static uint64 ClearTableEntryFlags(uint64* entry,
|
||||
uint64 flags);
|
||||
|
||||
static uint64 MemoryTypeToPageTableEntryFlags(
|
||||
uint32 memoryType);
|
||||
|
||||
private:
|
||||
phys_addr_t fKernelPhysicalPML4;
|
||||
uint64* fKernelVirtualPML4;
|
||||
|
||||
X86PhysicalPageMapper* fPhysicalPageMapper;
|
||||
TranslationMapPhysicalPageMapper* fKernelPhysicalPageMapper;
|
||||
};
|
||||
|
||||
|
||||
/*static*/ inline X86PagingMethod64Bit*
|
||||
X86PagingMethod64Bit::Method()
|
||||
{
|
||||
return static_cast<X86PagingMethod64Bit*>(gX86PagingMethod);
|
||||
}
|
||||
|
||||
|
||||
/*static*/ inline uint64
|
||||
X86PagingMethod64Bit::SetTableEntry(uint64* entry, uint64 newEntry)
|
||||
{
|
||||
return atomic_set64((int64*)entry, newEntry);
|
||||
}
|
||||
|
||||
|
||||
/*static*/ inline uint64
|
||||
X86PagingMethod64Bit::SetTableEntryFlags(uint64* entry, uint64 flags)
|
||||
{
|
||||
return atomic_or64((int64*)entry, flags);
|
||||
}
|
||||
|
||||
|
||||
/*static*/ inline uint64
|
||||
X86PagingMethod64Bit::TestAndSetTableEntry(uint64* entry, uint64 newEntry,
|
||||
uint64 oldEntry)
|
||||
{
|
||||
return atomic_test_and_set64((int64*)entry, newEntry, oldEntry);
|
||||
}
|
||||
|
||||
|
||||
/*static*/ inline uint64
|
||||
X86PagingMethod64Bit::ClearTableEntry(uint64* entry)
|
||||
{
|
||||
return SetTableEntry(entry, 0);
|
||||
}
|
||||
|
||||
|
||||
/*static*/ inline uint64
|
||||
X86PagingMethod64Bit::ClearTableEntryFlags(uint64* entry, uint64 flags)
|
||||
{
|
||||
return atomic_and64((int64*)entry, ~flags);
|
||||
}
|
||||
|
||||
|
||||
/*static*/ inline uint64
|
||||
X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(uint32 memoryType)
|
||||
{
|
||||
// ATM we only handle the uncacheable and write-through type explicitly. For
|
||||
// all other types we rely on the MTRRs to be set up correctly. Since we set
|
||||
// the default memory type to write-back and since the uncacheable type in
|
||||
// the PTE overrides any MTRR attribute (though, as per the specs, that is
|
||||
// not recommended for performance reasons), this reduces the work we
|
||||
// actually *have* to do with the MTRRs to setting the remaining types
|
||||
// (usually only write-combining for the frame buffer).
|
||||
switch (memoryType) {
|
||||
case B_MTR_UC:
|
||||
return X86_64_PTE_CACHING_DISABLED | X86_64_PTE_WRITE_THROUGH;
|
||||
|
||||
case B_MTR_WC:
|
||||
// X86_PTE_WRITE_THROUGH would be closer, but the combination with
|
||||
// MTRR WC is "implementation defined" for Pentium Pro/II.
|
||||
return 0;
|
||||
|
||||
case B_MTR_WT:
|
||||
return X86_64_PTE_WRITE_THROUGH;
|
||||
|
||||
case B_MTR_WP:
|
||||
case B_MTR_WB:
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_METHOD_64BIT_H
|
||||
|
@ -15,3 +15,35 @@
|
||||
|
||||
#include "paging/64bit/X86PagingMethod64Bit.h"
|
||||
|
||||
|
||||
X86PagingStructures64Bit::X86PagingStructures64Bit()
|
||||
:
|
||||
fVirtualPML4(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
X86PagingStructures64Bit::~X86PagingStructures64Bit()
|
||||
{
|
||||
// Free the PML4.
|
||||
free(fVirtualPML4);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
X86PagingStructures64Bit::Init(uint64* virtualPML4, phys_addr_t physicalPML4)
|
||||
{
|
||||
fVirtualPML4 = virtualPML4;
|
||||
pgdir_phys = physicalPML4;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
X86PagingStructures64Bit::Delete()
|
||||
{
|
||||
if (are_interrupts_enabled())
|
||||
delete this;
|
||||
else
|
||||
deferred_delete(this);
|
||||
}
|
||||
|
||||
|
@ -6,11 +6,25 @@
|
||||
#define KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_STRUCTURES_64BIT_H
|
||||
|
||||
|
||||
#include "paging/pae/paging.h"
|
||||
#include "paging/64bit/paging.h"
|
||||
#include "paging/X86PagingStructures.h"
|
||||
|
||||
|
||||
struct X86PagingStructures64Bit : X86PagingStructures {
|
||||
X86PagingStructures64Bit();
|
||||
virtual ~X86PagingStructures64Bit();
|
||||
|
||||
void Init(uint64* virtualPML4,
|
||||
phys_addr_t physicalPML4);
|
||||
|
||||
virtual void Delete();
|
||||
|
||||
uint64* VirtualPML4()
|
||||
{ return fVirtualPML4; }
|
||||
|
||||
private:
|
||||
uint64* fVirtualPML4;
|
||||
};
|
||||
|
||||
|
||||
#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_PAGING_STRUCTURES_64BIT_H
|
||||
|
@ -1,6 +1,11 @@
|
||||
/*
|
||||
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
|
||||
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk
|
||||
* Copyright 2008-2011, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Copyright 2002-2010, Axel Dörfler, axeld@pinc-software.de.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*
|
||||
* Copyright 2001-2002, Travis Geiselbrecht. All rights reserved.
|
||||
* Distributed under the terms of the NewOS License.
|
||||
*/
|
||||
|
||||
|
||||
@ -18,3 +23,751 @@
|
||||
#include "paging/64bit/X86PagingStructures64Bit.h"
|
||||
#include "paging/x86_physical_page_mapper.h"
|
||||
|
||||
|
||||
//#define TRACE_X86_VM_TRANSLATION_MAP_64BIT
|
||||
#ifdef TRACE_X86_VM_TRANSLATION_MAP_64BIT
|
||||
# define TRACE(x...) dprintf(x)
|
||||
#else
|
||||
# define TRACE(x...) ;
|
||||
#endif
|
||||
|
||||
|
||||
// #pragma mark - X86VMTranslationMap64Bit
|
||||
|
||||
|
||||
X86VMTranslationMap64Bit::X86VMTranslationMap64Bit()
|
||||
:
|
||||
fPagingStructures(NULL)
|
||||
{
|
||||
}
|
||||
|
||||
|
||||
X86VMTranslationMap64Bit::~X86VMTranslationMap64Bit()
|
||||
{
|
||||
TRACE("X86VMTranslationMap64Bit::~X86VMTranslationMap64Bit()\n");
|
||||
|
||||
panic("X86VMTranslationMap64Bit::~X86VMTranslationMap64Bit: TODO");
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::Init(bool kernel)
|
||||
{
|
||||
TRACE("X86VMTranslationMap64Bit::Init()\n");
|
||||
|
||||
X86VMTranslationMap::Init(kernel);
|
||||
|
||||
fPagingStructures = new(std::nothrow) X86PagingStructures64Bit;
|
||||
if (fPagingStructures == NULL)
|
||||
return B_NO_MEMORY;
|
||||
|
||||
X86PagingMethod64Bit* method = X86PagingMethod64Bit::Method();
|
||||
|
||||
if (kernel) {
|
||||
// Get the page mapper.
|
||||
fPageMapper = method->KernelPhysicalPageMapper();
|
||||
|
||||
// Kernel PML4 is already mapped.
|
||||
fPagingStructures->Init(method->KernelVirtualPML4(),
|
||||
method->KernelPhysicalPML4());
|
||||
} else {
|
||||
panic("X86VMTranslationMap64Bit::Init(): TODO");
|
||||
}
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
size_t
X86VMTranslationMap64Bit::MaxPagesNeededToMap(addr_t start, addr_t end) const
{
	// If start == 0, the actual base address is not yet known to the caller and
	// we shall assume the worst case, which is where the start address is the
	// last page covered by a PDPT.
	if (start == 0) {
		start = k64BitPDPTRange - B_PAGE_SIZE;
		end += start;
	}

	size_t requiredPDPTs = end / k64BitPDPTRange + 1
		- start / k64BitPDPTRange;
	size_t requiredPageDirs = end / k64BitPageDirectoryRange + 1
		- start / k64BitPageDirectoryRange;
	size_t requiredPageTables = end / k64BitPageTableRange + 1
		- start / k64BitPageTableRange;

	return requiredPDPTs + requiredPageDirs + requiredPageTables;
}
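As a quick sanity check of the formula above (a hypothetical, standalone calculation, not part of the commit): with the constants from paging/64bit/paging.h a page table covers 2 MB, a page directory 1 GB and a PDPT 512 GB, so a 16 MB range starting 2 MB into a PDPT region needs at most 8 page tables, 1 page directory and 1 PDPT, i.e. 10 reserved pages:

	// Illustrative only: recomputes the worst case for one example range,
	// using the same arithmetic as MaxPagesNeededToMap().
	#include <cstdint>
	#include <cstdio>

	int main()
	{
		const uint64_t kPageTableRange = 0x200000;     // k64BitPageTableRange, 2 MB
		const uint64_t kPageDirRange = 0x40000000;     // k64BitPageDirectoryRange, 1 GB
		const uint64_t kPDPTRange = 0x8000000000ull;   // k64BitPDPTRange, 512 GB

		uint64_t start = 0x200000;                     // 2 MB into the region
		uint64_t end = start + (16 << 20) - 1;         // inclusive end of a 16 MB range

		uint64_t pdpts = end / kPDPTRange + 1 - start / kPDPTRange;                // 1
		uint64_t pageDirs = end / kPageDirRange + 1 - start / kPageDirRange;       // 1
		uint64_t pageTables = end / kPageTableRange + 1 - start / kPageTableRange; // 8

		printf("worst case pages: %llu\n",
			(unsigned long long)(pdpts + pageDirs + pageTables));                  // 10
		return 0;
	}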
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::Map(addr_t virtualAddress, phys_addr_t physicalAddress,
|
||||
uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
|
||||
{
|
||||
TRACE("X86VMTranslationMap64Bit::Map(%#" B_PRIxADDR ", %#" B_PRIxPHYSADDR
|
||||
")\n", virtualAddress, physicalAddress);
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
// Look up the page table for the virtual address, allocating new tables
|
||||
// if required. Shouldn't fail.
|
||||
uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
|
||||
fPagingStructures->VirtualPML4(), virtualAddress, fIsKernelMap,
|
||||
true, reservation, fPageMapper, fMapCount);
|
||||
ASSERT(entry != NULL);
|
||||
|
||||
// The entry should not already exist.
|
||||
ASSERT_PRINT((*entry & X86_64_PTE_PRESENT) == 0,
|
||||
"virtual address: %#" B_PRIxADDR ", existing pte: %#" B_PRIx64,
|
||||
virtualAddress, *entry);
|
||||
|
||||
// Fill in the table entry.
|
||||
X86PagingMethod64Bit::PutPageTableEntryInTable(entry, physicalAddress,
|
||||
attributes, memoryType, fIsKernelMap);
|
||||
|
||||
// Note: We don't need to invalidate the TLB for this address, as previously
|
||||
// the entry was not present and the TLB doesn't cache those entries.
|
||||
|
||||
fMapCount++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::Unmap(addr_t start, addr_t end)
|
||||
{
|
||||
start = ROUNDDOWN(start, B_PAGE_SIZE);
|
||||
if (start >= end)
|
||||
return B_OK;
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::Unmap(%#" B_PRIxADDR ", %#" B_PRIxADDR
|
||||
")\n", start, end);
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
do {
|
||||
uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
|
||||
fPagingStructures->VirtualPML4(), start, fIsKernelMap, false,
|
||||
NULL, fPageMapper, fMapCount);
|
||||
if (pageTable == NULL) {
|
||||
// Move on to the next page table.
|
||||
start = ROUNDUP(start + 1, k64BitPageTableRange);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
|
||||
index < k64BitTableEntryCount && start < end;
|
||||
index++, start += B_PAGE_SIZE) {
|
||||
if ((pageTable[index] & X86_64_PTE_PRESENT) == 0)
|
||||
continue;
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::Unmap(): removing page %#"
|
||||
B_PRIxADDR " (%#" B_PRIxPHYSADDR ")\n", start,
|
||||
pageTable[index] & X86_64_PTE_ADDRESS_MASK);
|
||||
|
||||
uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(
|
||||
&pageTable[index], X86_64_PTE_PRESENT);
|
||||
fMapCount--;
|
||||
|
||||
if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
|
||||
// Note, that we only need to invalidate the address, if the
|
||||
// accessed flags was set, since only then the entry could have
|
||||
// been in any TLB.
|
||||
InvalidatePage(start);
|
||||
}
|
||||
}
|
||||
} while (start != 0 && start < end);
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::DebugMarkRangePresent(addr_t start, addr_t end,
|
||||
bool markPresent)
|
||||
{
|
||||
start = ROUNDDOWN(start, B_PAGE_SIZE);
|
||||
if (start >= end)
|
||||
return B_OK;
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::DebugMarkRangePresent(%#" B_PRIxADDR
|
||||
", %#" B_PRIxADDR ")\n", start, end);
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
do {
|
||||
uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
|
||||
fPagingStructures->VirtualPML4(), start, fIsKernelMap, false,
|
||||
NULL, fPageMapper, fMapCount);
|
||||
if (pageTable == NULL) {
|
||||
// Move on to the next page table.
|
||||
start = ROUNDUP(start + 1, k64BitPageTableRange);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
|
||||
index < k64BitTableEntryCount && start < end;
|
||||
index++, start += B_PAGE_SIZE) {
|
||||
if ((pageTable[index] & X86_64_PTE_PRESENT) == 0) {
|
||||
if (!markPresent)
|
||||
continue;
|
||||
|
||||
X86PagingMethod64Bit::SetTableEntryFlags(&pageTable[index],
|
||||
X86_64_PTE_PRESENT);
|
||||
} else {
|
||||
if (markPresent)
|
||||
continue;
|
||||
|
||||
uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(
|
||||
&pageTable[index], X86_64_PTE_PRESENT);
|
||||
|
||||
if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
|
||||
// Note, that we only need to invalidate the address, if the
|
||||
// accessed flags was set, since only then the entry could
|
||||
// have been in any TLB.
|
||||
InvalidatePage(start);
|
||||
}
|
||||
}
|
||||
}
|
||||
} while (start != 0 && start < end);
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::UnmapPage(VMArea* area, addr_t address,
|
||||
bool updatePageQueue)
|
||||
{
|
||||
ASSERT(address % B_PAGE_SIZE == 0);
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::UnmapPage(%#" B_PRIxADDR ")\n", address);
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
// Look up the page table for the virtual address.
|
||||
uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
|
||||
fPagingStructures->VirtualPML4(), address, fIsKernelMap,
|
||||
false, NULL, fPageMapper, fMapCount);
|
||||
if (entry == NULL)
|
||||
return B_ENTRY_NOT_FOUND;
|
||||
|
||||
RecursiveLocker locker(fLock);
|
||||
|
||||
uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(entry);
|
||||
|
||||
pinner.Unlock();
|
||||
|
||||
if ((oldEntry & X86_64_PTE_PRESENT) == 0)
|
||||
return B_ENTRY_NOT_FOUND;
|
||||
|
||||
fMapCount--;
|
||||
|
||||
if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
|
||||
// Note, that we only need to invalidate the address, if the
|
||||
// accessed flags was set, since only then the entry could have been
|
||||
// in any TLB.
|
||||
InvalidatePage(address);
|
||||
|
||||
Flush();
|
||||
|
||||
// NOTE: Between clearing the page table entry and Flush() other
|
||||
// processors (actually even this processor with another thread of the
|
||||
// same team) could still access the page in question via their cached
|
||||
// entry. We can obviously lose a modified flag in this case, with the
|
||||
// effect that the page looks unmodified (and might thus be recycled),
|
||||
// but is actually modified.
|
||||
// In most cases this is harmless, but for vm_remove_all_page_mappings()
|
||||
// this is actually a problem.
|
||||
// Interestingly FreeBSD seems to ignore this problem as well
|
||||
// (cf. pmap_remove_all()), unless I've missed something.
|
||||
}
|
||||
|
||||
locker.Detach();
|
||||
// PageUnmapped() will unlock for us
|
||||
|
||||
PageUnmapped(area, (oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE,
|
||||
(oldEntry & X86_64_PTE_ACCESSED) != 0,
|
||||
(oldEntry & X86_64_PTE_DIRTY) != 0, updatePageQueue);
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
X86VMTranslationMap64Bit::UnmapPages(VMArea* area, addr_t base, size_t size,
|
||||
bool updatePageQueue)
|
||||
{
|
||||
if (size == 0)
|
||||
return;
|
||||
|
||||
addr_t start = base;
|
||||
addr_t end = base + size - 1;
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::UnmapPages(%p, %#" B_PRIxADDR ", %#"
|
||||
B_PRIxADDR ")\n", area, start, end);
|
||||
|
||||
VMAreaMappings queue;
|
||||
|
||||
RecursiveLocker locker(fLock);
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
do {
|
||||
uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
|
||||
fPagingStructures->VirtualPML4(), start, fIsKernelMap, false,
|
||||
NULL, fPageMapper, fMapCount);
|
||||
if (pageTable == NULL) {
|
||||
// Move on to the next page table.
|
||||
start = ROUNDUP(start + 1, k64BitPageTableRange);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
|
||||
index < k64BitTableEntryCount && start < end;
|
||||
index++, start += B_PAGE_SIZE) {
|
||||
uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(
|
||||
&pageTable[index]);
|
||||
if ((oldEntry & X86_64_PTE_PRESENT) == 0)
|
||||
continue;
|
||||
|
||||
fMapCount--;
|
||||
|
||||
if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
|
||||
// Note, that we only need to invalidate the address, if the
|
||||
// accessed flags was set, since only then the entry could have
|
||||
// been in any TLB.
|
||||
InvalidatePage(start);
|
||||
}
|
||||
|
||||
if (area->cache_type != CACHE_TYPE_DEVICE) {
|
||||
// get the page
|
||||
vm_page* page = vm_lookup_page(
|
||||
(oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
|
||||
ASSERT(page != NULL);
|
||||
|
||||
DEBUG_PAGE_ACCESS_START(page);
|
||||
|
||||
// transfer the accessed/dirty flags to the page
|
||||
if ((oldEntry & X86_64_PTE_ACCESSED) != 0)
|
||||
page->accessed = true;
|
||||
if ((oldEntry & X86_64_PTE_DIRTY) != 0)
|
||||
page->modified = true;
|
||||
|
||||
// remove the mapping object/decrement the wired_count of the
|
||||
// page
|
||||
if (area->wiring == B_NO_LOCK) {
|
||||
vm_page_mapping* mapping = NULL;
|
||||
vm_page_mappings::Iterator iterator
|
||||
= page->mappings.GetIterator();
|
||||
while ((mapping = iterator.Next()) != NULL) {
|
||||
if (mapping->area == area)
|
||||
break;
|
||||
}
|
||||
|
||||
ASSERT(mapping != NULL);
|
||||
|
||||
area->mappings.Remove(mapping);
|
||||
page->mappings.Remove(mapping);
|
||||
queue.Add(mapping);
|
||||
} else
|
||||
page->DecrementWiredCount();
|
||||
|
||||
if (!page->IsMapped()) {
|
||||
atomic_add(&gMappedPagesCount, -1);
|
||||
|
||||
if (updatePageQueue) {
|
||||
if (page->Cache()->temporary)
|
||||
vm_page_set_state(page, PAGE_STATE_INACTIVE);
|
||||
else if (page->modified)
|
||||
vm_page_set_state(page, PAGE_STATE_MODIFIED);
|
||||
else
|
||||
vm_page_set_state(page, PAGE_STATE_CACHED);
|
||||
}
|
||||
}
|
||||
|
||||
DEBUG_PAGE_ACCESS_END(page);
|
||||
}
|
||||
}
|
||||
|
||||
Flush();
|
||||
// flush explicitly, since we directly use the lock
|
||||
} while (start != 0 && start < end);
|
||||
|
||||
// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
|
||||
// really critical here, as in all cases this method is used, the unmapped
|
||||
// area range is unmapped for good (resized/cut) and the pages will likely
|
||||
// be freed.
|
||||
|
||||
locker.Unlock();
|
||||
|
||||
// free removed mappings
|
||||
bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
|
||||
uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
|
||||
| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
|
||||
while (vm_page_mapping* mapping = queue.RemoveHead())
|
||||
object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
|
||||
}
|
||||
|
||||
|
||||
void
|
||||
X86VMTranslationMap64Bit::UnmapArea(VMArea* area, bool deletingAddressSpace,
|
||||
bool ignoreTopCachePageFlags)
|
||||
{
|
||||
TRACE("X86VMTranslationMap64Bit::UnmapArea(%p)\n", area);
|
||||
|
||||
if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
|
||||
X86VMTranslationMap64Bit::UnmapPages(area, area->Base(), area->Size(),
|
||||
true);
|
||||
return;
|
||||
}
|
||||
|
||||
bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
|
||||
|
||||
RecursiveLocker locker(fLock);
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
VMAreaMappings mappings;
|
||||
mappings.MoveFrom(&area->mappings);
|
||||
|
||||
for (VMAreaMappings::Iterator it = mappings.GetIterator();
|
||||
vm_page_mapping* mapping = it.Next();) {
|
||||
vm_page* page = mapping->page;
|
||||
page->mappings.Remove(mapping);
|
||||
|
||||
VMCache* cache = page->Cache();
|
||||
|
||||
bool pageFullyUnmapped = false;
|
||||
if (!page->IsMapped()) {
|
||||
atomic_add(&gMappedPagesCount, -1);
|
||||
pageFullyUnmapped = true;
|
||||
}
|
||||
|
||||
if (unmapPages || cache != area->cache) {
|
||||
addr_t address = area->Base()
|
||||
+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
|
||||
|
||||
uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
|
||||
fPagingStructures->VirtualPML4(), address, fIsKernelMap,
|
||||
false, NULL, fPageMapper, fMapCount);
|
||||
if (entry == NULL) {
|
||||
panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
|
||||
"has no page table", page, area, address);
|
||||
continue;
|
||||
}
|
||||
|
||||
uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntry(entry);
|
||||
|
||||
if ((oldEntry & X86_64_PTE_PRESENT) == 0) {
|
||||
panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
|
||||
"has no page table entry", page, area, address);
|
||||
continue;
|
||||
}
|
||||
|
||||
// transfer the accessed/dirty flags to the page and invalidate
|
||||
// the mapping, if necessary
|
||||
if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
|
||||
page->accessed = true;
|
||||
|
||||
if (!deletingAddressSpace)
|
||||
InvalidatePage(address);
|
||||
}
|
||||
|
||||
if ((oldEntry & X86_64_PTE_DIRTY) != 0)
|
||||
page->modified = true;
|
||||
|
||||
if (pageFullyUnmapped) {
|
||||
DEBUG_PAGE_ACCESS_START(page);
|
||||
|
||||
if (cache->temporary)
|
||||
vm_page_set_state(page, PAGE_STATE_INACTIVE);
|
||||
else if (page->modified)
|
||||
vm_page_set_state(page, PAGE_STATE_MODIFIED);
|
||||
else
|
||||
vm_page_set_state(page, PAGE_STATE_CACHED);
|
||||
|
||||
DEBUG_PAGE_ACCESS_END(page);
|
||||
}
|
||||
}
|
||||
|
||||
fMapCount--;
|
||||
}
|
||||
|
||||
Flush();
|
||||
// flush explicitly, since we directly use the lock
|
||||
|
||||
locker.Unlock();
|
||||
|
||||
bool isKernelSpace = area->address_space == VMAddressSpace::Kernel();
|
||||
uint32 freeFlags = CACHE_DONT_WAIT_FOR_MEMORY
|
||||
| (isKernelSpace ? CACHE_DONT_LOCK_KERNEL_SPACE : 0);
|
||||
while (vm_page_mapping* mapping = mappings.RemoveHead())
|
||||
object_cache_free(gPageMappingsObjectCache, mapping, freeFlags);
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::Query(addr_t virtualAddress,
|
||||
phys_addr_t* _physicalAddress, uint32* _flags)
|
||||
{
|
||||
*_flags = 0;
|
||||
*_physicalAddress = 0;
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
// Look up the page table for the virtual address.
|
||||
uint64* pte = X86PagingMethod64Bit::PageTableEntryForAddress(
|
||||
fPagingStructures->VirtualPML4(), virtualAddress, fIsKernelMap,
|
||||
false, NULL, fPageMapper, fMapCount);
|
||||
if (pte == NULL)
|
||||
return B_OK;
|
||||
|
||||
uint64 entry = *pte;
|
||||
|
||||
*_physicalAddress = entry & X86_64_PTE_ADDRESS_MASK;
|
||||
|
||||
// Translate the page state flags.
|
||||
if ((entry & X86_64_PTE_USER) != 0) {
|
||||
*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
|
||||
| B_READ_AREA;
|
||||
}
|
||||
|
||||
*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
|
||||
| B_KERNEL_READ_AREA
|
||||
| ((entry & X86_64_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
|
||||
| ((entry & X86_64_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
|
||||
| ((entry & X86_64_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::Query(%#" B_PRIxADDR ") -> %#"
|
||||
B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::QueryInterrupt(addr_t virtualAddress,
|
||||
phys_addr_t* _physicalAddress, uint32* _flags)
|
||||
{
|
||||
*_flags = 0;
|
||||
*_physicalAddress = 0;
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
// Look up the page table for the virtual address.
|
||||
// FIXME: PageTableEntryForAddress uses GetPageTableAt() rather than
|
||||
// InterruptGetPageTableAt(). This doesn't actually matter since in our
|
||||
// page mapper both functions are the same, but perhaps this should be
|
||||
// fixed for correctness.
|
||||
uint64* pte = X86PagingMethod64Bit::PageTableEntryForAddress(
|
||||
fPagingStructures->VirtualPML4(), virtualAddress, fIsKernelMap,
|
||||
false, NULL, fPageMapper, fMapCount);
|
||||
if (pte == NULL)
|
||||
return B_OK;
|
||||
|
||||
uint64 entry = *pte;
|
||||
|
||||
*_physicalAddress = entry & X86_64_PTE_ADDRESS_MASK;
|
||||
|
||||
// Translate the page state flags.
|
||||
if ((entry & X86_64_PTE_USER) != 0) {
|
||||
*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_WRITE_AREA : 0)
|
||||
| B_READ_AREA;
|
||||
}
|
||||
|
||||
*_flags |= ((entry & X86_64_PTE_WRITABLE) != 0 ? B_KERNEL_WRITE_AREA : 0)
|
||||
| B_KERNEL_READ_AREA
|
||||
| ((entry & X86_64_PTE_DIRTY) != 0 ? PAGE_MODIFIED : 0)
|
||||
| ((entry & X86_64_PTE_ACCESSED) != 0 ? PAGE_ACCESSED : 0)
|
||||
| ((entry & X86_64_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::QueryInterrupt(%#" B_PRIxADDR ") -> %#"
|
||||
B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::Protect(addr_t start, addr_t end, uint32 attributes,
|
||||
uint32 memoryType)
|
||||
{
|
||||
start = ROUNDDOWN(start, B_PAGE_SIZE);
|
||||
if (start >= end)
|
||||
return B_OK;
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::Protect(%#" B_PRIxADDR ", %#" B_PRIxADDR
|
||||
", %#" B_PRIx32 ")\n", start, end, attributes);
|
||||
|
||||
// compute protection flags
|
||||
uint64 newProtectionFlags = 0;
|
||||
if ((attributes & B_USER_PROTECTION) != 0) {
|
||||
newProtectionFlags = X86_64_PTE_USER;
|
||||
if ((attributes & B_WRITE_AREA) != 0)
|
||||
newProtectionFlags |= X86_64_PTE_WRITABLE;
|
||||
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
|
||||
newProtectionFlags = X86_64_PTE_WRITABLE;
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
do {
|
||||
uint64* pageTable = X86PagingMethod64Bit::PageTableForAddress(
|
||||
fPagingStructures->VirtualPML4(), start, fIsKernelMap, false,
|
||||
NULL, fPageMapper, fMapCount);
|
||||
if (pageTable == NULL) {
|
||||
// Move on to the next page table.
|
||||
start = ROUNDUP(start + 1, k64BitPageTableRange);
|
||||
continue;
|
||||
}
|
||||
|
||||
for (uint32 index = start / B_PAGE_SIZE % k64BitTableEntryCount;
|
||||
index < k64BitTableEntryCount && start < end;
|
||||
index++, start += B_PAGE_SIZE) {
|
||||
uint64 entry = pageTable[index];
|
||||
if ((entry & X86_64_PTE_PRESENT) == 0)
|
||||
continue;
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::Protect(): protect page %#"
|
||||
B_PRIxADDR "\n", start);
|
||||
|
||||
			// set the new protection flags -- we want to do that atomically,
			// without changing the accessed or dirty flag
			uint64 oldEntry;
			while (true) {
				oldEntry = X86PagingMethod64Bit::TestAndSetTableEntry(
					&pageTable[index],
					(entry & ~(X86_64_PTE_PROTECTION_MASK
							| X86_64_PTE_MEMORY_TYPE_MASK))
						| newProtectionFlags
						| X86PagingMethod64Bit::MemoryTypeToPageTableEntryFlags(
							memoryType),
					entry);
				if (oldEntry == entry)
					break;
				entry = oldEntry;
			}

			if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
				// Note, that we only need to invalidate the address, if the
				// accessed flag was set, since only then the entry could have
				// been in any TLB.
				InvalidatePage(start);
			}
		}
	} while (start != 0 && start < end);

	return B_OK;
}
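The retry loop in Protect() above is the usual compare-and-swap pattern: the CPU may set the accessed or dirty bit of the entry at any moment, so the new protection bits are merged into a snapshot of the entry and only stored if the entry is still unchanged; otherwise the loop retries with the freshly observed value. A rough user-space analogue (illustrative only; the kernel code uses atomic_test_and_set64, not std::atomic):

	#include <atomic>
	#include <cstdint>
	#include <cstdio>

	// Replace the protection bits of a PTE-like value without losing any
	// accessed/dirty bits that might be set concurrently.
	static void set_protection(std::atomic<uint64_t>& entry,
		uint64_t protectionMask, uint64_t newProtection)
	{
		uint64_t old = entry.load();
		uint64_t desired;
		do {
			desired = (old & ~protectionMask) | newProtection;
			// On failure compare_exchange_weak reloads `old`, so the next
			// iteration merges into the freshly observed entry.
		} while (!entry.compare_exchange_weak(old, desired));
	}

	int main()
	{
		std::atomic<uint64_t> entry(0x67);   // present|writable|user|accessed|dirty
		set_protection(entry, 0x6, 0x2);     // clear the user bit, keep writable
		printf("entry: %#llx\n", (unsigned long long)entry.load());   // 0x63
		return 0;
	}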
|
||||
|
||||
|
||||
status_t
|
||||
X86VMTranslationMap64Bit::ClearFlags(addr_t address, uint32 flags)
|
||||
{
|
||||
TRACE("X86VMTranslationMap64Bit::ClearFlags(%#" B_PRIxADDR ", %#" B_PRIx32
|
||||
")\n", address, flags);
|
||||
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
|
||||
fPagingStructures->VirtualPML4(), address, fIsKernelMap,
|
||||
false, NULL, fPageMapper, fMapCount);
|
||||
if (entry == NULL)
|
||||
return B_OK;
|
||||
|
||||
uint64 flagsToClear = ((flags & PAGE_MODIFIED) ? X86_64_PTE_DIRTY : 0)
|
||||
| ((flags & PAGE_ACCESSED) ? X86_64_PTE_ACCESSED : 0);
|
||||
|
||||
uint64 oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
|
||||
flagsToClear);
|
||||
|
||||
if ((oldEntry & flagsToClear) != 0)
|
||||
InvalidatePage(address);
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
|
||||
|
||||
bool
|
||||
X86VMTranslationMap64Bit::ClearAccessedAndModified(VMArea* area, addr_t address,
|
||||
bool unmapIfUnaccessed, bool& _modified)
|
||||
{
|
||||
ASSERT(address % B_PAGE_SIZE == 0);
|
||||
|
||||
TRACE("X86VMTranslationMap64Bit::ClearAccessedAndModified(%#" B_PRIxADDR
|
||||
")\n", address);
|
||||
|
||||
RecursiveLocker locker(fLock);
|
||||
ThreadCPUPinner pinner(thread_get_current_thread());
|
||||
|
||||
uint64* entry = X86PagingMethod64Bit::PageTableEntryForAddress(
|
||||
fPagingStructures->VirtualPML4(), address, fIsKernelMap,
|
||||
false, NULL, fPageMapper, fMapCount);
|
||||
if (entry == NULL)
|
||||
return false;
|
||||
|
||||
uint64 oldEntry;
|
||||
|
||||
if (unmapIfUnaccessed) {
|
||||
while (true) {
|
||||
oldEntry = *entry;
|
||||
if ((oldEntry & X86_64_PTE_PRESENT) == 0) {
|
||||
// page mapping not valid
|
||||
return false;
|
||||
}
|
||||
|
||||
if (oldEntry & X86_64_PTE_ACCESSED) {
|
||||
// page was accessed -- just clear the flags
|
||||
oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
|
||||
X86_64_PTE_ACCESSED | X86_64_PTE_DIRTY);
|
||||
break;
|
||||
}
|
||||
|
||||
// page hasn't been accessed -- unmap it
|
||||
if (X86PagingMethod64Bit::TestAndSetTableEntry(entry, 0, oldEntry)
|
||||
== oldEntry) {
|
||||
break;
|
||||
}
|
||||
|
||||
// something changed -- check again
|
||||
}
|
||||
} else {
|
||||
oldEntry = X86PagingMethod64Bit::ClearTableEntryFlags(entry,
|
||||
X86_64_PTE_ACCESSED | X86_64_PTE_DIRTY);
|
||||
}
|
||||
|
||||
pinner.Unlock();
|
||||
|
||||
_modified = (oldEntry & X86_64_PTE_DIRTY) != 0;
|
||||
|
||||
if ((oldEntry & X86_64_PTE_ACCESSED) != 0) {
|
||||
// Note, that we only need to invalidate the address, if the
|
||||
// accessed flags was set, since only then the entry could have been
|
||||
// in any TLB.
|
||||
InvalidatePage(address);
|
||||
Flush();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
if (!unmapIfUnaccessed)
|
||||
return false;
|
||||
|
||||
// We have unmapped the address. Do the "high level" stuff.
|
||||
|
||||
fMapCount--;
|
||||
|
||||
locker.Detach();
|
||||
// UnaccessedPageUnmapped() will unlock for us
|
||||
|
||||
UnaccessedPageUnmapped(area,
|
||||
(oldEntry & X86_64_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
X86PagingStructures*
|
||||
X86VMTranslationMap64Bit::PagingStructures() const
|
||||
{
|
||||
return fPagingStructures;
|
||||
}
|
||||
|
@ -1,5 +1,6 @@
|
||||
/*
|
||||
* Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
|
||||
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
|
||||
* Distributed under the terms of the MIT License.
|
||||
*/
|
||||
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_X86_VM_TRANSLATION_MAP_64BIT_H
|
||||
@ -9,7 +10,60 @@
|
||||
#include "paging/X86VMTranslationMap.h"
|
||||
|
||||
|
||||
struct X86PagingStructures64Bit;
|
||||
|
||||
|
||||
struct X86VMTranslationMap64Bit : X86VMTranslationMap {
|
||||
X86VMTranslationMap64Bit();
|
||||
virtual ~X86VMTranslationMap64Bit();
|
||||
|
||||
status_t Init(bool kernel);
|
||||
|
||||
virtual size_t MaxPagesNeededToMap(addr_t start,
|
||||
addr_t end) const;
|
||||
|
||||
virtual status_t Map(addr_t virtualAddress,
|
||||
phys_addr_t physicalAddress,
|
||||
uint32 attributes, uint32 memoryType,
|
||||
vm_page_reservation* reservation);
|
||||
virtual status_t Unmap(addr_t start, addr_t end);
|
||||
|
||||
virtual status_t DebugMarkRangePresent(addr_t start, addr_t end,
|
||||
bool markPresent);
|
||||
|
||||
virtual status_t UnmapPage(VMArea* area, addr_t address,
|
||||
bool updatePageQueue);
|
||||
virtual void UnmapPages(VMArea* area, addr_t base,
|
||||
size_t size, bool updatePageQueue);
|
||||
virtual void UnmapArea(VMArea* area,
|
||||
bool deletingAddressSpace,
|
||||
bool ignoreTopCachePageFlags);
|
||||
|
||||
virtual status_t Query(addr_t virtualAddress,
|
||||
phys_addr_t* _physicalAddress,
|
||||
uint32* _flags);
|
||||
virtual status_t QueryInterrupt(addr_t virtualAddress,
|
||||
phys_addr_t* _physicalAddress,
|
||||
uint32* _flags);
|
||||
|
||||
virtual status_t Protect(addr_t base, addr_t top,
|
||||
uint32 attributes, uint32 memoryType);
|
||||
|
||||
virtual status_t ClearFlags(addr_t virtualAddress,
|
||||
uint32 flags);
|
||||
|
||||
virtual bool ClearAccessedAndModified(
|
||||
VMArea* area, addr_t address,
|
||||
bool unmapIfUnaccessed,
|
||||
bool& _modified);
|
||||
|
||||
virtual X86PagingStructures* PagingStructures() const;
|
||||
inline X86PagingStructures64Bit* PagingStructures64Bit() const
|
||||
{ return fPagingStructures; }
|
||||
|
||||
private:
|
||||
X86PagingStructures64Bit* fPagingStructures;
|
||||
};
|
||||
|
||||
|
||||
#endif // KERNEL_ARCH_X86_PAGING_64BIT_X86_VM_TRANSLATION_MAP_64BIT_H
|
||||
|
@ -2,11 +2,79 @@
 * Copyright 2012, Alex Smith, alex@alex-smith.me.uk.
 * Distributed under the terms of the MIT License.
 */
#ifndef KERNEL_ARCH_X86_PAGING_PAE_PAGING_H
#define KERNEL_ARCH_X86_PAGING_PAE_PAGING_H
#ifndef KERNEL_ARCH_X86_PAGING_64BIT_PAGING_H
#define KERNEL_ARCH_X86_PAGING_64BIT_PAGING_H


#include <OS.h>


#endif // KERNEL_ARCH_X86_PAGING_PAE_PAGING_H
// PML4 entry bits.
#define X86_64_PML4E_PRESENT (1LL << 0)
#define X86_64_PML4E_WRITABLE (1LL << 1)
#define X86_64_PML4E_USER (1LL << 2)
#define X86_64_PML4E_WRITE_THROUGH (1LL << 3)
#define X86_64_PML4E_CACHING_DISABLED (1LL << 4)
#define X86_64_PML4E_ACCESSED (1LL << 5)
#define X86_64_PML4E_NOT_EXECUTABLE (1LL << 63)
#define X86_64_PML4E_ADDRESS_MASK 0x000ffffffffff000L

// PDPT entry bits.
#define X86_64_PDPTE_PRESENT (1LL << 0)
#define X86_64_PDPTE_WRITABLE (1LL << 1)
#define X86_64_PDPTE_USER (1LL << 2)
#define X86_64_PDPTE_WRITE_THROUGH (1LL << 3)
#define X86_64_PDPTE_CACHING_DISABLED (1LL << 4)
#define X86_64_PDPTE_ACCESSED (1LL << 5)
#define X86_64_PDPTE_DIRTY (1LL << 6)
#define X86_64_PDPTE_LARGE_PAGE (1LL << 7)
#define X86_64_PDPTE_GLOBAL (1LL << 8)
#define X86_64_PDPTE_PAT (1LL << 12)
#define X86_64_PDPTE_NOT_EXECUTABLE (1LL << 63)
#define X86_64_PDPTE_ADDRESS_MASK 0x000ffffffffff000L

// Page directory entry bits.
#define X86_64_PDE_PRESENT (1LL << 0)
#define X86_64_PDE_WRITABLE (1LL << 1)
#define X86_64_PDE_USER (1LL << 2)
#define X86_64_PDE_WRITE_THROUGH (1LL << 3)
#define X86_64_PDE_CACHING_DISABLED (1LL << 4)
#define X86_64_PDE_ACCESSED (1LL << 5)
#define X86_64_PDE_DIRTY (1LL << 6)
#define X86_64_PDE_LARGE_PAGE (1LL << 7)
#define X86_64_PDE_GLOBAL (1LL << 8)
#define X86_64_PDE_PAT (1LL << 12)
#define X86_64_PDE_NOT_EXECUTABLE (1LL << 63)
#define X86_64_PDE_ADDRESS_MASK 0x000ffffffffff000L

// Page table entry bits.
#define X86_64_PTE_PRESENT (1LL << 0)
#define X86_64_PTE_WRITABLE (1LL << 1)
#define X86_64_PTE_USER (1LL << 2)
#define X86_64_PTE_WRITE_THROUGH (1LL << 3)
#define X86_64_PTE_CACHING_DISABLED (1LL << 4)
#define X86_64_PTE_ACCESSED (1LL << 5)
#define X86_64_PTE_DIRTY (1LL << 6)
#define X86_64_PTE_PAT (1LL << 7)
#define X86_64_PTE_GLOBAL (1LL << 8)
#define X86_64_PTE_NOT_EXECUTABLE (1LL << 63)
#define X86_64_PTE_ADDRESS_MASK 0x000ffffffffff000L
#define X86_64_PTE_PROTECTION_MASK (X86_64_PTE_WRITABLE | X86_64_PTE_USER)
#define X86_64_PTE_MEMORY_TYPE_MASK (X86_64_PTE_WRITE_THROUGH \
	| X86_64_PTE_CACHING_DISABLED)


static const size_t k64BitPageTableRange = 0x200000L;
static const size_t k64BitPageDirectoryRange = 0x40000000L;
static const size_t k64BitPDPTRange = 0x8000000000L;

static const size_t k64BitTableEntryCount = 512;


#define VADDR_TO_PML4E(va) (((va) & 0x0000fffffffff000L) / k64BitPDPTRange)
#define VADDR_TO_PDPTE(va) (((va) % k64BitPDPTRange) / k64BitPageDirectoryRange)
#define VADDR_TO_PDE(va) (((va) % k64BitPageDirectoryRange) / k64BitPageTableRange)
#define VADDR_TO_PTE(va) (((va) % k64BitPageTableRange) / B_PAGE_SIZE)


#endif // KERNEL_ARCH_X86_PAGING_64BIT_PAGING_H
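For reference (not part of the commit), the VADDR_TO_* macros above split a canonical x86_64 virtual address into four 9-bit table indices: 512 GB per PML4 entry, 1 GB per PDPT entry, 2 MB per page-directory entry and 4 KB pages. A standalone sketch of the same arithmetic, with an arbitrary example address:

	#include <cstdint>
	#include <cstdio>

	// Illustrative only: decompose a virtual address the same way the
	// VADDR_TO_* macros do.
	int main()
	{
		const uint64_t kPageSize = 0x1000;
		const uint64_t kPageTableRange = 0x200000;
		const uint64_t kPageDirRange = 0x40000000;
		const uint64_t kPDPTRange = 0x8000000000ull;

		uint64_t va = 0xffffff0123456789ull;   // arbitrary example address

		uint64_t pml4e = (va & 0x0000fffffffff000ull) / kPDPTRange;
		uint64_t pdpte = (va % kPDPTRange) / kPageDirRange;
		uint64_t pde = (va % kPageDirRange) / kPageTableRange;
		uint64_t pte = (va % kPageTableRange) / kPageSize;

		printf("pml4e %llu  pdpte %llu  pde %llu  pte %llu\n",
			(unsigned long long)pml4e, (unsigned long long)pdpte,
			(unsigned long long)pde, (unsigned long long)pte);
		return 0;
	}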
|
||||
|
@ -8,6 +8,8 @@
|
||||
|
||||
#include <SupportDefs.h>
|
||||
|
||||
#include <vm/vm_types.h>
|
||||
|
||||
|
||||
struct kernel_args;
|
||||
struct VMPhysicalPageMapper;
|
||||
@ -30,7 +32,7 @@ public:
|
||||
addr_t virtualAddress,
|
||||
phys_addr_t physicalAddress,
|
||||
uint8 attributes,
|
||||
phys_addr_t (*get_free_page)(kernel_args*))
|
||||
page_num_t (*get_free_page)(kernel_args*))
|
||||
= 0;
|
||||
|
||||
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
|
||||
|
@ -16,7 +16,7 @@
|
||||
|
||||
|
||||
struct X86PagingStructures : DeferredDeletable {
|
||||
uint32 pgdir_phys;
|
||||
phys_addr_t pgdir_phys;
|
||||
vint32 ref_count;
|
||||
vint32 active_on_cpus;
|
||||
// mask indicating on which CPUs the map is currently used
|
||||
|
@ -629,7 +629,7 @@ X86PagingMethodPAE::CreateTranslationMap(bool kernel, VMTranslationMap** _map)
|
||||
status_t
|
||||
X86PagingMethodPAE::MapEarly(kernel_args* args, addr_t virtualAddress,
|
||||
phys_addr_t physicalAddress, uint8 attributes,
|
||||
phys_addr_t (*get_free_page)(kernel_args*))
|
||||
page_num_t (*get_free_page)(kernel_args*))
|
||||
{
|
||||
// check to see if a page table exists for this range
|
||||
pae_page_directory_entry* pageDirEntry = PageDirEntryForAddress(
|
||||
|
@ -39,7 +39,7 @@ public:
|
||||
addr_t virtualAddress,
|
||||
phys_addr_t physicalAddress,
|
||||
uint8 attributes,
|
||||
phys_addr_t (*get_free_page)(kernel_args*));
|
||||
page_num_t (*get_free_page)(kernel_args*));
|
||||
|
||||
virtual bool IsKernelPageAccessible(addr_t virtualAddress,
|
||||
uint32 protection);
|
||||
|
@ -697,7 +697,7 @@ X86VMTranslationMapPAE::Query(addr_t virtualAddress,
|
||||
| ((entry & X86_PAE_PTE_PRESENT) != 0 ? PAGE_PRESENT : 0);
|
||||
|
||||
TRACE("X86VMTranslationMapPAE::Query(%#" B_PRIxADDR ") -> %#"
|
||||
B_PRIxPHYSADDR ":\n", *_physicalAddress, virtualAddress);
|
||||
B_PRIxPHYSADDR ":\n", virtualAddress, *_physicalAddress);
|
||||
|
||||
return B_OK;
|
||||
}
|
||||
@ -870,7 +870,7 @@ X86VMTranslationMapPAE::ClearAccessedAndModified(VMArea* area, addr_t address,
|
||||
{
|
||||
ASSERT(address % B_PAGE_SIZE == 0);
|
||||
|
||||
TRACE("X86VMTranslationMap32Bit::ClearAccessedAndModified(%#" B_PRIxADDR
|
||||
TRACE("X86VMTranslationMapPAE::ClearAccessedAndModified(%#" B_PRIxADDR
|
||||
")\n", address);
|
||||
|
||||
pae_page_directory_entry* pageDirEntry
|
||||
|