diff --git a/src/system/kernel/arch/x86/X86PagingMethod32Bit.cpp b/src/system/kernel/arch/x86/X86PagingMethod32Bit.cpp
index b7a4fcdd4d..34edc25cf5 100644
--- a/src/system/kernel/arch/x86/X86PagingMethod32Bit.cpp
+++ b/src/system/kernel/arch/x86/X86PagingMethod32Bit.cpp
@@ -29,6 +29,7 @@
 #include
 
 #include "x86_physical_page_mapper.h"
+#include "x86_physical_page_mapper_large_memory.h"
 #include "X86VMTranslationMap.h"
 
 
@@ -40,6 +41,12 @@
 #endif
 
 
+using X86LargePhysicalPageMapper::PhysicalPageSlot;
+
+
+static const size_t kPageTableAlignment = 1024 * B_PAGE_SIZE;
+
+
 static X86PagingMethod32Bit sMethod;
 
 static page_table_entry *sPageHole = NULL;
@@ -474,7 +481,7 @@ X86VMTranslationMap::Map(addr_t va, phys_addr_t pa, uint32 attributes,
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = fPageMapper->GetPageTableAt(
+	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 
 	index = VADDR_TO_PTENT(va);
@@ -522,7 +529,7 @@ restart:
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = fPageMapper->GetPageTableAt(
+	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 
 	for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
@@ -576,7 +583,7 @@ X86VMTranslationMap::UnmapPage(VMArea* area, addr_t address,
 
 	ThreadCPUPinner pinner(thread_get_current_thread());
 
-	page_table_entry* pt = fPageMapper->GetPageTableAt(
+	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 
 	index = VADDR_TO_PTENT(address);
@@ -700,7 +707,7 @@ X86VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size,
 		struct thread* thread = thread_get_current_thread();
 		ThreadCPUPinner pinner(thread);
 
-		page_table_entry* pt = fPageMapper->GetPageTableAt(
+		page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
 			pd[index] & X86_PDE_ADDRESS_MASK);
 
 		for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
@@ -837,8 +844,9 @@ X86VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
 
 		ThreadCPUPinner pinner(thread_get_current_thread());
 
-		page_table_entry* pt = fPageMapper->GetPageTableAt(
-			pd[index] & X86_PDE_ADDRESS_MASK);
+		page_table_entry* pt
+			= (page_table_entry*)fPageMapper->GetPageTableAt(
+				pd[index] & X86_PDE_ADDRESS_MASK);
 
 		page_table_entry oldEntry = clear_page_table_entry(
 			&pt[VADDR_TO_PTENT(address)]);
@@ -913,7 +921,7 @@ X86VMTranslationMap::Query(addr_t va, phys_addr_t *_physical, uint32 *_flags)
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = fPageMapper->GetPageTableAt(
+	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
 
@@ -954,8 +962,9 @@ X86VMTranslationMap::QueryInterrupt(addr_t va, phys_addr_t *_physical,
 	}
 
 	// map page table entry
-	page_table_entry* pt = sPhysicalPageMapper->InterruptGetPageTableAt(
-		pd[index] & X86_PDE_ADDRESS_MASK);
+	page_table_entry* pt
+		= (page_table_entry*)sPhysicalPageMapper->InterruptGetPageTableAt(
+			pd[index] & X86_PDE_ADDRESS_MASK);
 	page_table_entry entry = pt[VADDR_TO_PTENT(va)];
 
 	*_physical = entry & X86_PDE_ADDRESS_MASK;
@@ -1019,7 +1028,7 @@ restart:
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = fPageMapper->GetPageTableAt(
+	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 
 	for (index = VADDR_TO_PTENT(start); index < 1024 && start < end;
@@ -1078,7 +1087,7 @@ X86VMTranslationMap::ClearFlags(addr_t va, uint32 flags)
 	struct thread* thread = thread_get_current_thread();
 	ThreadCPUPinner pinner(thread);
 
-	page_table_entry* pt = fPageMapper->GetPageTableAt(
+	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 
 	index = VADDR_TO_PTENT(va);
@@ -1118,7 +1127,7 @@ X86VMTranslationMap::ClearAccessedAndModified(VMArea* area, addr_t address,
 
 	ThreadCPUPinner pinner(thread_get_current_thread());
 
-	page_table_entry* pt = fPageMapper->GetPageTableAt(
+	page_table_entry* pt = (page_table_entry*)fPageMapper->GetPageTableAt(
 		pd[index] & X86_PDE_ADDRESS_MASK);
 
 	index = VADDR_TO_PTENT(address);
@@ -1277,6 +1286,205 @@ X86VMTranslationMap::Flush()
 }
 
 
+// #pragma mark - X86PagingMethod32Bit::PhysicalPageSlotPool
+
+
+struct X86PagingMethod32Bit::PhysicalPageSlotPool
+	: X86LargePhysicalPageMapper::PhysicalPageSlotPool {
+public:
+	virtual						~PhysicalPageSlotPool();
+
+			status_t			InitInitial(kernel_args* args);
+			status_t			InitInitialPostArea(kernel_args* args);
+
+			void				Init(area_id dataArea, void* data,
+									area_id virtualArea, addr_t virtualBase);
+
+	virtual	status_t			AllocatePool(
+									X86LargePhysicalPageMapper
+										::PhysicalPageSlotPool*& _pool);
+	virtual	void				Map(phys_addr_t physicalAddress,
+									addr_t virtualAddress);
+
+public:
+	static	PhysicalPageSlotPool sInitialPhysicalPagePool;
+
+private:
+	area_id						fDataArea;
+	area_id						fVirtualArea;
+	addr_t						fVirtualBase;
+	page_table_entry*			fPageTable;
+};
+
+
+X86PagingMethod32Bit::PhysicalPageSlotPool
+	X86PagingMethod32Bit::PhysicalPageSlotPool::sInitialPhysicalPagePool;
+
+
+X86PagingMethod32Bit::PhysicalPageSlotPool::~PhysicalPageSlotPool()
+{
+}
+
+
+status_t
+X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitial(kernel_args* args)
+{
+	// We reserve more, so that we can guarantee to align the base address
+	// to page table ranges.
+	addr_t virtualBase = vm_allocate_early(args,
+		1024 * B_PAGE_SIZE + kPageTableAlignment - B_PAGE_SIZE, 0, 0, false);
+	if (virtualBase == 0) {
+		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
+			"physical page pool space in virtual address space!");
+		return B_ERROR;
+	}
+	virtualBase = (virtualBase + kPageTableAlignment - 1)
+		/ kPageTableAlignment * kPageTableAlignment;
+
+	// allocate memory for the page table and data
+	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
+	page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
+		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
+
+	// prepare the page table
+	x86_early_prepare_page_tables(pageTable, virtualBase,
+		1024 * B_PAGE_SIZE);
+
+	// init the pool structure and add the initial pool
+	Init(-1, pageTable, -1, (addr_t)virtualBase);
+
+	return B_OK;
+}
+
+
+status_t
+X86PagingMethod32Bit::PhysicalPageSlotPool::InitInitialPostArea(
+	kernel_args* args)
+{
+	// create an area for the (already allocated) data
+	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
+	void* temp = fPageTable;
+	area_id area = create_area("physical page pool", &temp,
+		B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+	if (area < B_OK) {
+		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
+			"create area for physical page pool.");
+		return area;
+	}
+	fDataArea = area;
+
+	// create an area for the virtual address space
+	temp = (void*)fVirtualBase;
+	area = vm_create_null_area(VMAddressSpace::KernelID(),
+		"physical page pool space", &temp, B_EXACT_ADDRESS,
+		1024 * B_PAGE_SIZE, 0);
+	if (area < B_OK) {
+		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
+			"create area for physical page pool space.");
+		return area;
+	}
+	fVirtualArea = area;
+
+	return B_OK;
+}
+
+
+void
+X86PagingMethod32Bit::PhysicalPageSlotPool::Init(area_id dataArea, void* data,
+	area_id virtualArea, addr_t virtualBase)
+{
+	fDataArea = dataArea;
+	fVirtualArea = virtualArea;
+	fVirtualBase = virtualBase;
+	fPageTable = (page_table_entry*)data;
+
+	// init slot list
+	fSlots = (PhysicalPageSlot*)(fPageTable + 1024);
+	addr_t slotAddress = virtualBase;
+	for (int32 i = 0; i < 1024; i++, slotAddress += B_PAGE_SIZE) {
+		PhysicalPageSlot* slot = &fSlots[i];
+		slot->next = slot + 1;
+		slot->pool = this;
+		slot->address = slotAddress;
+	}
+
+	fSlots[1023].next = NULL;
+		// terminate list
+}
+
+
+void
+X86PagingMethod32Bit::PhysicalPageSlotPool::Map(phys_addr_t physicalAddress,
+	addr_t virtualAddress)
+{
+	page_table_entry& pte = fPageTable[
+		(virtualAddress - fVirtualBase) / B_PAGE_SIZE];
+	pte = (physicalAddress & X86_PTE_ADDRESS_MASK)
+		| X86_PTE_WRITABLE | X86_PTE_GLOBAL | X86_PTE_PRESENT;
+
+	invalidate_TLB(virtualAddress);
+}
+
+
+status_t
+X86PagingMethod32Bit::PhysicalPageSlotPool::AllocatePool(
+	X86LargePhysicalPageMapper::PhysicalPageSlotPool*& _pool)
+{
+	// create the pool structure
+	PhysicalPageSlotPool* pool = new(std::nothrow) PhysicalPageSlotPool;
+	if (pool == NULL)
+		return B_NO_MEMORY;
+	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);
+
+	// create an area that can contain the page table and the slot
+	// structures
+	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
+	void* data;
+	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
+		&data, B_ANY_KERNEL_ADDRESS, PAGE_ALIGN(areaSize), B_FULL_LOCK,
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
+	if (dataArea < 0)
+		return dataArea;
+
+	// create the null area for the virtual address space
+	void* virtualBase;
+	area_id virtualArea = vm_create_null_area(
+		VMAddressSpace::KernelID(), "physical page pool space",
+		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
+		CREATE_AREA_PRIORITY_VIP);
+	if (virtualArea < 0) {
+		delete_area(dataArea);
+		return virtualArea;
+	}
+
+	// prepare the page table
+	memset(data, 0, B_PAGE_SIZE);
+
+	// get the page table's physical address
+	phys_addr_t physicalTable;
+	X86VMTranslationMap* map = static_cast<X86VMTranslationMap*>(
+		VMAddressSpace::Kernel()->TranslationMap());
+	uint32 dummyFlags;
+	cpu_status state = disable_interrupts();
+	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
+	restore_interrupts(state);
+
+	// put the page table into the page directory
+	int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * 1024);
+	page_directory_entry* entry = &map->PagingStructures()->pgdir_virt[index];
+	x86_put_pgtable_in_pgdir(entry, physicalTable,
+		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
+	x86_update_all_pgdirs(index, *entry);
+
+	// init the pool structure
+	pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
+	poolDeleter.Detach();
+	_pool = pool;
+	return B_OK;
+}
+
+
 // #pragma mark - X86PagingMethod32Bit
 
 
@@ -1326,7 +1534,19 @@ X86PagingMethod32Bit::Init(kernel_args* args,
 	B_INITIALIZE_SPINLOCK(&sPagingStructuresListLock);
 	new (&sPagingStructuresList) PagingStructuresList;
 
-	large_memory_physical_page_ops_init(args, sPhysicalPageMapper,
+	// create the initial pool for the physical page mapper
+	PhysicalPageSlotPool* pool
+		= new(&PhysicalPageSlotPool::sInitialPhysicalPagePool)
+			PhysicalPageSlotPool;
+	status_t error = pool->InitInitial(args);
+	if (error != B_OK) {
+		panic("X86PagingMethod32Bit::Init(): Failed to create initial pool "
+			"for physical page mapper!");
+		return error;
+	}
+
+	// create physical page mapper
+	large_memory_physical_page_ops_init(args, pool, sPhysicalPageMapper,
 		sKernelPhysicalPageMapper);
 	// TODO: Select the best page mapper!
 
@@ -1364,7 +1584,8 @@ X86PagingMethod32Bit::InitPostArea(kernel_args* args)
 	if (area < B_OK)
 		return area;
 
-	error = sPhysicalPageMapper->InitPostArea(args);
+	error = PhysicalPageSlotPool::sInitialPhysicalPagePool
+		.InitInitialPostArea(args);
 	if (error != B_OK)
 		return error;
 
diff --git a/src/system/kernel/arch/x86/X86PagingMethod32Bit.h b/src/system/kernel/arch/x86/X86PagingMethod32Bit.h
index 8bd7405f94..0ce0c2b902 100644
--- a/src/system/kernel/arch/x86/X86PagingMethod32Bit.h
+++ b/src/system/kernel/arch/x86/X86PagingMethod32Bit.h
@@ -40,6 +40,9 @@ public:
 	virtual	bool				IsKernelPageAccessible(addr_t virtualAddress,
 									uint32 protection);
+
+private:
+			struct PhysicalPageSlotPool;
 };
diff --git a/src/system/kernel/arch/x86/x86_physical_page_mapper.h b/src/system/kernel/arch/x86/x86_physical_page_mapper.h
index 927f636ca9..c0a50b5798 100644
--- a/src/system/kernel/arch/x86/x86_physical_page_mapper.h
+++ b/src/system/kernel/arch/x86/x86_physical_page_mapper.h
@@ -8,8 +8,6 @@
 
 #include
 
-#include "x86_paging.h"
-
 
 struct kernel_args;
 struct vm_translation_map_ops;
@@ -21,7 +19,7 @@ public:
 
 	virtual	void				Delete() = 0;
 
-	virtual	page_table_entry*	GetPageTableAt(phys_addr_t physicalAddress) = 0;
+	virtual	void*				GetPageTableAt(phys_addr_t physicalAddress) = 0;
 		// Must be invoked with thread pinned to current CPU.
 };
 
 
@@ -30,20 +28,13 @@
 class X86PhysicalPageMapper : public VMPhysicalPageMapper {
 public:
 	virtual						~X86PhysicalPageMapper();
 
-	virtual	status_t			InitPostArea(kernel_args* args) = 0;
-
 	virtual	status_t			CreateTranslationMapPhysicalPageMapper(
 									TranslationMapPhysicalPageMapper** _mapper)
 										= 0;
 
-	virtual	page_table_entry*	InterruptGetPageTableAt(
+	virtual	void*				InterruptGetPageTableAt(
 									phys_addr_t physicalAddress) = 0;
 };
 
 
-status_t large_memory_physical_page_ops_init(kernel_args* args,
-	X86PhysicalPageMapper*& _pageMapper,
-	TranslationMapPhysicalPageMapper*& _kernelPageMapper);
-
-
 #endif	// _KERNEL_ARCH_X86_PHYSICAL_PAGE_MAPPER_H
diff --git a/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.cpp b/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.cpp
index ab24dd8d5e..dc0d958faf 100644
--- a/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.cpp
+++ b/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.cpp
@@ -11,7 +11,7 @@
 
 	We allocate a single page table (one page) that can map 1024 pages and
 	a corresponding virtual address space region (4 MB). Each of those 1024
-	slots can map a physical page. We reserve a fixed amount of slot per CPU.
+	slots can map a physical page. We reserve a fixed amount of slots per CPU.
 	They will be used for physical operations on that CPU (memset()/memcpy()
 	and {get,put}_physical_page_current_cpu()). A few slots we reserve for each
 	translation map (TranslationMapPhysicalPageMapper). Those will only be used
@@ -23,7 +23,7 @@
 */
 
 
-#include "x86_physical_page_mapper.h"
+#include "x86_physical_page_mapper_large_memory.h"
 
 #include
 
@@ -33,12 +33,12 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 
 #include "x86_paging.h"
+#include "x86_physical_page_mapper.h"
 #include "X86VMTranslationMap.h"
 
@@ -54,35 +54,9 @@
 		+ KERNEL_SLOTS_PER_CPU + 1)
 	// one slot is for use in interrupts
 
 
-static const size_t kPageTableAlignment = 1024 * B_PAGE_SIZE;
-
-
-struct PhysicalPageSlotPool;
-
-struct PhysicalPageSlot {
-	PhysicalPageSlot*			next;
-	PhysicalPageSlotPool*		pool;
-	addr_t						address;
-
-	inline	void				Map(phys_addr_t physicalAddress);
-};
-
-
-struct PhysicalPageSlotPool : DoublyLinkedListLinkImpl<PhysicalPageSlotPool> {
-	area_id						dataArea;
-	area_id						virtualArea;
-	addr_t						virtualBase;
-	page_table_entry*			pageTable;
-	PhysicalPageSlot*			slots;
-
-			void				Init(area_id dataArea, void* data,
-									area_id virtualArea, addr_t virtualBase);
-
-	inline	bool				IsEmpty() const;
-
-	inline	PhysicalPageSlot*	GetSlot();
-	inline	void				PutSlot(PhysicalPageSlot* slot);
-};
+using X86LargePhysicalPageMapper::PhysicalPageSlot;
+using X86LargePhysicalPageMapper::PhysicalPageSlotPool;
 
 
 class PhysicalPageSlotQueue {
@@ -131,7 +105,7 @@ public:
 
 	virtual	void				Delete();
 
-	virtual	page_table_entry*	GetPageTableAt(phys_addr_t physicalAddress);
+	virtual	void*				GetPageTableAt(phys_addr_t physicalAddress);
 
 private:
 	struct page_slot {
@@ -151,14 +125,14 @@ public:
 								LargeMemoryPhysicalPageMapper();
 
 			status_t			Init(kernel_args* args,
+									PhysicalPageSlotPool* initialPool,
 									TranslationMapPhysicalPageMapper*&
 										_kernelPageMapper);
-	virtual	status_t			InitPostArea(kernel_args* args);
 
 	virtual	status_t			CreateTranslationMapPhysicalPageMapper(
 									TranslationMapPhysicalPageMapper** _mapper);
 
-	virtual	page_table_entry*	InterruptGetPageTableAt(
+	virtual	void*				InterruptGetPageTableAt(
 									phys_addr_t physicalAddress);
 
 	virtual	status_t			GetPage(phys_addr_t physicalAddress,
@@ -190,10 +164,6 @@ public:
 	inline	PhysicalPageSlotQueue* GetSlotQueue(int32 cpu, bool user);
 
-private:
-	static	status_t			_AllocatePool(
-									PhysicalPageSlotPool*& _pool);
-
 private:
 	typedef DoublyLinkedList<PhysicalPageSlotPool> PoolList;
 
@@ -201,7 +171,7 @@ private:
 	PoolList					fEmptyPools;
 	PoolList					fNonEmptyPools;
 	PhysicalPageSlot*			fDebugSlot;
-	PhysicalPageSlotPool		fInitialPool;
+	PhysicalPageSlotPool*		fInitialPool;
 	LargeMemoryTranslationMapPhysicalPageMapper	fKernelMapper;
 	PhysicalPageOpsCPUData		fPerCPUData[B_MAX_CPU_COUNT];
 };
@@ -215,51 +185,27 @@ static LargeMemoryPhysicalPageMapper sPhysicalPageMapper;
 inline void
 PhysicalPageSlot::Map(phys_addr_t physicalAddress)
 {
-	page_table_entry& pte = pool->pageTable[
-		(address - pool->virtualBase) / B_PAGE_SIZE];
-	pte = (physicalAddress & X86_PTE_ADDRESS_MASK)
-		| X86_PTE_WRITABLE | X86_PTE_GLOBAL | X86_PTE_PRESENT;
-
-	invalidate_TLB(address);
+	pool->Map(physicalAddress, address);
 }
 
 
-void
-PhysicalPageSlotPool::Init(area_id dataArea, void* data,
-	area_id virtualArea, addr_t virtualBase)
+PhysicalPageSlotPool::~PhysicalPageSlotPool()
 {
-	this->dataArea = dataArea;
-	this->virtualArea = virtualArea;
-	this->virtualBase = virtualBase;
-	pageTable = (page_table_entry*)data;
-
-	// init slot list
-	slots = (PhysicalPageSlot*)(pageTable + 1024);
-	addr_t slotAddress = virtualBase;
-	for (int32 i = 0; i < 1024; i++, slotAddress += B_PAGE_SIZE) {
-		PhysicalPageSlot* slot = &slots[i];
-		slot->next = slot + 1;
-		slot->pool = this;
-		slot->address = slotAddress;
-	}
-
-	slots[1023].next = NULL;
-		// terminate list
 }
 
 
 inline bool
 PhysicalPageSlotPool::IsEmpty() const
 {
-	return slots == NULL;
+	return fSlots == NULL;
 }
 
 
 inline PhysicalPageSlot*
 PhysicalPageSlotPool::GetSlot()
 {
-	PhysicalPageSlot* slot = slots;
-	slots = slot->next;
+	PhysicalPageSlot* slot = fSlots;
+	fSlots = slot->next;
 	return slot;
 }
@@ -267,8 +213,8 @@ PhysicalPageSlotPool::GetSlot()
 inline void
 PhysicalPageSlotPool::PutSlot(PhysicalPageSlot* slot)
 {
-	slot->next = slots;
-	slots = slot;
+	slot->next = fSlots;
+	fSlots = slot;
 }
@@ -435,7 +381,7 @@ LargeMemoryTranslationMapPhysicalPageMapper::Delete()
 }
 
 
-page_table_entry*
+void*
 LargeMemoryTranslationMapPhysicalPageMapper::GetPageTableAt(
 	phys_addr_t physicalAddress)
 {
@@ -453,7 +399,7 @@ LargeMemoryTranslationMapPhysicalPageMapper::GetPageTableAt(
 				invalidate_TLB(slot.slot->address);
 				slot.valid |= 1 << currentCPU;
 			}
-			return (page_table_entry*)slot.slot->address;
+			return (void*)slot.slot->address;
 		}
 	}
 
@@ -465,7 +411,7 @@ LargeMemoryTranslationMapPhysicalPageMapper::GetPageTableAt(
 	slot.slot->Map(physicalAddress);
 	slot.valid = 1 << currentCPU;
 
-	return (page_table_entry*)slot.slot->address;
+	return (void*)slot.slot->address;
 }
@@ -473,6 +419,8 @@ LargeMemoryTranslationMapPhysicalPageMapper::GetPageTableAt(
 
 
 LargeMemoryPhysicalPageMapper::LargeMemoryPhysicalPageMapper()
+	:
+	fInitialPool(NULL)
 {
 	mutex_init(&fLock, "large memory physical page mapper");
 }
@@ -480,32 +428,11 @@ LargeMemoryPhysicalPageMapper::LargeMemoryPhysicalPageMapper()
 
 status_t
 LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
+	PhysicalPageSlotPool* initialPool,
 	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
 {
-	// We reserve more, so that we can guarantee to align the base address
-	// to page table ranges.
-	addr_t virtualBase = vm_allocate_early(args,
-		1024 * B_PAGE_SIZE + kPageTableAlignment - B_PAGE_SIZE, 0, 0, false);
-	if (virtualBase == 0) {
-		panic("LargeMemoryPhysicalPageMapper::Init(): Failed to reserve "
-			"physical page pool space in virtual address space!");
-		return B_ERROR;
-	}
-	virtualBase = (virtualBase + kPageTableAlignment - 1)
-		/ kPageTableAlignment * kPageTableAlignment;
-
-	// allocate memory for the page table and data
-	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
-	page_table_entry* pageTable = (page_table_entry*)vm_allocate_early(args,
-		areaSize, ~0L, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, false);
-
-	// prepare the page table
-	x86_early_prepare_page_tables(pageTable, virtualBase,
-		1024 * B_PAGE_SIZE);
-
-	// init the pool structure and add the initial pool
-	fInitialPool.Init(-1, pageTable, -1, (addr_t)virtualBase);
-	fNonEmptyPools.Add(&fInitialPool);
+	fInitialPool = initialPool;
+	fNonEmptyPools.Add(fInitialPool);
 
 	// get the debug slot
 	GetSlot(true, fDebugSlot);
@@ -528,38 +455,6 @@ LargeMemoryPhysicalPageMapper::Init(kernel_args* args,
 }
 
 
-status_t
-LargeMemoryPhysicalPageMapper::InitPostArea(kernel_args* args)
-{
-	// create an area for the (already allocated) data
-	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
-	void* temp = fInitialPool.pageTable;
-	area_id area = create_area("physical page pool", &temp,
-		B_EXACT_ADDRESS, areaSize, B_ALREADY_WIRED,
-		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
-	if (area < B_OK) {
-		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
-			"create area for physical page pool.");
-		return area;
-	}
-	fInitialPool.dataArea = area;
-
-	// create an area for the virtual address space
-	temp = (void*)fInitialPool.virtualBase;
-	area = vm_create_null_area(VMAddressSpace::KernelID(),
-		"physical page pool space", &temp, B_EXACT_ADDRESS,
-		1024 * B_PAGE_SIZE, 0);
-	if (area < B_OK) {
-		panic("LargeMemoryPhysicalPageMapper::InitPostArea(): Failed to "
-			"create area for physical page pool space.");
-		return area;
-	}
-	fInitialPool.virtualArea = area;
-
-	return B_OK;
-}
-
-
 status_t
 LargeMemoryPhysicalPageMapper::CreateTranslationMapPhysicalPageMapper(
 	TranslationMapPhysicalPageMapper** _mapper)
@@ -580,7 +475,7 @@ LargeMemoryPhysicalPageMapper::CreateTranslationMapPhysicalPageMapper(
 }
 
 
-page_table_entry*
+void*
 LargeMemoryPhysicalPageMapper::InterruptGetPageTableAt(
 	phys_addr_t physicalAddress)
 {
@@ -588,7 +483,7 @@ LargeMemoryPhysicalPageMapper::InterruptGetPageTableAt(
 
 	PhysicalPageSlot* slot = fPerCPUData[smp_get_current_cpu()].interruptSlot;
 	slot->Map(physicalAddress);
-	return (page_table_entry*)slot->address;
+	return (void*)slot->address;
 }
@@ -802,8 +697,7 @@ LargeMemoryPhysicalPageMapper::MemcpyPhysicalPage(phys_addr_t to,
 
 
 status_t
-LargeMemoryPhysicalPageMapper::GetSlot(bool canWait,
-	PhysicalPageSlot*& slot)
+LargeMemoryPhysicalPageMapper::GetSlot(bool canWait, PhysicalPageSlot*& slot)
 {
 	MutexLocker locker(fLock);
 
@@ -814,7 +708,7 @@ LargeMemoryPhysicalPageMapper::GetSlot(bool canWait,
 
 		// allocate new pool
 		locker.Unlock();
-		status_t error = _AllocatePool(pool);
+		status_t error = fInitialPool->AllocatePool(pool);
 		if (error != B_OK)
 			return error;
 		locker.Lock();
@@ -856,74 +750,17 @@ LargeMemoryPhysicalPageMapper::GetSlotQueue(int32 cpu, bool user)
 }
 
 
-/* static */ status_t
-LargeMemoryPhysicalPageMapper::_AllocatePool(PhysicalPageSlotPool*& _pool)
-{
-	// create the pool structure
-	PhysicalPageSlotPool* pool
-		= new(std::nothrow) PhysicalPageSlotPool;
-	if (pool == NULL)
-		return B_NO_MEMORY;
-	ObjectDeleter<PhysicalPageSlotPool> poolDeleter(pool);
-
-	// create an area that can contain the page table and the slot
-	// structures
-	size_t areaSize = B_PAGE_SIZE + sizeof(PhysicalPageSlot[1024]);
-	void* data;
-	area_id dataArea = create_area_etc(B_SYSTEM_TEAM, "physical page pool",
-		&data, B_ANY_KERNEL_ADDRESS, PAGE_ALIGN(areaSize), B_FULL_LOCK,
-		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, CREATE_AREA_DONT_WAIT);
-	if (dataArea < 0)
-		return dataArea;
-
-	// create the null area for the virtual address space
-	void* virtualBase;
-	area_id virtualArea = vm_create_null_area(
-		VMAddressSpace::KernelID(), "physical page pool space",
-		&virtualBase, B_ANY_KERNEL_BLOCK_ADDRESS, 1024 * B_PAGE_SIZE,
-		CREATE_AREA_PRIORITY_VIP);
-	if (virtualArea < 0) {
-		delete_area(dataArea);
-		return virtualArea;
-	}
-
-	// prepare the page table
-	memset(data, 0, B_PAGE_SIZE);
-
-	// get the page table's physical address
-	phys_addr_t physicalTable;
-	X86VMTranslationMap* map = static_cast<X86VMTranslationMap*>(
-		VMAddressSpace::Kernel()->TranslationMap());
-	uint32 dummyFlags;
-	cpu_status state = disable_interrupts();
-	map->QueryInterrupt((addr_t)data, &physicalTable, &dummyFlags);
-	restore_interrupts(state);
-
-	// put the page table into the page directory
-	int32 index = (addr_t)virtualBase / (B_PAGE_SIZE * 1024);
-	page_directory_entry* entry = &map->PagingStructures()->pgdir_virt[index];
-	x86_put_pgtable_in_pgdir(entry, physicalTable,
-		B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
-	x86_update_all_pgdirs(index, *entry);
-
-	// init the pool structure
-	pool->Init(dataArea, data, virtualArea, (addr_t)virtualBase);
-	poolDeleter.Detach();
-	_pool = pool;
-	return B_OK;
-}
-
-
 // #pragma mark - Initialization
 
 
 status_t
 large_memory_physical_page_ops_init(kernel_args* args,
+	X86LargePhysicalPageMapper::PhysicalPageSlotPool* initialPool,
 	X86PhysicalPageMapper*& _pageMapper,
 	TranslationMapPhysicalPageMapper*& _kernelPageMapper)
 {
 	new(&sPhysicalPageMapper) LargeMemoryPhysicalPageMapper;
-	sPhysicalPageMapper.Init(args, _kernelPageMapper);
+	sPhysicalPageMapper.Init(args, initialPool, _kernelPageMapper);
 
 	_pageMapper = &sPhysicalPageMapper;
 	return B_OK;
diff --git a/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.h b/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.h
new file mode 100644
index 0000000000..7ff23091db
--- /dev/null
+++ b/src/system/kernel/arch/x86/x86_physical_page_mapper_large_memory.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
+ * Distributed under the terms of the MIT License.
+ */ +#ifndef KERNEL_ARCH_X86_X86_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H +#define KERNEL_ARCH_X86_X86_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H + + +#include + +#include + + +class TranslationMapPhysicalPageMapper; +class X86PhysicalPageMapper; +struct kernel_args; + + +namespace X86LargePhysicalPageMapper { + + +struct PhysicalPageSlotPool; + + +struct PhysicalPageSlot { + PhysicalPageSlot* next; + PhysicalPageSlotPool* pool; + addr_t address; + + inline void Map(phys_addr_t physicalAddress); +}; + + +struct PhysicalPageSlotPool : DoublyLinkedListLinkImpl { + + virtual ~PhysicalPageSlotPool(); + + inline bool IsEmpty() const; + + inline PhysicalPageSlot* GetSlot(); + inline void PutSlot(PhysicalPageSlot* slot); + + virtual status_t AllocatePool(PhysicalPageSlotPool*& _pool) = 0; + virtual void Map(phys_addr_t physicalAddress, + addr_t virtualAddress) = 0; + +protected: + PhysicalPageSlot* fSlots; +}; + + +} + + +status_t large_memory_physical_page_ops_init(kernel_args* args, + X86LargePhysicalPageMapper::PhysicalPageSlotPool* initialPool, + X86PhysicalPageMapper*& _pageMapper, + TranslationMapPhysicalPageMapper*& _kernelPageMapper); + + +#endif // KERNEL_ARCH_X86_X86_PHYSICAL_PAGE_MAPPER_LARGE_MEMORY_H