* Added vm_page::accessed flag. Works analogously to vm_page::modified.

* Reorganized the code for [un]mapping pages:
  - Added the new VMTranslationMap::Unmap{Area,Page[s]}() methods, which
    essentially do what vm_unmap_page[s]() did before, but in the
    architecture-specific code, allowing for per-architecture optimizations.
    UnmapArea() covers the special case that the complete area is unmapped;
    particularly when the address space is being deleted, some work can be
    saved. Several TODOs could be slain.
  - Since they are only used within vm.cpp, vm_map_page() and
    vm_unmap_page[s]() are now static and have lost their prefix (and the
    "preserveModified" parameter).
* Added VMTranslationMap::Protect{Page,Area}(). They are just inline wrappers
  for Protect().
* X86VMTranslationMap::Protect(): Make sure not to accidentally clear the
  accessed/dirty flags (see the first sketch after this list).
* X86VMTranslationMap::Unmap()/Protect(): Make page table skipping actually
  work; previously it only skipped to the next page instead of to the next
  page table (see the second sketch after this list).
* Adjusted the PPC code to at least compile.
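
For illustration, here is a minimal standalone sketch of the compare-and-swap
pattern that lets a Protect()-style routine update the protection bits of a
page table entry without clobbering accessed/dirty bits the CPU may set
concurrently. The constants, the std::atomic usage, and the helper name are
assumptions made for this sketch; the actual x86 code uses
test_and_set_page_table_entry(), as shown in the diff below.

  // Sketch only (assumed types and bit values): update a 32-bit x86 page
  // table entry via CAS so that accessed/dirty bits set concurrently by the
  // CPU are never lost.
  #include <atomic>
  #include <cstdint>

  static const uint32_t kPteWritable = 1u << 1;  // corresponds to X86_PTE_WRITABLE
  static const uint32_t kPteUser     = 1u << 2;  // corresponds to X86_PTE_USER

  // Returns the entry value that was actually replaced, so the caller can
  // still inspect the accessed/dirty bits it carried.
  static uint32_t
  set_protection_preserving_flags(std::atomic<uint32_t>& pte,
      uint32_t newProtectionFlags)
  {
      uint32_t oldEntry = pte.load(std::memory_order_relaxed);
      uint32_t newEntry;
      do {
          // Keep everything (including accessed/dirty) except the old
          // protection bits, then merge in the new ones. If the CPU has set
          // accessed/dirty in the meantime, the CAS fails, oldEntry is
          // reloaded with the current value, and we retry.
          newEntry = (oldEntry & ~(kPteWritable | kPteUser))
              | newProtectionFlags;
      } while (!pte.compare_exchange_weak(oldEntry, newEntry));
      return oldEntry;
  }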
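
Similarly, a sketch of the corrected page-table skipping arithmetic: when a
page directory entry is not present, the Unmap()/Protect() loops now round the
address up to the next page table boundary (B_PAGE_SIZE * 1024, i.e. 4 MB on
32-bit non-PAE x86) instead of to the next page, and treat a wrap-around to 0
as the end of the address space. The helper names and constants below are
illustrative only, not the actual kernel code.

  // Sketch only (assumed constants and helper names): skip past a missing
  // page table in a 32-bit address space walk.
  #include <cstdint>

  static const uint32_t kPageSize      = 4096;
  static const uint32_t kPageTableSpan = kPageSize * 1024;  // 4 MB per page table

  // Round up to a power-of-two boundary; wraps to 0 past the end of the
  // 32-bit address space, which the caller uses as a termination signal.
  static inline uint32_t
  round_up(uint32_t value, uint32_t alignment)
  {
      return (value + alignment - 1) & ~(alignment - 1);
  }

  // Advance past a non-present page directory entry. Returns false when the
  // walk has run off the end of the address space.
  static inline bool
  skip_missing_page_table(uint32_t& start)
  {
      // The old code effectively did round_up(start + 1, kPageSize), which
      // only moved to the next page and re-tested the same directory entry.
      start = round_up(start + 1, kPageTableSpan);
      return start != 0;
  }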

No measurable effect on the -j8 Haiku image build time, though the kernel time
drops minimally.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@35089 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-01-15 22:32:51 +00:00
parent e25dcf1a96
commit f082f7f019
13 changed files with 698 additions and 369 deletions

View File

@ -192,7 +192,8 @@ VMAddressSpace::GetAreaIterator()
extern "C" {
#endif
status_t vm_delete_areas(struct VMAddressSpace *aspace);
status_t vm_delete_areas(struct VMAddressSpace *aspace,
bool deletingAddressSpace);
#define vm_swap_address_space(from, to) arch_vm_aspace_swap(from, to)
#ifdef __cplusplus

View File

@ -14,6 +14,7 @@
struct kernel_args;
struct VMArea;
struct VMTranslationMap {
@ -34,6 +35,14 @@ struct VMTranslationMap {
uint32 attributes) = 0;
virtual status_t Unmap(addr_t start, addr_t end) = 0;
// map not locked
virtual status_t UnmapPage(VMArea* area, addr_t address) = 0;
virtual void UnmapPages(VMArea* area, addr_t base,
size_t size);
virtual void UnmapArea(VMArea* area,
bool deletingAddressSpace,
bool ignoreTopCachePageFlags);
virtual status_t Query(addr_t virtualAddress,
addr_t* _physicalAddress,
uint32* _flags) = 0;
@ -43,6 +52,11 @@ struct VMTranslationMap {
virtual status_t Protect(addr_t base, addr_t top,
uint32 attributes) = 0;
status_t ProtectPage(VMArea* area, addr_t address,
uint32 attributes);
status_t ProtectArea(VMArea* area,
uint32 attributes);
virtual status_t ClearFlags(addr_t virtualAddress,
uint32 flags) = 0;
@ -93,6 +107,22 @@ struct VMPhysicalPageMapper {
};
inline status_t
VMTranslationMap::ProtectPage(VMArea* area, addr_t address, uint32 attributes)
{
return Protect(address, address + B_PAGE_SIZE - 1, attributes);
}
#include <vm/VMArea.h>
inline status_t
VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
return Protect(area->Base(), area->Base() + area->Size() - 1, attributes);
}
#include <arch/vm_translation_map.h>
#endif /* KERNEL_VM_VM_TRANSLATION_MAP_H */

View File

@ -94,12 +94,6 @@ bool vm_test_map_modification(struct vm_page *page);
int32 vm_test_map_activation(struct vm_page *page, bool *_modified);
void vm_clear_map_flags(struct vm_page *page, uint32 flags);
void vm_remove_all_page_mappings(struct vm_page *page, uint32 *_flags);
bool vm_unmap_page(struct VMArea* area, addr_t virtualAddress,
bool preserveModified);
status_t vm_unmap_pages(struct VMArea *area, addr_t base, size_t length,
bool preserveModified);
status_t vm_map_page(struct VMArea *area, struct vm_page *page, addr_t address,
uint32 protection);
status_t vm_get_physical_page(addr_t paddr, addr_t* vaddr, void** _handle);
status_t vm_put_physical_page(addr_t vaddr, void* handle);

View File

@ -11,7 +11,6 @@
#include <arch/vm_types.h>
#include <arch/vm_translation_map.h>
#include <condition_variable.h>
#include <kernel.h>
#include <lock.h>
@ -98,8 +97,9 @@ struct vm_page {
bool is_dummy : 1;
bool busy_writing : 1;
// used in VMAnonymousCache::Merge()
bool accessed : 1;
bool modified : 1;
uint8 unused : 2;
uint8 unused : 1;
int8 usage_count;
uint16 wired_count;

View File

@ -1,4 +1,5 @@
/*
* Copyright 2010, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2003-2007, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*
@ -71,17 +72,24 @@
spinlock.
*/
#include <KernelExport.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <int.h>
#include <boot/kernel_args.h>
#include <arch/vm_translation_map.h>
#include <stdlib.h>
#include <KernelExport.h>
#include <arch/cpu.h>
#include <arch_mmu.h>
#include <stdlib.h>
#include <boot/kernel_args.h>
#include <int.h>
#include <kernel.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include <util/AutoLock.h>
#include "generic_vm_physical_page_mapper.h"
#include "generic_vm_physical_page_ops.h"
@ -141,6 +149,8 @@ struct PPCVMTranslationMap : VMTranslationMap {
uint32 attributes);
virtual status_t Unmap(addr_t start, addr_t end);
virtual status_t UnmapPage(VMArea* area, addr_t address);
virtual status_t Query(addr_t virtualAddress,
addr_t* _physicalAddress,
uint32* _flags);
@ -234,7 +244,7 @@ PPCVMTranslationMap::LookupPageTableEntry(addr_t virtualAddress)
return entry;
}
// Didn't found it, try the secondary hash value
// didn't find it, try the secondary hash value
hash = page_table_entry::SecondaryHash(hash);
group = &sPageTable[hash & sPageTableHashMask];
@ -256,16 +266,17 @@ bool
PPCVMTranslationMap::RemovePageTableEntry(addr_t virtualAddress)
{
page_table_entry *entry = LookupPageTableEntry(virtualAddress);
if (entry) {
entry->valid = 0;
ppc_sync();
tlbie(virtualAddress);
eieio();
tlbsync();
ppc_sync();
}
if (entry == NULL)
return false;
return entry;
entry->valid = 0;
ppc_sync();
tlbie(virtualAddress);
eieio();
tlbsync();
ppc_sync();
return true;
}
@ -450,6 +461,69 @@ PPCVMTranslationMap::Unmap(addr_t start, addr_t end)
}
status_t
PPCVMTranslationMap::UnmapPage(VMArea* area, addr_t address)
{
ASSERT(address % B_PAGE_SIZE == 0);
RecursiveLocker locker(fLock);
if (area->cache_type == CACHE_TYPE_DEVICE) {
if (!RemovePageTableEntry(address))
return B_ENTRY_NOT_FOUND;
fMapCount--;
return B_OK;
}
page_table_entry* entry = LookupPageTableEntry(address);
if (entry == NULL)
return B_ENTRY_NOT_FOUND;
page_num_t pageNumber = entry->physical_page_number;
bool accessed = entry->referenced;
bool modified = entry->changed;
RemovePageTableEntry(address);
fMapCount--;
// get the page
vm_page* page = vm_lookup_page(pageNumber);
ASSERT(page != NULL);
// transfer the accessed/dirty flags to the page
page->accessed |= accessed;
page->modified |= modified;
// remove the mapping object/decrement the wired_count of the page
vm_page_mapping* mapping = NULL;
if (area->wiring == B_NO_LOCK) {
vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
while ((mapping = iterator.Next()) != NULL) {
if (mapping->area == area) {
area->mappings.Remove(mapping);
page->mappings.Remove(mapping);
break;
}
}
ASSERT(mapping != NULL);
} else
page->wired_count--;
if (page->wired_count == 0 && page->mappings.IsEmpty())
atomic_add(&gMappedPagesCount, -1);
locker.Unlock();
if (mapping != NULL)
free(mapping);
return B_OK;
}
status_t
PPCVMTranslationMap::Query(addr_t va, addr_t *_outPhysical, uint32 *_outFlags)
{

View File

@ -34,6 +34,13 @@ struct X86VMTranslationMap : VMTranslationMap {
uint32 attributes);
virtual status_t Unmap(addr_t start, addr_t end);
virtual status_t UnmapPage(VMArea* area, addr_t address);
virtual void UnmapPages(VMArea* area, addr_t base,
size_t size);
virtual void UnmapArea(VMArea* area,
bool deletingAddressSpace,
bool ignoreTopCachePageFlags);
virtual status_t Query(addr_t virtualAddress,
addr_t* _physicalAddress,
uint32* _flags);
@ -43,6 +50,7 @@ struct X86VMTranslationMap : VMTranslationMap {
virtual status_t Protect(addr_t base, addr_t top,
uint32 attributes);
virtual status_t ClearFlags(addr_t virtualAddress,
uint32 flags);

View File

@ -25,6 +25,7 @@
#include <vm/vm_page.h>
#include <vm/vm_priv.h>
#include <vm/VMAddressSpace.h>
#include <vm/VMCache.h>
#include "x86_paging.h"
#include "x86_physical_page_mapper.h"
@ -485,7 +486,9 @@ restart:
int index = VADDR_TO_PDENT(start);
if ((pd[index] & X86_PDE_PRESENT) == 0) {
// no pagetable here, move the start up to access the next page table
start = ROUNDUP(start + 1, B_PAGE_SIZE);
start = ROUNDUP(start + 1, B_PAGE_SIZE * 1024);
if (start == 0)
return B_OK;
goto restart;
}
@ -528,6 +531,302 @@ restart:
}
/*! Caller must have locked the cache of the page to be unmapped.
This object shouldn't be locked.
*/
status_t
X86VMTranslationMap::UnmapPage(VMArea* area, addr_t address)
{
ASSERT(address % B_PAGE_SIZE == 0);
page_directory_entry* pd = fArchData->pgdir_virt;
TRACE(("X86VMTranslationMap::UnmapPage(%#" B_PRIxADDR ")\n", address));
RecursiveLocker locker(fLock);
int index = VADDR_TO_PDENT(address);
if ((pd[index] & X86_PDE_PRESENT) == 0)
return B_ENTRY_NOT_FOUND;
ThreadCPUPinner pinner(thread_get_current_thread());
page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
pd[index] & X86_PDE_ADDRESS_MASK);
index = VADDR_TO_PTENT(address);
page_table_entry oldEntry = clear_page_table_entry(&pt[index]);
pinner.Unlock();
if ((oldEntry & X86_PTE_PRESENT) == 0) {
// page mapping not valid
return B_ENTRY_NOT_FOUND;
}
fMapCount--;
if ((oldEntry & X86_PTE_ACCESSED) != 0) {
// Note that we only need to invalidate the address if the
// accessed flag was set, since only then the entry could have been
// in any TLB.
if (fArchData->num_invalidate_pages
< PAGE_INVALIDATE_CACHE_SIZE) {
fArchData->pages_to_invalidate[fArchData->num_invalidate_pages]
= address;
}
fArchData->num_invalidate_pages++;
Flush();
// NOTE: Between clearing the page table entry and Flush() other
// processors (actually even this processor with another thread of the
// same team) could still access the page in question via their cached
// entry. We can obviously lose a modified flag in this case, with the
// effect that the page looks unmodified (and might thus be recycled),
// but is actually modified.
// In most cases this is harmless, but for vm_remove_all_page_mappings()
// this is actually a problem.
// Interestingly FreeBSD seems to ignore this problem as well
// (cf. pmap_remove_all()), unless I've missed something.
}
if (area->cache_type == CACHE_TYPE_DEVICE)
return B_OK;
// get the page
vm_page* page = vm_lookup_page(
(oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
ASSERT(page != NULL);
// transfer the accessed/dirty flags to the page
if ((oldEntry & X86_PTE_ACCESSED) != 0)
page->accessed = true;
if ((oldEntry & X86_PTE_DIRTY) != 0)
page->modified = true;
// remove the mapping object/decrement the wired_count of the page
vm_page_mapping* mapping = NULL;
if (area->wiring == B_NO_LOCK) {
vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
while ((mapping = iterator.Next()) != NULL) {
if (mapping->area == area) {
area->mappings.Remove(mapping);
page->mappings.Remove(mapping);
break;
}
}
ASSERT(mapping != NULL);
} else
page->wired_count--;
if (page->wired_count == 0 && page->mappings.IsEmpty())
atomic_add(&gMappedPagesCount, -1);
locker.Unlock();
if (mapping != NULL)
free(mapping);
return B_OK;
}
void
X86VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size)
{
page_directory_entry* pd = fArchData->pgdir_virt;
addr_t start = base;
addr_t end = base + size;
TRACE(("X86VMTranslationMap::UnmapPages(%p, %#" B_PRIxADDR ", %#"
B_PRIxADDR ")\n", area, start, end));
VMAreaMappings queue;
RecursiveLocker locker(fLock);
while (start < end) {
int index = VADDR_TO_PDENT(start);
if ((pd[index] & X86_PDE_PRESENT) == 0) {
// no page table here, move the start up to access the next page
// table
start = ROUNDUP(start + 1, B_PAGE_SIZE * 1024);
if (start == 0)
break;
continue;
}
struct thread* thread = thread_get_current_thread();
ThreadCPUPinner pinner(thread);
page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
pd[index] & X86_PDE_ADDRESS_MASK);
for (index = VADDR_TO_PTENT(start); (index < 1024) && (start < end);
index++, start += B_PAGE_SIZE) {
page_table_entry oldEntry = clear_page_table_entry(&pt[index]);
if ((oldEntry & X86_PTE_PRESENT) == 0)
continue;
fMapCount--;
if ((oldEntry & X86_PTE_ACCESSED) != 0) {
// Note that we only need to invalidate the address if the
// accessed flag was set, since only then the entry could have
// been in any TLB.
if (fArchData->num_invalidate_pages
< PAGE_INVALIDATE_CACHE_SIZE) {
fArchData->pages_to_invalidate[
fArchData->num_invalidate_pages] = start;
}
fArchData->num_invalidate_pages++;
}
if (area->cache_type != CACHE_TYPE_DEVICE) {
// get the page
vm_page* page = vm_lookup_page(
(oldEntry & X86_PTE_ADDRESS_MASK) / B_PAGE_SIZE);
ASSERT(page != NULL);
// transfer the accessed/dirty flags to the page
if ((oldEntry & X86_PTE_ACCESSED) != 0)
page->accessed = true;
if ((oldEntry & X86_PTE_DIRTY) != 0)
page->modified = true;
// remove the mapping object/decrement the wired_count of the
// page
if (area->wiring == B_NO_LOCK) {
vm_page_mapping* mapping = NULL;
vm_page_mappings::Iterator iterator
= page->mappings.GetIterator();
while ((mapping = iterator.Next()) != NULL) {
if (mapping->area == area)
break;
}
ASSERT(mapping != NULL);
area->mappings.Remove(mapping);
page->mappings.Remove(mapping);
queue.Add(mapping);
} else
page->wired_count--;
if (page->wired_count == 0 && page->mappings.IsEmpty())
atomic_add(&gMappedPagesCount, -1);
}
}
Flush();
// flush explicitly, since we directly use the lock
pinner.Unlock();
}
// TODO: As in UnmapPage() we can lose page dirty flags here. ATM it's not
// really critical here, as in all cases where this method is used, the unmapped
// area range is unmapped for good (resized/cut) and the pages will likely
// be freed.
locker.Unlock();
// free removed mappings
while (vm_page_mapping* mapping = queue.RemoveHead())
free(mapping);
}
void
X86VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
bool ignoreTopCachePageFlags)
{
if (area->cache_type == CACHE_TYPE_DEVICE || area->wiring != B_NO_LOCK) {
X86VMTranslationMap::UnmapPages(area, area->Base(), area->Size());
return;
}
bool unmapPages = !deletingAddressSpace || !ignoreTopCachePageFlags;
page_directory_entry* pd = fArchData->pgdir_virt;
RecursiveLocker locker(fLock);
VMAreaMappings mappings;
mappings.MoveFrom(&area->mappings);
for (VMAreaMappings::Iterator it = mappings.GetIterator();
vm_page_mapping* mapping = it.Next();) {
vm_page* page = mapping->page;
page->mappings.Remove(mapping);
if (page->wired_count == 0 && page->mappings.IsEmpty())
atomic_add(&gMappedPagesCount, -1);
if (unmapPages || page->cache != area->cache) {
addr_t address = area->Base()
+ ((page->cache_offset * B_PAGE_SIZE) - area->cache_offset);
int index = VADDR_TO_PDENT(address);
if ((pd[index] & X86_PDE_PRESENT) == 0) {
panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
"has no page dir entry", page, area, address);
continue;
}
ThreadCPUPinner pinner(thread_get_current_thread());
page_table_entry* pt = fArchData->page_mapper->GetPageTableAt(
pd[index] & X86_PDE_ADDRESS_MASK);
page_table_entry oldEntry = clear_page_table_entry(
&pt[VADDR_TO_PTENT(address)]);
pinner.Unlock();
if ((oldEntry & X86_PTE_PRESENT) == 0) {
panic("page %p has mapping for area %p (%#" B_PRIxADDR "), but "
"has no page table entry", page, area, address);
continue;
}
// transfer the accessed/dirty flags to the page and invalidate
// the mapping, if necessary
if ((oldEntry & X86_PTE_ACCESSED) != 0) {
page->accessed = true;
if (!deletingAddressSpace) {
if (fArchData->num_invalidate_pages
< PAGE_INVALIDATE_CACHE_SIZE) {
fArchData->pages_to_invalidate[
fArchData->num_invalidate_pages] = address;
}
fArchData->num_invalidate_pages++;
}
}
if ((oldEntry & X86_PTE_DIRTY) != 0)
page->modified = true;
}
fMapCount--;
}
Flush();
// flush explicitly, since we directly use the lock
locker.Unlock();
while (vm_page_mapping* mapping = mappings.RemoveHead())
free(mapping);
}
status_t
X86VMTranslationMap::Query(addr_t va, addr_t *_physical, uint32 *_flags)
{
@ -626,6 +925,15 @@ X86VMTranslationMap::Protect(addr_t start, addr_t end, uint32 attributes)
TRACE(("protect_tmap: pages 0x%lx to 0x%lx, attributes %lx\n", start, end,
attributes));
// compute protection flags
uint32 newProtectionFlags = 0;
if ((attributes & B_USER_PROTECTION) != 0) {
newProtectionFlags = X86_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
newProtectionFlags |= X86_PTE_WRITABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
newProtectionFlags = X86_PTE_WRITABLE;
restart:
if (start >= end)
return B_OK;
@ -633,7 +941,9 @@ restart:
int index = VADDR_TO_PDENT(start);
if ((pd[index] & X86_PDE_PRESENT) == 0) {
// no pagetable here, move the start up to access the next page table
start = ROUNDUP(start + 1, B_PAGE_SIZE);
start = ROUNDUP(start + 1, B_PAGE_SIZE * 1024);
if (start == 0)
return B_OK;
goto restart;
}
@ -653,20 +963,19 @@ restart:
TRACE(("protect_tmap: protect page 0x%lx\n", start));
entry &= ~(X86_PTE_WRITABLE | X86_PTE_USER);
if ((attributes & B_USER_PROTECTION) != 0) {
entry |= X86_PTE_USER;
if ((attributes & B_WRITE_AREA) != 0)
entry |= X86_PTE_WRITABLE;
} else if ((attributes & B_KERNEL_WRITE_AREA) != 0)
entry |= X86_PTE_WRITABLE;
page_table_entry oldEntry = set_page_table_entry(&pt[index], entry);
// TODO: We might have cleared accessed/modified flags!
// set the new protection flags -- we want to do that atomically,
// without changing the accessed or dirty flag
page_table_entry oldEntry;
do {
oldEntry = test_and_set_page_table_entry(&pt[index],
(entry & ~(X86_PTE_WRITABLE | X86_PTE_USER))
| newProtectionFlags,
entry);
} while (oldEntry != entry);
if ((oldEntry & X86_PTE_ACCESSED) != 0) {
// Note, that we only need to invalidate the address, if the
// accessed flags was set, since only then the entry could have been
// accessed flag was set, since only then the entry could have been
// in any TLB.
if (fArchData->num_invalidate_pages
< PAGE_INVALIDATE_CACHE_SIZE) {

View File

@ -94,6 +94,21 @@ set_page_table_entry(page_table_entry* entry, page_table_entry newEntry)
}
static inline page_table_entry
test_and_set_page_table_entry(page_table_entry* entry,
page_table_entry newEntry, page_table_entry oldEntry)
{
return atomic_test_and_set((int32*)entry, newEntry, oldEntry);
}
static inline page_table_entry
clear_page_table_entry(page_table_entry* entry)
{
return set_page_table_entry(entry, 0);
}
static inline page_table_entry
clear_page_table_entry_flags(page_table_entry* entry, uint32 flags)
{

View File

@ -1409,7 +1409,7 @@ exec_team(const char* path, char**& _flatArgs, size_t flatArgsSize,
user_debug_prepare_for_exec();
delete_team_user_data(team);
vm_delete_areas(team->address_space);
vm_delete_areas(team->address_space, false);
xsi_sem_undo(team);
delete_owned_ports(team);
sem_delete_owned_sems(team);

View File

@ -161,7 +161,7 @@ VMAddressSpace::RemoveAndPut()
fDeleting = true;
WriteUnlock();
vm_delete_areas(this);
vm_delete_areas(this, true);
Put();
}

View File

@ -6,6 +6,8 @@
#include <vm/VMTranslationMap.h>
#include <vm/VMArea.h>
// #pragma mark - VMTranslationMap
@ -24,6 +26,48 @@ VMTranslationMap::~VMTranslationMap()
}
/*! Unmaps a range of pages of an area.
The default implementation just iterates over all virtual pages of the
range and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapPages(VMArea* area, addr_t base, size_t size)
{
ASSERT(base % B_PAGE_SIZE == 0);
ASSERT(size % B_PAGE_SIZE == 0);
addr_t address = base;
addr_t end = address + size;
for (; address != end; address += B_PAGE_SIZE)
UnmapPage(area, address);
}
/*! Unmaps all of an area's pages.
If \a deletingAddressSpace is \c true, the address space the area belongs to
is in the process of being destroyed and isn't used by anyone anymore. For
some architectures this can be used for optimizations (e.g. not unmapping
pages or at least not needing to invalidate TLB entries).
If \a ignoreTopCachePageFlags is \c true, the area is in the process of
being destroyed and its top cache is otherwise unreferenced. I.e. all mapped
pages that live in the top cache are going to be freed and the page
accessed and modified flags don't need to be propagated.
The default implementation just iterates over all virtual pages of the
area and calls UnmapPage(). This is obviously not particularly efficient.
*/
void
VMTranslationMap::UnmapArea(VMArea* area, bool deletingAddressSpace,
bool ignoreTopCachePageFlags)
{
addr_t address = area->Base();
addr_t end = address + area->Size();
for (; address != end; address += B_PAGE_SIZE)
UnmapPage(area, address);
}
// #pragma mark - VMPhysicalPageMapper

View File

@ -199,7 +199,8 @@ static cache_info* sCacheInfoTable;
// function declarations
static void delete_area(VMAddressSpace* addressSpace, VMArea* area);
static void delete_area(VMAddressSpace* addressSpace, VMArea* area,
bool addressSpaceCleanup);
static status_t vm_soft_fault(VMAddressSpace* addressSpace, addr_t address,
bool isWrite, bool isUser);
static status_t map_backing_store(VMAddressSpace* addressSpace,
@ -334,6 +335,34 @@ private:
// #pragma mark -
/*! The page's cache must be locked.
*/
static inline void
increment_page_wired_count(vm_page* page)
{
if (page->wired_count++ == 0 && page->mappings.IsEmpty())
atomic_add(&gMappedPagesCount, 1);
}
/*! The page's cache must be locked.
*/
static inline void
decrement_page_wired_count(vm_page* page)
{
if (--page->wired_count == 0 && page->mappings.IsEmpty())
atomic_add(&gMappedPagesCount, -1);
}
static inline addr_t
virtual_page_address(VMArea* area, vm_page* page)
{
return area->Base()
+ ((page->cache_offset << PAGE_SHIFT) - area->cache_offset);
}
//! You need to have the address space locked when calling this function
static VMArea*
lookup_area(VMAddressSpace* addressSpace, area_id id)
@ -381,6 +410,79 @@ get_area_page_protection(VMArea* area, addr_t pageAddress)
}
/*! The caller must have reserved enough pages the translation map
implementation might need to map this page.
The page's cache must be locked.
*/
static status_t
map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
{
VMTranslationMap* map = area->address_space->TranslationMap();
if (area->wiring == B_NO_LOCK) {
DEBUG_PAGE_ACCESS_CHECK(page);
vm_page_mapping* mapping
= (vm_page_mapping*)malloc_nogrow(sizeof(vm_page_mapping));
if (mapping == NULL)
return B_NO_MEMORY;
mapping->page = page;
mapping->area = area;
map->Lock();
map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection);
// insert mapping into lists
if (page->mappings.IsEmpty() && page->wired_count == 0)
atomic_add(&gMappedPagesCount, 1);
page->mappings.Add(mapping);
area->mappings.Add(mapping);
map->Unlock();
} else {
DEBUG_PAGE_ACCESS_CHECK(page);
map->Lock();
map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection);
map->Unlock();
increment_page_wired_count(page);
}
if (page->usage_count < 0)
page->usage_count = 1;
if (page->state != PAGE_STATE_MODIFIED)
vm_page_set_state(page, PAGE_STATE_ACTIVE);
return B_OK;
}
/*! The caller must hold the lock of the page's cache.
*/
static inline bool
unmap_page(VMArea* area, addr_t virtualAddress)
{
return area->address_space->TranslationMap()->UnmapPage(area,
virtualAddress);
}
/*! The caller must hold the lock of all mapped pages' caches.
*/
static inline void
unmap_pages(VMArea* area, addr_t base, size_t size)
{
area->address_space->TranslationMap()->UnmapPages(area, base, size);
}
/*! Cuts a piece out of an area. If the given cut range covers the complete
area, it is deleted. If it covers the beginning or the end, the area is
resized accordingly. If the range covers some part in the middle of the
@ -399,7 +501,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
// Is the area fully covered?
if (area->Base() >= address && areaLast <= lastAddress) {
delete_area(addressSpace, area);
delete_area(addressSpace, area, false);
return B_OK;
}
@ -417,10 +519,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
return error;
// unmap pages
vm_unmap_pages(area, address, oldSize - newSize, false);
// TODO: preserveModified = false is wrong, since this could be a
// cloned area or a write-mmap()ed file, in which case we'd lose
// information.
unmap_pages(area, address, oldSize - newSize);
// If no one else uses the area's cache, we can resize it, too.
if (cache->areas == area && area->cache_next == NULL
@ -442,8 +541,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
size_t newSize = areaLast - lastAddress;
// unmap pages
vm_unmap_pages(area, oldBase, newBase - oldBase, false);
// TODO: See the vm_unmap_pages() above.
unmap_pages(area, oldBase, newBase - oldBase);
// resize the area
status_t error = addressSpace->ShrinkAreaHead(area, newSize);
@ -466,8 +564,7 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
addr_t secondSize = areaLast - lastAddress;
// unmap pages
vm_unmap_pages(area, address, area->Size() - firstNewSize, false);
// TODO: See the vm_unmap_pages() above.
unmap_pages(area, address, area->Size() - firstNewSize);
// resize the area
addr_t oldSize = area->Size();
@ -501,26 +598,6 @@ cut_area(VMAddressSpace* addressSpace, VMArea* area, addr_t address,
}
/*! The page's cache must be locked.
*/
static inline void
increment_page_wired_count(vm_page* page)
{
if (page->wired_count++ == 0 && page->mappings.IsEmpty())
atomic_add(&gMappedPagesCount, 1);
}
/*! The page's cache must be locked.
*/
static inline void
decrement_page_wired_count(vm_page* page)
{
if (--page->wired_count == 0 && page->mappings.IsEmpty())
atomic_add(&gMappedPagesCount, -1);
}
/*! Deletes all areas in the given address range.
The address space must be write-locked.
*/
@ -946,7 +1023,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
#endif
vm_page* page = vm_page_allocate_page(newPageState);
cache->InsertPage(page, offset);
vm_map_page(area, page, address, protection);
map_page(area, page, address, protection);
// TODO: This sets the page state to "active", but it would
// make more sense to set it to "wired".
@ -1130,7 +1207,7 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
status = arch_vm_set_memory_type(area, physicalAddress,
addressSpec & B_MTR_MASK);
if (status < B_OK)
delete_area(locker.AddressSpace(), area);
delete_area(locker.AddressSpace(), area, false);
}
if (status >= B_OK && !alreadyWired) {
@ -1329,7 +1406,7 @@ pre_map_area_pages(VMArea* area, VMCache* cache)
continue;
DEBUG_PAGE_ACCESS_START(page);
vm_map_page(area, page,
map_page(area, page,
baseAddress + (page->cache_offset * B_PAGE_SIZE - cacheOffset),
B_READ_AREA | B_KERNEL_READ_AREA);
DEBUG_PAGE_ACCESS_END(page);
@ -1628,7 +1705,7 @@ vm_clone_area(team_id team, const char* name, void** address,
vm_page* page = it.Next();) {
if (page->state != PAGE_STATE_BUSY) {
DEBUG_PAGE_ACCESS_START(page);
vm_map_page(newArea, page,
map_page(newArea, page,
newArea->Base() + ((page->cache_offset << PAGE_SHIFT)
- newArea->cache_offset),
protection);
@ -1654,7 +1731,8 @@ vm_clone_area(team_id team, const char* name, void** address,
static void
delete_area(VMAddressSpace* addressSpace, VMArea* area)
delete_area(VMAddressSpace* addressSpace, VMArea* area,
bool deletingAddressSpace)
{
VMAreaHash::Remove(area);
@ -1668,11 +1746,15 @@ delete_area(VMAddressSpace* addressSpace, VMArea* area)
VMCacheChainLocker cacheChainLocker(topCache);
cacheChainLocker.LockAllSourceCaches();
vm_unmap_pages(area, area->Base(), area->Size(),
!area->cache->temporary);
// TODO: Even if the cache is temporary we might need to preserve
// the modified flag, since the area could be a clone and backed by
// swap. We would lose information in this case.
// If the area's top cache is a temporary cache and the area is the only
// one referencing it (besides us currently holding a second reference),
// the unmapping code doesn't need to care about preserving the accessed
// and dirty flags of the top cache page mappings.
bool ignoreTopCachePageFlags
= topCache->temporary && topCache->RefCount() == 2;
area->address_space->TranslationMap()->UnmapArea(area,
deletingAddressSpace, ignoreTopCachePageFlags);
}
if (!area->cache->temporary)
@ -1703,7 +1785,7 @@ vm_delete_area(team_id team, area_id id, bool kernel)
if (!kernel && (area->protection & B_KERNEL_AREA) != 0)
return B_NOT_ALLOWED;
delete_area(locker.AddressSpace(), area);
delete_area(locker.AddressSpace(), area, false);
return B_OK;
}
@ -1757,8 +1839,7 @@ vm_copy_on_write_area(VMCache* lowerCache)
VMTranslationMap* map = tempArea->address_space->TranslationMap();
map->Lock();
map->Protect(tempArea->Base(),
tempArea->Base() - 1 + tempArea->Size(), protection);
map->ProtectArea(tempArea, protection);
map->Unlock();
}
@ -1868,6 +1949,7 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
}
bool changePageProtection = true;
bool changeTopCachePagesOnly = false;
if ((area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0
&& (newProtection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) == 0) {
@ -1885,6 +1967,17 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
// count == 0
}
}
// If only the writability changes, we can just remap the pages of the
// top cache, since the pages of lower caches are mapped read-only
// anyway. That's advantageous only if the number of pages in the cache
// is significantly smaller than the number of pages in the area,
// though.
if (newProtection
== (area->protection & ~(B_WRITE_AREA | B_KERNEL_WRITE_AREA))
&& cache->page_count * 2 < area->Size() / B_PAGE_SIZE) {
changeTopCachePagesOnly = true;
}
} else if ((area->protection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) == 0
&& (newProtection & (B_WRITE_AREA | B_KERNEL_WRITE_AREA)) != 0) {
// !writable -> writable
@ -1906,20 +1999,7 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
// There's a source cache, hence we can't just change all pages'
// protection or we might allow writing into pages belonging to
// a lower cache.
changePageProtection = false;
VMTranslationMap* map = area->address_space->TranslationMap();
map->Lock();
for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
vm_page* page = it.Next();) {
addr_t address = area->Base()
+ (page->cache_offset << PAGE_SHIFT);
map->Protect(address, address - 1 + B_PAGE_SIZE,
newProtection);
}
map->Unlock();
changeTopCachePagesOnly = true;
}
}
} else {
@ -1928,12 +2008,25 @@ vm_set_area_protection(team_id team, area_id areaID, uint32 newProtection,
if (status == B_OK) {
// remap existing pages in this cache
VMTranslationMap* map = area->address_space->TranslationMap();
if (changePageProtection) {
VMTranslationMap* map = area->address_space->TranslationMap();
map->Lock();
map->Protect(area->Base(), area->Base() - 1 + area->Size(),
newProtection);
if (changeTopCachePagesOnly) {
page_num_t firstPageOffset = area->cache_offset / B_PAGE_SIZE;
page_num_t lastPageOffset
= firstPageOffset + area->Size() / B_PAGE_SIZE;
for (VMCachePagesTree::Iterator it = cache->pages.GetIterator();
vm_page* page = it.Next();) {
if (page->cache_offset >= firstPageOffset
&& page->cache_offset <= lastPageOffset) {
addr_t address = virtual_page_address(area, page);
map->ProtectPage(area, address, newProtection);
}
}
} else
map->ProtectArea(area, newProtection);
map->Unlock();
}
@ -1960,14 +2053,6 @@ vm_get_page_mapping(team_id team, addr_t vaddr, addr_t* paddr)
}
static inline addr_t
virtual_page_address(VMArea* area, vm_page* page)
{
return area->Base()
+ ((page->cache_offset << PAGE_SHIFT) - area->cache_offset);
}
/*! The page's cache must be locked.
*/
bool
@ -2002,7 +2087,7 @@ int32
vm_test_map_activation(vm_page* page, bool* _modified)
{
int32 activation = 0;
bool modified = page->modified;
bool modified = false;
vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
vm_page_mapping* mapping;
@ -2023,7 +2108,10 @@ vm_test_map_activation(vm_page* page, bool* _modified)
}
if (_modified != NULL)
*_modified = modified;
*_modified = modified || page->modified;
if (page->accessed)
activation++;
return activation;
}
@ -2034,6 +2122,8 @@ vm_test_map_activation(vm_page* page, bool* _modified)
void
vm_clear_map_flags(vm_page* page, uint32 flags)
{
if ((flags & PAGE_ACCESSED) != 0)
page->accessed = false;
if ((flags & PAGE_MODIFIED) != 0)
page->modified = false;
@ -2058,256 +2148,19 @@ vm_clear_map_flags(vm_page* page, uint32 flags)
void
vm_remove_all_page_mappings(vm_page* page, uint32* _flags)
{
uint32 accumulatedFlags = 0;
if (page->modified)
accumulatedFlags |= PAGE_MODIFIED;
vm_page_mappings queue;
queue.MoveFrom(&page->mappings);
vm_page_mappings::Iterator iterator = queue.GetIterator();
vm_page_mapping* mapping;
while ((mapping = iterator.Next()) != NULL) {
while (vm_page_mapping* mapping = page->mappings.Head()) {
VMArea* area = mapping->area;
VMTranslationMap* map = area->address_space->TranslationMap();
addr_t physicalAddress;
uint32 flags;
map->Lock();
addr_t address = virtual_page_address(area, page);
map->Unmap(address, address + (B_PAGE_SIZE - 1));
map->Flush();
map->Query(address, &physicalAddress, &flags);
area->mappings.Remove(mapping);
map->Unlock();
accumulatedFlags |= flags;
map->UnmapPage(area, address);
}
if (page->wired_count == 0 && !queue.IsEmpty())
atomic_add(&gMappedPagesCount, -1);
// free now unused mappings
while ((mapping = queue.RemoveHead()) != NULL)
free(mapping);
if (_flags != NULL)
*_flags = accumulatedFlags;
}
/*! If \a preserveModified is \c true, the caller must hold the lock of the
page's cache.
*/
bool
vm_unmap_page(VMArea* area, addr_t virtualAddress, bool preserveModified)
{
VMTranslationMap* map = area->address_space->TranslationMap();
map->Lock();
addr_t physicalAddress;
uint32 flags;
status_t status = map->Query(virtualAddress, &physicalAddress, &flags);
if (status < B_OK || (flags & PAGE_PRESENT) == 0) {
map->Unlock();
return false;
if (_flags != NULL) {
*_flags = (page->modified ? PAGE_MODIFIED : 0)
| (page->accessed ? PAGE_ACCESSED : 0);
// TODO: This return value is obviously not particularly useful, as
// the caller could simply check the page's flags.
}
vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL && area->cache_type != CACHE_TYPE_DEVICE) {
panic("area %p looking up page failed for pa 0x%lx\n", area,
physicalAddress);
}
if (area->wiring != B_NO_LOCK && area->cache_type != CACHE_TYPE_DEVICE)
decrement_page_wired_count(page);
map->Unmap(virtualAddress, virtualAddress + B_PAGE_SIZE - 1);
if (preserveModified) {
map->Flush();
status = map->Query(virtualAddress, &physicalAddress, &flags);
// TODO: The x86 implementation always returns 0 flags, if the entry is not
// present. I.e. we've already lost the flag.
if ((flags & PAGE_MODIFIED) != 0)
page->modified = true;
}
vm_page_mapping* mapping = NULL;
if (area->wiring == B_NO_LOCK) {
vm_page_mappings::Iterator iterator = page->mappings.GetIterator();
while ((mapping = iterator.Next()) != NULL) {
if (mapping->area == area) {
area->mappings.Remove(mapping);
page->mappings.Remove(mapping);
if (page->mappings.IsEmpty() && page->wired_count == 0)
atomic_add(&gMappedPagesCount, -1);
break;
}
}
}
map->Unlock();
if (area->wiring == B_NO_LOCK) {
if (mapping != NULL) {
free(mapping);
} else {
dprintf("vm_unmap_page: couldn't find mapping for area %p in page "
"%p\n", area, page);
}
}
return true;
}
/*! If \a preserveModified is \c true, the caller must hold the lock of all
mapped pages' caches.
*/
status_t
vm_unmap_pages(VMArea* area, addr_t base, size_t size, bool preserveModified)
{
VMTranslationMap* map = area->address_space->TranslationMap();
addr_t end = base + (size - 1);
map->Lock();
if (area->wiring != B_NO_LOCK && area->cache_type != CACHE_TYPE_DEVICE) {
// iterate through all pages and decrease their wired count
for (addr_t virtualAddress = base; virtualAddress < end;
virtualAddress += B_PAGE_SIZE) {
addr_t physicalAddress;
uint32 flags;
status_t status = map->Query(virtualAddress, &physicalAddress,
&flags);
if (status < B_OK || (flags & PAGE_PRESENT) == 0)
continue;
vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL) {
panic("area %p looking up page failed for pa 0x%lx\n", area,
physicalAddress);
}
decrement_page_wired_count(page);
}
}
map->Unmap(base, end);
if (preserveModified) {
map->Flush();
for (addr_t virtualAddress = base; virtualAddress < end;
virtualAddress += B_PAGE_SIZE) {
addr_t physicalAddress;
uint32 flags;
status_t status = map->Query(virtualAddress, &physicalAddress,
&flags);
if (status < B_OK || (flags & PAGE_PRESENT) == 0)
continue;
// TODO: We just unmapped the pages, so the PAGE_PRESENT flag won't be set for
// sure. We can't just remove the check, though, since then we might also find
// pages that we haven't unmapped in the first place. Finally the x86 query()
// implementation always returns 0 flags, if the entry is not present. I.e.
// we've already lost the flag.
vm_page* page = vm_lookup_page(physicalAddress / B_PAGE_SIZE);
if (page == NULL) {
panic("area %p looking up page failed for pa 0x%lx\n", area,
physicalAddress);
}
if ((flags & PAGE_MODIFIED) != 0)
page->modified = true;
}
}
VMAreaMappings queue;
if (area->wiring == B_NO_LOCK) {
uint32 startOffset = (area->cache_offset + base - area->Base())
>> PAGE_SHIFT;
uint32 endOffset = startOffset + (size >> PAGE_SHIFT);
VMAreaMappings::Iterator iterator = area->mappings.GetIterator();
while (vm_page_mapping* mapping = iterator.Next()) {
vm_page* page = mapping->page;
if (page->cache_offset < startOffset
|| page->cache_offset >= endOffset)
continue;
page->mappings.Remove(mapping);
iterator.Remove();
if (page->mappings.IsEmpty() && page->wired_count == 0)
atomic_add(&gMappedPagesCount, -1);
queue.Add(mapping);
}
}
map->Unlock();
if (area->wiring == B_NO_LOCK) {
while (vm_page_mapping* mapping = queue.RemoveHead())
free(mapping);
}
return B_OK;
}
/*! The caller must have reserved enough pages the translation map
implementation might need to map this page.
The page's cache must be locked.
*/
status_t
vm_map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection)
{
VMTranslationMap* map = area->address_space->TranslationMap();
vm_page_mapping* mapping = NULL;
DEBUG_PAGE_ACCESS_CHECK(page);
if (area->wiring == B_NO_LOCK) {
mapping = (vm_page_mapping*)malloc_nogrow(sizeof(vm_page_mapping));
if (mapping == NULL)
return B_NO_MEMORY;
mapping->page = page;
mapping->area = area;
}
map->Lock();
map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection);
if (area->wiring == B_NO_LOCK) {
// insert mapping into lists
if (page->mappings.IsEmpty() && page->wired_count == 0)
atomic_add(&gMappedPagesCount, 1);
page->mappings.Add(mapping);
area->mappings.Add(mapping);
}
map->Unlock();
if (area->wiring != B_NO_LOCK)
increment_page_wired_count(page);
if (page->usage_count < 0)
page->usage_count = 1;
if (page->state != PAGE_STATE_MODIFIED)
vm_page_set_state(page, PAGE_STATE_ACTIVE);
return B_OK;
}
@ -2924,7 +2777,7 @@ dump_available_memory(int argc, char** argv)
status_t
vm_delete_areas(struct VMAddressSpace* addressSpace)
vm_delete_areas(struct VMAddressSpace* addressSpace, bool deletingAddressSpace)
{
TRACE(("vm_delete_areas: called on address space 0x%lx\n",
addressSpace->ID()));
@ -2936,7 +2789,7 @@ vm_delete_areas(struct VMAddressSpace* addressSpace)
// delete all the areas in this address space
while (VMArea* area = addressSpace->FirstArea())
delete_area(addressSpace, area);
delete_area(addressSpace, area, deletingAddressSpace);
addressSpace->WriteUnlock();
return B_OK;
@ -3933,9 +3786,7 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
// Yep there's already a page. If it's ours, we can simply adjust
// its protection. Otherwise we have to unmap it.
if (mappedPage == context.page) {
context.map->Protect(address, address + (B_PAGE_SIZE - 1),
newProtection);
context.map->ProtectPage(area, address, newProtection);
mapPage = false;
} else
unmapPage = true;
@ -3954,12 +3805,12 @@ vm_soft_fault(VMAddressSpace* addressSpace, addr_t originalAddress,
// would be mapped instead). That in turn means that our algorithm
// must have found it and therefore it cannot be busy either.
DEBUG_PAGE_ACCESS_START(mappedPage);
vm_unmap_page(area, address, true);
unmap_page(area, address);
DEBUG_PAGE_ACCESS_END(mappedPage);
}
if (mapPage)
vm_map_page(area, context.page, address, newProtection);
map_page(area, context.page, address, newProtection);
DEBUG_PAGE_ACCESS_END(context.page);
@ -4255,8 +4106,8 @@ vm_resize_area(area_id areaID, size_t newSize, bool kernel)
VMCacheChainLocker cacheChainLocker(cache);
cacheChainLocker.LockAllSourceCaches();
vm_unmap_pages(current, current->Base() + newSize,
oldSize - newSize, false);
unmap_pages(current, current->Base() + newSize,
oldSize - newSize);
cacheChainLocker.Unlock(cache);
}
@ -5313,7 +5164,7 @@ _user_set_memory_protection(void* _address, size_t size, int protection)
map->Unlock();
if (unmapPage)
vm_unmap_page(area, pageAddress, true);
unmap_page(area, pageAddress);
}
}

View File

@ -412,6 +412,7 @@ dump_page(int argc, char **argv)
kprintf("wired_count: %d\n", page->wired_count);
kprintf("usage_count: %d\n", page->usage_count);
kprintf("busy_writing: %d\n", page->busy_writing);
kprintf("accessed: %d\n", page->accessed);
kprintf("modified: %d\n", page->modified);
#if DEBUG_PAGE_QUEUE
kprintf("queue: %p\n", page->queue);
@ -2167,7 +2168,8 @@ vm_page_allocate_page(int pageState)
int oldPageState = page->state;
page->state = PAGE_STATE_BUSY;
page->usage_count = 2;
page->modified = 0;
page->accessed = false;
page->modified = false;
locker.Unlock();
@ -2204,7 +2206,8 @@ allocate_page_run(page_num_t start, page_num_t length, int pageState,
page.state = PAGE_STATE_BUSY;
page.usage_count = 1;
page.modified = 0;
page.accessed = false;
page.modified = false;
}
freeClearQueueLocker.Unlock();