* VMTranslationMap::Map()/Protect(): Added "memoryType" parameter. Not
  implemented for any architecture yet (a hypothetical backend sketch follows
  the commit metadata below).
* vm_set_area_memory_type(): Call VMTranslationMap::ProtectArea() to change the
  memory type of the already mapped pages (a usage sketch follows the last hunk
  below).



git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@36574 a95241bf-73f2-0310-859d-f6bbb57e9c96
Ingo Weinhold 2010-05-01 21:33:12 +00:00
parent 3b0c1b5227
commit c1be1e0761
7 changed files with 49 additions and 26 deletions
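
Neither backend touched here actually uses the new parameter yet; the PPC and
x86 Map()/Protect() implementations only gain a "// TODO: Support memory
types!". Purely as an illustration (not part of this commit, helper name made
up), this is roughly what an x86 backend could do with the parameter when it
assembles PTE flags in Map()/Protect(), assuming memoryType carries the
B_MTR_* constants from OS.h and that PAT is ignored:

#include <OS.h>	// B_MTR_UC, B_MTR_WC, B_MTR_WT, B_MTR_WB, uint32

// Architectural x86 PTE cacheability bits.
static const uint32 kPageWriteThrough = 1 << 3;	// PWT
static const uint32 kPageCacheDisable = 1 << 4;	// PCD

// Hypothetical helper: translate a Haiku memory type into the PTE bits a
// backend would OR into the entry it builds in Map()/Protect().
static uint32
memory_type_to_pte_flags(uint32 memoryType)
{
	switch (memoryType) {
		case B_MTR_UC:	// uncacheable
			return kPageCacheDisable | kPageWriteThrough;
		case B_MTR_WC:	// write-combining; without PAT the closest
						// approximation is to disable caching
			return kPageCacheDisable;
		case B_MTR_WT:	// write-through
			return kPageWriteThrough;
		case B_MTR_WB:	// write-back, the default
		default:
			return 0;
	}
}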

View File

@ -12,10 +12,11 @@
#include <kernel.h>
#include <lock.h>
#include <vm/VMArea.h>
struct kernel_args;
struct vm_page_reservation;
struct VMArea;
struct VMTranslationMap {
@ -32,8 +33,8 @@ struct VMTranslationMap {
addr_t end) const = 0;
virtual status_t Map(addr_t virtualAddress,
addr_t physicalAddress,
uint32 attributes,
addr_t physicalAddress, uint32 attributes,
uint32 memoryType,
vm_page_reservation* reservation) = 0;
virtual status_t Unmap(addr_t start, addr_t end) = 0;
@ -54,7 +55,7 @@ struct VMTranslationMap {
uint32* _flags) = 0;
virtual status_t Protect(addr_t base, addr_t top,
uint32 attributes) = 0;
uint32 attributes, uint32 memoryType) = 0;
status_t ProtectPage(VMArea* area, addr_t address,
uint32 attributes);
status_t ProtectArea(VMArea* area,
@ -119,7 +120,8 @@ struct VMPhysicalPageMapper {
inline status_t
VMTranslationMap::ProtectPage(VMArea* area, addr_t address, uint32 attributes)
{
return Protect(address, address + B_PAGE_SIZE - 1, attributes);
return Protect(address, address + B_PAGE_SIZE - 1, attributes,
area->MemoryType());
}
@ -127,7 +129,8 @@ VMTranslationMap::ProtectPage(VMArea* area, addr_t address, uint32 attributes)
inline status_t
VMTranslationMap::ProtectArea(VMArea* area, uint32 attributes)
{
return Protect(area->Base(), area->Base() + area->Size() - 1, attributes);
return Protect(area->Base(), area->Base() + area->Size() - 1, attributes,
area->MemoryType());
}

View File

@ -146,8 +146,8 @@ struct PPCVMTranslationMap : VMTranslationMap {
addr_t end) const;
virtual status_t Map(addr_t virtualAddress,
addr_t physicalAddress,
uint32 attributes,
addr_t physicalAddress, uint32 attributes,
uint32 memoryType,
vm_page_reservation* reservation);
virtual status_t Unmap(addr_t start, addr_t end);
@ -162,7 +162,7 @@ struct PPCVMTranslationMap : VMTranslationMap {
uint32* _flags);
virtual status_t Protect(addr_t base, addr_t top,
uint32 attributes);
uint32 attributes, uint32 memoryType);
virtual status_t ClearFlags(addr_t virtualAddress,
uint32 flags);
@ -396,8 +396,9 @@ PPCVMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
status_t
PPCVMTranslationMap::Map(addr_t virtualAddress, addr_t physicalAddress,
uint32 attributes, vm_page_reservation* reservation)
uint32 attributes, uint32 memoryType, vm_page_reservation* reservation)
{
// TODO: Support memory types!
// lookup the vsid based off the va
uint32 virtualSegmentID = VADDR_TO_VSID(fVSIDBase, virtualAddress);
uint32 protection = 0;
@ -592,7 +593,8 @@ PPCVMTranslationMap::MappedSize() const
status_t
PPCVMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes)
PPCVMTranslationMap::Protect(addr_t base, addr_t top, uint32 attributes,
uint32 memoryType)
{
// XXX finish
return B_ERROR;

View File

@ -30,8 +30,8 @@ struct X86VMTranslationMap : VMTranslationMap {
addr_t end) const;
virtual status_t Map(addr_t virtualAddress,
addr_t physicalAddress,
uint32 attributes,
addr_t physicalAddress, uint32 attributes,
uint32 memoryType,
vm_page_reservation* reservation);
virtual status_t Unmap(addr_t start, addr_t end);
@ -51,7 +51,7 @@ struct X86VMTranslationMap : VMTranslationMap {
uint32* _flags);
virtual status_t Protect(addr_t base, addr_t top,
uint32 attributes);
uint32 attributes, uint32 memoryType);
virtual status_t ClearFlags(addr_t virtualAddress,
uint32 flags);

View File

@ -402,8 +402,9 @@ X86VMTranslationMap::MaxPagesNeededToMap(addr_t start, addr_t end) const
status_t
X86VMTranslationMap::Map(addr_t va, addr_t pa, uint32 attributes,
vm_page_reservation* reservation)
uint32 memoryType, vm_page_reservation* reservation)
{
// TODO: Support memory types!
TRACE("map_tmap: entry pa 0x%lx va 0x%lx\n", pa, va);
/*
@ -971,8 +972,10 @@ X86VMTranslationMap::MappedSize() const
status_t
X86VMTranslationMap::Protect(addr_t start, addr_t end, uint32 attributes)
X86VMTranslationMap::Protect(addr_t start, addr_t end, uint32 attributes,
uint32 memoryType)
{
// TODO: Support memory types!
page_directory_entry *pd = fArchData->pgdir_virt;
start = ROUNDDOWN(start, B_PAGE_SIZE);

View File

@ -727,7 +727,7 @@ IOCache::_MapPages(size_t firstPage, size_t endPage)
translationMap->Map((addr_t)fAreaBase + i * B_PAGE_SIZE,
page->physical_page_number * B_PAGE_SIZE,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &fMappingReservation);
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, 0, &fMappingReservation);
// NOTE: We don't increment gMappedPagesCount. Our pages have state
// PAGE_STATE_UNUSED anyway and we map them only for a short time.
}

View File

@ -1360,7 +1360,8 @@ MemoryManager::_MapChunk(VMArea* vmArea, addr_t address, size_t size,
translationMap->Map(vmArea->Base() + offset,
page->physical_page_number * B_PAGE_SIZE,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, &reservation);
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA,
vmArea->MemoryType(), &reservation);
}
translationMap->Unlock();

View File

@ -502,7 +502,7 @@ map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection,
map->Lock();
map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection,
reservation);
area->MemoryType(), reservation);
// insert mapping into lists
if (page->mappings.IsEmpty() && page->wired_count == 0)
@ -517,7 +517,7 @@ map_page(VMArea* area, vm_page* page, addr_t address, uint32 protection,
map->Lock();
map->Map(address, page->physical_page_number * B_PAGE_SIZE, protection,
reservation);
area->MemoryType(), reservation);
map->Unlock();
increment_page_wired_count(page);
@ -1338,7 +1338,7 @@ vm_create_anonymous_area(team_id team, const char* name, void** address,
panic("couldn't lookup physical page just allocated\n");
status = map->Map(virtualAddress, physicalAddress, protection,
&reservation);
area->MemoryType(), &reservation);
if (status < B_OK)
panic("couldn't map physical page in page run\n");
@ -1466,7 +1466,7 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
map->Map(area->Base() + offset, physicalAddress + offset,
protection, &reservation);
protection, area->MemoryType(), &reservation);
}
map->Unlock();
@ -1571,7 +1571,7 @@ vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
map->Map(area->Base() + offset,
(addr_t)vecs[vecIndex].iov_base + vecOffset, protection,
&reservation);
area->MemoryType(), &reservation);
vecOffset += B_PAGE_SIZE;
}
@ -1964,7 +1964,7 @@ vm_clone_area(team_id team, const char* name, void** address,
for (addr_t offset = 0; offset < newArea->Size();
offset += B_PAGE_SIZE) {
map->Map(newArea->Base() + offset, physicalAddress + offset,
protection, &reservation);
protection, newArea->MemoryType(), &reservation);
}
map->Unlock();
@ -4453,6 +4453,8 @@ vm_try_reserve_memory(size_t amount, int priority, bigtime_t timeout)
status_t
vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type)
{
// NOTE: The caller is responsible for synchronizing calls to this function!
AddressSpaceReadLocker locker;
VMArea* area;
status_t status = locker.SetFromArea(id, area);
@ -4464,12 +4466,24 @@ vm_set_area_memory_type(area_id id, addr_t physicalBase, uint32 type)
if (type == oldType)
return B_OK;
// set the memory type of the area and the mapped pages
VMTranslationMap* map = area->address_space->TranslationMap();
map->Lock();
area->SetMemoryType(type);
map->ProtectArea(area, area->protection);
map->Unlock();
// set the physical memory type
status_t error = arch_vm_set_memory_type(area, physicalBase, type);
if (error != B_OK)
if (error != B_OK) {
// reset the memory type of the area and the mapped pages
map->Lock();
area->SetMemoryType(oldType);
map->ProtectArea(area, area->protection);
map->Unlock();
return error;
}
area->SetMemoryType(type);
return B_OK;
}
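
For context, a minimal usage sketch (hypothetical, not part of this commit):
kernel code that has mapped a physical range, e.g. a frame buffer, can switch
the whole area to write-combining, and with this change the pages that are
already mapped get their memory type updated too, via
VMTranslationMap::ProtectArea(), instead of only pages mapped later. As the
NOTE above states, callers must serialize calls to vm_set_area_memory_type()
themselves.

#include <OS.h>	// B_MTR_WC, area_id, addr_t, status_t
#include <vm/vm.h>	// vm_set_area_memory_type(); kernel-private header (location assumed)

// Hypothetical driver-side helper: make an already mapped physical area
// write-combining. With this commit the currently mapped pages get their
// memory type changed as well (through VMTranslationMap::ProtectArea()), in
// addition to the arch_vm_set_memory_type() call for the physical range.
static status_t
set_area_write_combining(area_id area, addr_t physicalBase)
{
	return vm_set_area_memory_type(area, physicalBase, B_MTR_WC);
}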