Introduce vm_map_physical_memory_vecs. It is like vm_map_physical_memory but
takes a list of iovecs describing the physical pages to be mapped. With it one
can map a set of physically disjoint pages into one linear virtual range. This
is a private API right now, but we might want to make it public as
map_physical_memory_vecs alongside map_physical_memory.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@33523 a95241bf-73f2-0310-859d-f6bbb57e9c96
Michael Lotz 2009-10-11 16:48:03 +00:00
parent c443d19cd2
commit 44778a8a28
2 changed files with 97 additions and 1 deletion
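
For context, a minimal usage sketch of the new private API (not part of the
commit): a hypothetical driver-side helper that maps two physically disjoint,
page-aligned ranges into one linear kernel range. It assumes the private vm.h
header, and that the kernel team id is obtained via vm_kernel_address_space_id();
the physical addresses and the helper's name are made up for illustration.

	// hypothetical helper; addresses below are illustrative placeholders
	static area_id
	map_two_scattered_ranges(void** _virtualBase)
	{
		struct iovec vecs[2];
		vecs[0].iov_base = (void*)0x1a2000;	// page aligned physical base
		vecs[0].iov_len = B_PAGE_SIZE;
		vecs[1].iov_base = (void*)0x3f5000;	// disjoint second range
		vecs[1].iov_len = 2 * B_PAGE_SIZE;

		addr_t actualSize;
		area_id area = vm_map_physical_memory_vecs(
			vm_kernel_address_space_id(), "scattered buffer", _virtualBase,
			B_ANY_KERNEL_ADDRESS, &actualSize, B_KERNEL_READ_AREA
				| B_KERNEL_WRITE_AREA, vecs, 2);

		// on success *_virtualBase addresses all three pages as one linear
		// range and actualSize is the sum of the vec lengths, 3 * B_PAGE_SIZE
		return area;
	}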

@@ -14,6 +14,7 @@
 #include <vm_defs.h>
+struct iovec;
 struct kernel_args;
 struct team;
 struct system_memory_info;
@@ -69,6 +70,9 @@ area_id vm_create_anonymous_area(team_id team, const char *name, void **address,
 	addr_t physicalAddress, uint32 flags, bool kernel);
 area_id vm_map_physical_memory(team_id team, const char *name, void **address,
 	uint32 addressSpec, addr_t size, uint32 protection, addr_t phys_addr);
+area_id vm_map_physical_memory_vecs(team_id team, const char* name,
+	void** _address, uint32 addressSpec, addr_t* _size, uint32 protection,
+	struct iovec* vecs, uint32 vecCount);
 area_id vm_map_file(team_id aid, const char *name, void **address,
 	uint32 addressSpec, addr_t size, uint32 protection, uint32 mapping,
 	bool unmapAddressRange, int fd, off_t offset);

@@ -2184,7 +2184,7 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
 	size = PAGE_ALIGN(size);
-	// create an device cache
+	// create a device cache
 	status_t status = VMCacheFactory::CreateDeviceCache(cache, physicalAddress);
 	if (status != B_OK)
 		return status;
@@ -2243,6 +2243,98 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
 }
+
+
+area_id
+vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
+	uint32 addressSpec, addr_t* _size, uint32 protection, struct iovec* vecs,
+	uint32 vecCount)
+{
TRACE(("vm_map_physical_memory_vecs(team = %ld, \"%s\", virtual = %p, "
"spec = %ld, size = %lu, protection = %ld, phys = %#lx)\n", team,
name, _address, addressSpec, size, protection, physicalAddress));
+	if (!arch_vm_supports_protection(protection)
+		|| (addressSpec & B_MTR_MASK) != 0) {
+		return B_NOT_SUPPORTED;
+	}
+
+	AddressSpaceWriteLocker locker(team);
+	if (!locker.IsLocked())
+		return B_BAD_TEAM_ID;
+
+	if (vecCount == 0)
+		return B_BAD_VALUE;
+
+	addr_t size = 0;
+	for (uint32 i = 0; i < vecCount; i++) {
+		if ((addr_t)vecs[i].iov_base % B_PAGE_SIZE != 0
+			|| vecs[i].iov_len % B_PAGE_SIZE != 0) {
+			return B_BAD_VALUE;
+		}
+
+		size += vecs[i].iov_len;
+	}
+
+	// create a device cache
+	vm_cache* cache;
+	status_t result = VMCacheFactory::CreateDeviceCache(cache,
+		(addr_t)vecs[0].iov_base);
+	if (result != B_OK)
+		return result;
+	// tell the page scanner to skip over this area, its pages are special
+	cache->scan_skip = 1;
+	cache->virtual_end = size;
cache->Lock();
vm_area* area;
result = map_backing_store(locker.AddressSpace(), cache, _address,
0, size, addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
REGION_NO_PRIVATE_MAP, &area, name, false, true);
if (result != B_OK)
cache->ReleaseRefLocked();
cache->Unlock();
if (result != B_OK)
return result;
vm_translation_map* map = &locker.AddressSpace()->translation_map;
size_t reservePages = map->ops->map_max_pages_need(map, area->base,
area->base + (size - 1));
vm_page_reserve_pages(reservePages);
map->ops->lock(map);
uint32 vecIndex = 0;
size_t vecOffset = 0;
+	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
+		// advance to the vec containing the current offset; check the index
+		// before dereferencing so we never read past the end of the array
+		while (vecIndex < vecCount && vecOffset >= vecs[vecIndex].iov_len) {
+			vecOffset = 0;
+			vecIndex++;
+		}
+
+		if (vecIndex >= vecCount)
+			break;
+
+		map->ops->map(map, area->base + offset,
+			(addr_t)vecs[vecIndex].iov_base + vecOffset, protection);
+
+		vecOffset += B_PAGE_SIZE;
+	}
map->ops->unlock(map);
vm_page_unreserve_pages(reservePages);
if (_size != NULL)
*_size = size;
area->cache_type = CACHE_TYPE_DEVICE;
return area->id;
}
 
 
 area_id
 vm_create_null_area(team_id team, const char* name, void** address,
 	uint32 addressSpec, addr_t size)
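
To see the vec-walking logic of the mapping loop in isolation, here is a
self-contained sketch that pairs every page-sized step of the linear virtual
range with the physical page it maps to, advancing through the vec list the
same way the loop in vm_map_physical_memory_vecs does. MY_PAGE_SIZE, walk_vecs
and the printf standing in for map->ops->map are mine, not part of the commit:

	#include <stdio.h>
	#include <sys/uio.h>

	#define MY_PAGE_SIZE 4096	// stand-in for B_PAGE_SIZE

	static void
	walk_vecs(const struct iovec* vecs, unsigned vecCount, size_t size)
	{
		unsigned vecIndex = 0;
		size_t vecOffset = 0;
		for (size_t offset = 0; offset < size; offset += MY_PAGE_SIZE) {
			// skip to the vec containing the current offset, checking the
			// index before dereferencing
			while (vecIndex < vecCount && vecOffset >= vecs[vecIndex].iov_len) {
				vecOffset = 0;
				vecIndex++;
			}
			if (vecIndex >= vecCount)
				break;

			printf("virtual +0x%zx -> physical %p\n", offset,
				(void*)((char*)vecs[vecIndex].iov_base + vecOffset));
			vecOffset += MY_PAGE_SIZE;
		}
	}

	int
	main()
	{
		// two disjoint ranges, three pages total (addresses are made up)
		struct iovec vecs[2] = {
			{ (void*)0x1a2000, MY_PAGE_SIZE },
			{ (void*)0x3f5000, 2 * MY_PAGE_SIZE },
		};
		walk_vecs(vecs, 2, 3 * MY_PAGE_SIZE);
		return 0;
	}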