diff --git a/headers/private/kernel/vm.h b/headers/private/kernel/vm.h
index d5ae0cd0a9..b3cfa4a134 100644
--- a/headers/private/kernel/vm.h
+++ b/headers/private/kernel/vm.h
@@ -14,6 +14,7 @@
 #include
 
+struct iovec;
 struct kernel_args;
 struct team;
 struct system_memory_info;
@@ -69,6 +70,9 @@ area_id vm_create_anonymous_area(team_id team, const char *name, void **address,
 	addr_t physicalAddress, uint32 flags, bool kernel);
 area_id vm_map_physical_memory(team_id team, const char *name, void **address,
 	uint32 addressSpec, addr_t size, uint32 protection, addr_t phys_addr);
+area_id vm_map_physical_memory_vecs(team_id team, const char* name,
+	void** _address, uint32 addressSpec, addr_t* _size, uint32 protection,
+	struct iovec* vecs, uint32 vecCount);
 area_id vm_map_file(team_id aid, const char *name, void **address,
 	uint32 addressSpec, addr_t size, uint32 protection, uint32 mapping,
 	bool unmapAddressRange, int fd, off_t offset);
diff --git a/src/system/kernel/vm/vm.cpp b/src/system/kernel/vm/vm.cpp
index 7323e6d72e..7a990bbf26 100644
--- a/src/system/kernel/vm/vm.cpp
+++ b/src/system/kernel/vm/vm.cpp
@@ -2184,7 +2184,7 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
 
 	size = PAGE_ALIGN(size);
 
-	// create an device cache
+	// create a device cache
 	status_t status = VMCacheFactory::CreateDeviceCache(cache, physicalAddress);
 	if (status != B_OK)
 		return status;
@@ -2243,6 +2243,98 @@ vm_map_physical_memory(team_id team, const char* name, void** _address,
 }
 
 
+// Maps a scatter/gather list of page-aligned physical ranges (vecs) into one
+// contiguous virtual area backed by a device cache. On success, *_size (if
+// non-NULL) receives the total mapped size and the new area's id is returned.
+area_id
+vm_map_physical_memory_vecs(team_id team, const char* name, void** _address,
+	uint32 addressSpec, addr_t* _size, uint32 protection, struct iovec* vecs,
+	uint32 vecCount)
+{
+	TRACE(("vm_map_physical_memory_vecs(team = %ld, \"%s\", virtual = %p, "
+		"spec = %ld, _size = %p, protection = %ld, vecCount = %ld)\n",
+		team, name, _address, addressSpec, _size, protection, vecCount));
+
+	if (!arch_vm_supports_protection(protection)
+		|| (addressSpec & B_MTR_MASK) != 0) {
+		return B_NOT_SUPPORTED;
+	}
+
+	AddressSpaceWriteLocker locker(team);
+	if (!locker.IsLocked())
+		return B_BAD_TEAM_ID;
+
+	if (vecCount == 0)
+		return B_BAD_VALUE;
+
+	// every vec must be page aligned in both base and length; sum up the
+	// total size of the mapping while validating
+	addr_t size = 0;
+	for (uint32 i = 0; i < vecCount; i++) {
+		if ((addr_t)vecs[i].iov_base % B_PAGE_SIZE != 0
+			|| vecs[i].iov_len % B_PAGE_SIZE != 0) {
+			return B_BAD_VALUE;
+		}
+
+		size += vecs[i].iov_len;
+	}
+
+	// create a device cache
+	vm_cache* cache;
+	status_t result = VMCacheFactory::CreateDeviceCache(cache,
+		(addr_t)vecs[0].iov_base);
+	if (result != B_OK)
+		return result;
+
+	// tell the page scanner to skip over this area, its pages are special
+	cache->scan_skip = 1;
+	cache->virtual_end = size;
+
+	cache->Lock();
+
+	vm_area* area;
+	result = map_backing_store(locker.AddressSpace(), cache, _address,
+		0, size, addressSpec & ~B_MTR_MASK, B_FULL_LOCK, protection,
+		REGION_NO_PRIVATE_MAP, &area, name, false, true);
+
+	if (result != B_OK)
+		cache->ReleaseRefLocked();
+
+	cache->Unlock();
+
+	if (result != B_OK)
+		return result;
+
+	vm_translation_map* map = &locker.AddressSpace()->translation_map;
+	size_t reservePages = map->ops->map_max_pages_need(map, area->base,
+		area->base + (size - 1));
+
+	vm_page_reserve_pages(reservePages);
+	map->ops->lock(map);
+
+	// walk the vecs, mapping one page at a time; check the index bound
+	// before dereferencing vecs[vecIndex] to avoid reading past the array
+	uint32 vecIndex = 0;
+	size_t vecOffset = 0;
+	for (addr_t offset = 0; offset < size; offset += B_PAGE_SIZE) {
+		while (vecIndex < vecCount && vecOffset >= vecs[vecIndex].iov_len) {
+			vecOffset = 0;
+			vecIndex++;
+		}
+
+		if (vecIndex >= vecCount)
+			break;
+
+		map->ops->map(map, area->base + offset,
+			(addr_t)vecs[vecIndex].iov_base + vecOffset, protection);
+
+		vecOffset += B_PAGE_SIZE;
+	}
+
+	map->ops->unlock(map);
+	vm_page_unreserve_pages(reservePages);
+
+	if (_size != NULL)
+		*_size = size;
+
+	area->cache_type = CACHE_TYPE_DEVICE;
+	return area->id;
+}
+
+
 area_id
 vm_create_null_area(team_id team, const char* name, void** address,
 	uint32 addressSpec, addr_t size)