Provide a way to directly request virtual vecs from an IOBuffer. If the buffer

is already virtual, the vecs are returned directly. If it is physical, the
buffer takes over the task of virtualizing the vecs: it uses
vm_map_physical_memory_vecs when there are multiple vecs or more than one page,
and falls back to page-wise mapping when that mapping fails or is not needed.
In the best case, scattered physical pages are mapped into one linear virtual
buffer, so that subsystems operating on virtual memory get a single vector and
can then burst read/write.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@33524 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Michael Lotz 2009-10-11 16:49:44 +00:00
parent 44778a8a28
commit 32e2b6a118
2 changed files with 100 additions and 0 deletions

View File

@ -15,6 +15,7 @@
#include <thread.h>
#include <util/AutoLock.h>
#include <vm.h>
#include <vm_address_space.h>
#include "dma_resources.h"
@ -65,6 +66,15 @@ IORequestChunk::operator delete(void* address, size_t size)
// #pragma mark -
// Iteration state for IOBuffer::GetNextVirtualVec(), handed to callers as an
// opaque cookie and released via IOBuffer::FreeVirtualVecCookie().
struct virtual_vec_cookie {
// index of the next vec to hand out
uint32 vec_index;
// byte offset into the current vec (page-wise mapping of large vecs)
size_t vec_offset;
// area created by vm_map_physical_memory_vecs(), or -1 if none
area_id mapped_area;
// handle of the currently mapped physical page (vm_get_physical_page()),
// NULL when no page is mapped
void* physical_page_handle;
// kernel virtual address the current physical page is mapped at
addr_t virtual_address;
};
IOBuffer*
IOBuffer::Create(uint32 count, bool vip)
{
@ -115,6 +125,92 @@ IOBuffer::SetVecs(size_t firstVecOffset, const iovec* vecs, uint32 count,
}
/*!	Returns the next virtual vec of this buffer in \a vector.

	On the first call \a _cookie must be NULL; a cookie is allocated and
	returned in it. The caller must always release the cookie with
	FreeVirtualVecCookie(), even if this function returned an error.

	For virtual buffers the vecs are returned as-is. For physical buffers the
	whole buffer is mapped into one linear area via
	vm_map_physical_memory_vecs() where that pays off (multiple vecs or more
	than one page); if that fails or is not needed, the vecs are mapped and
	returned page by page.

	\return \c B_OK when a vec was returned, \c B_BAD_INDEX when the buffer is
		exhausted, \c B_NO_MEMORY or a mapping error otherwise.
*/
status_t
IOBuffer::GetNextVirtualVec(void*& _cookie, iovec& vector)
{
	virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
	if (cookie == NULL) {
		cookie = new(std::nothrow) virtual_vec_cookie;
		if (cookie == NULL)
			return B_NO_MEMORY;

		cookie->vec_index = 0;
		cookie->vec_offset = 0;
		cookie->mapped_area = -1;
		cookie->physical_page_handle = NULL;
		cookie->virtual_address = 0;
		_cookie = cookie;
	}

	// recycle a potential previously mapped page
	if (cookie->physical_page_handle != NULL) {
		vm_put_physical_page(cookie->virtual_address,
			cookie->physical_page_handle);
		// clear the handle so an early return below cannot lead to the same
		// page being put a second time on a later call
		cookie->physical_page_handle = NULL;
	}

	if (cookie->vec_index >= fVecCount)
		return B_BAD_INDEX;

	if (!fPhysical) {
		vector = fVecs[cookie->vec_index++];
		return B_OK;
	}

	if (cookie->vec_index == 0
		&& (fVecCount > 1 || fVecs[0].iov_len > B_PAGE_SIZE)) {
		void* mappedAddress;
		addr_t mappedSize;

		cookie->mapped_area = vm_map_physical_memory_vecs(
			vm_kernel_address_space_id(), "io buffer mapped physical vecs",
			&mappedAddress, B_ANY_KERNEL_ADDRESS, &mappedSize,
			B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA, fVecs, fVecCount);

		if (cookie->mapped_area >= 0) {
			// the mapping covers all vecs at once -- mark the iteration as
			// finished, so the next call returns B_BAD_INDEX instead of
			// mapping (and leaking) another area
			cookie->vec_index = fVecCount;
			vector.iov_base = mappedAddress;
			vector.iov_len = mappedSize;
			return B_OK;
		} else {
			ktrace_printf("failed to map area: %s\n",
				strerror(cookie->mapped_area));
		}
	}

	// fall back to page wise mapping
	iovec& currentVec = fVecs[cookie->vec_index];
	addr_t address = (addr_t)currentVec.iov_base + cookie->vec_offset;
	addr_t pageOffset = address % B_PAGE_SIZE;

	status_t result = vm_get_physical_page(address - pageOffset,
		&cookie->virtual_address, &cookie->physical_page_handle);
	if (result != B_OK)
		return result;

	size_t length = min_c(currentVec.iov_len - cookie->vec_offset,
		B_PAGE_SIZE - pageOffset);

	vector.iov_base = (void*)(cookie->virtual_address + pageOffset);
	vector.iov_len = length;

	cookie->vec_offset += length;
	if (cookie->vec_offset >= currentVec.iov_len) {
		cookie->vec_index++;
		cookie->vec_offset = 0;
	}

	return B_OK;
}
void
IOBuffer::FreeVirtualVecCookie(void* _cookie)
{
virtual_vec_cookie* cookie = (virtual_vec_cookie*)_cookie;
if (cookie->mapped_area >= 0)
delete_area(cookie->mapped_area);
delete cookie;
}
status_t
IOBuffer::LockMemory(team_id team, bool isWrite)
{

View File

@ -55,6 +55,10 @@ public:
size_t VecCount() const { return fVecCount; }
size_t Capacity() const { return fCapacity; }
status_t GetNextVirtualVec(void*& cookie,
iovec& vector);
void FreeVirtualVecCookie(void* cookie);
status_t LockMemory(team_id team, bool isWrite);
void UnlockMemory(team_id team, bool isWrite);
bool IsMemoryLocked() const