axeld + bonefish:

More work on the I/O scheduler, more precisely mainly the DMAResource class:
* When splitting requests into operations, we're now able to flexibly mix
  bounce buffer segments and the given physical vectors in a single
  operation (see the sketch below). This reduces the number of operations.
* Squashed several TODOs and fleshed out more of the implementation.
* Added a test driver running unit tests. There are only a few tests so far,
  but they pass.
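
To make the first point concrete, here is a minimal, hypothetical sketch of
the mixing idea (simplified types, not the actual DMAResource code; it
ignores the boundary, transfer size, and segment count restrictions, and
assumes a power-of-two alignment and a non-zero max segment size):

	#include <algorithm>
	#include <cstddef>
	#include <cstdint>
	#include <vector>

	struct Vec { uintptr_t base; size_t length; };
	struct Restrictions { uintptr_t alignment; size_t maxSegmentSize; };

	// Build the vec list for one operation. A piece that violates the
	// alignment restriction is routed through the bounce buffer instead of
	// forcing the whole operation to use it.
	std::vector<Vec> BuildOperationVecs(const std::vector<Vec>& request,
		const Restrictions& r, uintptr_t bounceBase, size_t bounceSize)
	{
		std::vector<Vec> out;
		size_t bounceUsed = 0;
		for (Vec vec : request) {
			while (vec.length > 0) {
				size_t length = std::min(vec.length, r.maxSegmentSize);
				if ((vec.base & (r.alignment - 1)) != 0) {
					// misaligned: substitute a bounce buffer segment
					length = std::min(length, bounceSize - bounceUsed);
					if (length == 0) {
						// bounce space exhausted; the rest has to go into
						// the next operation
						return out;
					}
					out.push_back({bounceBase + bounceUsed, length});
					bounceUsed += length;
				} else
					out.push_back({vec.base, length});
				vec.base += length;
				vec.length -= length;
			}
		}
		return out;
	}

A request whose vecs are all aligned thus translates 1:1, while a misaligned
vec no longer forces a separate bounce-buffer-only operation.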


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26519 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-07-19 23:27:14 +00:00
parent 9e05183977
commit 45a206a742
7 changed files with 1351 additions and 143 deletions

File: IOScheduler.cpp

@@ -134,6 +134,8 @@ IOScheduler::_Finisher()
} else {
MutexLocker _(fLock);
operation->Parent()->RemoveOperation(operation);
if (fDMAResource != NULL)
fDMAResource->RecycleBuffer(operation->Buffer());
fUnusedOperations.Add(operation);
}

File: dma_resources.cpp

@@ -16,7 +16,8 @@ const size_t kMaxBounceBufferSize = 4 * B_PAGE_SIZE;
DMABuffer*
DMABuffer::Create(size_t count, void* bounceBuffer, addr_t physicalBounceBuffer)
DMABuffer::Create(size_t count, void* bounceBuffer, addr_t physicalBounceBuffer,
size_t bounceBufferSize)
{
DMABuffer* buffer = (DMABuffer*)malloc(
sizeof(DMABuffer) + sizeof(iovec) * (count - 1));
@@ -25,6 +26,7 @@ DMABuffer::Create(size_t count, void* bounceBuffer, addr_t physicalBounceBuffer)
buffer->fBounceBuffer = bounceBuffer;
buffer->fPhysicalBounceBuffer = physicalBounceBuffer;
buffer->fBounceBufferSize = bounceBufferSize;
buffer->fVecCount = count;
return buffer;
@@ -42,6 +44,7 @@ void
DMABuffer::AddVec(void* base, size_t size)
{
iovec& vec = fVecs[fVecCount++];
vec.iov_base = base;
vec.iov_len = size;
}
@@ -86,10 +89,14 @@ DMAResource::Init(const dma_restrictions& restrictions, size_t blockSize,
fRestrictions.max_segment_count = 16;
if (fRestrictions.alignment == 0)
fRestrictions.alignment = 1;
if (fRestrictions.max_transfer_size == 0)
fRestrictions.max_transfer_size = ~(size_t)0;
if (fRestrictions.max_segment_size == 0)
fRestrictions.max_segment_size = ~(size_t)0;
if (_NeedsBoundsBuffers()) {
// TODO: Enforce that the bounce buffer size won't cross boundaries.
fBounceBufferSize = restrictions.max_segment_size;
fBounceBufferSize = fRestrictions.max_segment_size;
if (fBounceBufferSize > kMaxBounceBufferSize)
fBounceBufferSize = max_c(kMaxBounceBufferSize, fBlockSize);
}
@@ -129,9 +136,8 @@ DMAResource::CreateBuffer(size_t size, DMABuffer** _buffer)
bounceBuffer = (void*)fRestrictions.low_address;
// TODO: We also need to enforce the boundary restrictions.
area = create_area("dma buffer", &bounceBuffer, size,
B_PHYSICAL_BASE_ADDRESS, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
area = create_area("dma buffer", &bounceBuffer, B_PHYSICAL_BASE_ADDRESS,
size, B_CONTIGUOUS, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return area;
@@ -151,7 +157,7 @@ DMAResource::CreateBuffer(size_t size, DMABuffer** _buffer)
}
DMABuffer* buffer = DMABuffer::Create(fRestrictions.max_segment_count,
bounceBuffer, physicalBase);
bounceBuffer, physicalBase, fBounceBufferSize);
if (buffer == NULL) {
delete_area(area);
return B_NO_MEMORY;
@@ -162,11 +168,34 @@ DMAResource::CreateBuffer(size_t size, DMABuffer** _buffer)
}
inline void
DMAResource::_RestrictBoundaryAndSegmentSize(addr_t base, addr_t& length)
{
if (length > fRestrictions.max_segment_size)
length = fRestrictions.max_segment_size;
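// E.g. with a boundary of 1024, a base of 512, and a length of 1024, the
// length is cut to 512 so that the segment does not cross the boundary at
// 1024.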
if (fRestrictions.boundary > 0) {
addr_t baseBoundary = base / fRestrictions.boundary;
if (baseBoundary
!= (base + (length - 1)) / fRestrictions.boundary) {
length = (baseBoundary + 1) * fRestrictions.boundary - base;
}
}
}
status_t
DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
{
IOBuffer* buffer = request->Buffer();
off_t offset = request->Offset();
off_t originalOffset = request->Offset() + request->Length()
- request->RemainingBytes();
off_t offset = originalOffset;
// current iteration state
uint32 vecIndex = request->VecIndex();
uint32 vecOffset = request->VecOffset();
size_t totalLength = min_c(request->RemainingBytes(),
fRestrictions.max_transfer_size);
MutexLocker locker(fLock);
@@ -174,30 +203,35 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
if (dmaBuffer == NULL)
return B_BUSY;
dmaBuffer->SetVecCount(0);
iovec* vecs = NULL;
uint32 segmentCount = 0;
size_t totalLength = min_c(buffer->Length(),
fRestrictions.max_transfer_size);
bool partialOperation = (offset & (fBlockSize - 1)) != 0;
bool needsBounceBuffer = partialOperation;
bool partialBegin = (offset & (fBlockSize - 1)) != 0;
dprintf(" offset %Ld, block size %lu -> %s\n", offset, fBlockSize, partialBegin ? "partial" : "whole");
if (buffer->IsVirtual()) {
// Unless we need the bounce buffer anyway, we have to translate the
// virtual addresses to physical addresses, so we can check the DMA
// restrictions.
if (!needsBounceBuffer) {
dprintf(" IS VIRTUAL\n");
if (true) {
// TODO: !partialOperation || totalLength >= fBlockSize
// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.
size_t transferLeft = totalLength;
vecs = fScratchVecs;
// TODO: take iteration state of the IORequest into account!
for (uint32 i = 0; i < buffer->VecCount(); i++) {
dprintf(" CREATE PHYSICAL MAP %ld\n", buffer->VecCount());
for (uint32 i = vecIndex; i < buffer->VecCount(); i++) {
iovec& vec = buffer->VecAt(i);
size_t size = vec.iov_len;
addr_t base = (addr_t)vec.iov_base + vecOffset;
size_t size = vec.iov_len - vecOffset;
vecOffset = 0;
if (size > transferLeft)
size = transferLeft;
dprintf(" size = %lu\n", size);
addr_t base = (addr_t)vec.iov_base;
while (size > 0 && segmentCount
< fRestrictions.max_segment_count) {
physical_entry entry;
@@ -207,6 +241,7 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
vecs[segmentCount].iov_len = entry.size;
transferLeft -= entry.size;
size -= entry.size;
segmentCount++;
}
@@ -216,112 +251,200 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
totalLength -= transferLeft;
}
vecIndex = 0;
vecOffset = 0;
} else {
// We do already have physical adresses.
// We do already have physical addresses.
locker.Unlock();
vecs = buffer->Vecs();
segmentCount = min_c(buffer->VecCount(),
fRestrictions.max_segment_count);
}
// locker.Lock();
dprintf(" physical count %lu\n", segmentCount);
for (uint32 i = 0; i < segmentCount; i++) {
dprintf(" [%lu] %p, %lu\n", i, vecs[i].iov_base, vecs[i].iov_len);
}
// check alignment, boundaries, etc. and set vecs in DMA buffer
size_t dmaLength = 0;
iovec vec;
if (vecs != NULL)
vec = vecs[0];
for (uint32 i = 0; i < segmentCount;) {
addr_t base = (addr_t)vec.iov_base;
size_t length = vec.iov_len;
addr_t physicalBounceBuffer = dmaBuffer->PhysicalBounceBuffer();
size_t bounceLeft = fBounceBufferSize;
if ((base & (fRestrictions.alignment - 1)) != 0) {
needsBounceBuffer = true;
break;
}
// If the offset isn't block-aligned, use the bounce buffer to bridge the
// gap to the start of the vec.
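// E.g. with a block size of 512, a request at offset 23 yields an
// operation starting at offset 0 whose first vec is a 23 byte (rounded up
// to the alignment) bounce buffer segment bridging the gap.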
if (partialBegin) {
off_t diff = offset & (fBlockSize - 1);
addr_t base = physicalBounceBuffer;
size_t length = (diff + fRestrictions.alignment - 1)
& ~(fRestrictions.alignment - 1);
if (((base + length) & (fRestrictions.alignment - 1)) != 0) {
length = ((base + length) & ~(fRestrictions.alignment - 1)) - base;
if (length == 0) {
needsBounceBuffer = true;
break;
}
}
if (fRestrictions.boundary > 0) {
addr_t baseBoundary = base / fRestrictions.boundary;
if (baseBoundary != (base + (length - 1)) / fRestrictions.boundary)
length = (baseBoundary + 1) * fRestrictions.boundary - base;
}
physicalBounceBuffer += length;
bounceLeft -= length;
dmaBuffer->AddVec((void*)base, length);
dmaLength += length;
if ((vec.iov_len -= length) > 0) {
vec.iov_base = (void*)((addr_t)vec.iov_base + length);
} else {
if (++i < segmentCount)
vec = vecs[i];
}
vecOffset += length - diff;
offset -= diff;
dprintf(" partial begin, using bounce buffer: offset: %lld, length: %lu\n", offset, length);
}
if (dmaLength < fBlockSize) {
dmaLength = 0;
needsBounceBuffer = true;
partialOperation = true;
} else if ((dmaLength & (fBlockSize - 1)) != 0) {
size_t toCut = dmaLength & (fBlockSize - 1);
dmaLength -= toCut;
int32 dmaVecCount = dmaBuffer->VecCount();
for (int32 i = dmaVecCount - 1 && toCut > 0; i >= 0; i--) {
iovec& vec = dmaBuffer->VecAt(i);
size_t length = vec.iov_len;
if (length <= toCut) {
dmaVecCount--;
toCut -= length;
} else {
vec.iov_len -= toCut;
for (uint32 i = vecIndex; i < segmentCount;) {
if (dmaBuffer->VecCount() >= fRestrictions.max_segment_count)
break;
const iovec& vec = vecs[i];
if (vec.iov_len <= vecOffset) {
vecOffset -= vec.iov_len;
i++;
continue;
}
addr_t base = (addr_t)vec.iov_base + vecOffset;
size_t length = vec.iov_len - vecOffset;
// Cut the vec according to transfer size, segment size, and boundary.
if (dmaLength + length > fRestrictions.max_transfer_size) {
length = fRestrictions.max_transfer_size - dmaLength;
dprintf(" vec %lu: restricting length to %lu due to transfer size limit\n", i, length);
}
_RestrictBoundaryAndSegmentSize(base, length);
size_t useBounceBuffer = 0;
// Check low address: use bounce buffer for range to low address.
// Check alignment: if not aligned, use bounce buffer for complete vec.
if (base < fRestrictions.low_address) {
useBounceBuffer = fRestrictions.low_address - base;
dprintf(" vec %lu: below low address, using bounce buffer: %lu\n", i, useBounceBuffer);
} else if (base & (fRestrictions.alignment - 1)) {
useBounceBuffer = length;
dprintf(" vec %lu: misalignment, using bounce buffer: %lu\n", i, useBounceBuffer);
}
// TODO: Enforce high address restriction!
// If length is 0, use bounce buffer for complete vec.
if (length == 0) {
length = vec.iov_len - vecOffset;
useBounceBuffer = length;
dprintf(" vec %lu: 0 length, using bounce buffer: %lu\n", i, useBounceBuffer);
}
if (useBounceBuffer > 0) {
if (bounceLeft == 0) {
dprintf(" vec %lu: out of bounce buffer space\n", i);
// We don't have any bounce buffer space left; we need to move
// this request to the next I/O operation.
break;
}
base = physicalBounceBuffer;
if (useBounceBuffer > length)
useBounceBuffer = length;
if (useBounceBuffer > bounceLeft)
useBounceBuffer = bounceLeft;
length = useBounceBuffer;
}
dmaBuffer->SetVecCount(dmaVecCount);
}
// check boundary and max segment size.
_RestrictBoundaryAndSegmentSize(base, length);
dprintf(" vec %lu: final length restriction: %lu\n", i, length);
operation->SetOriginalRange(offset, dmaLength);
if (needsBounceBuffer) {
// If the size of the buffer we could transfer is pathologically small,
// we always use the bounce buffer.
// TODO: Use a better heuristic than bounce buffer size / 2, or even
// better attach the bounce buffer to the DMA buffer.
if (dmaLength < fBounceBufferSize / 2) {
if (partialOperation) {
off_t diff = offset & (fBlockSize - 1);
offset -= diff;
dmaLength += diff;
if (useBounceBuffer) {
// alignment could still be wrong
if (useBounceBuffer & (fRestrictions.alignment - 1)) {
useBounceBuffer
= (useBounceBuffer + fRestrictions.alignment - 1)
& ~(fRestrictions.alignment - 1);
if (dmaLength + useBounceBuffer
> fRestrictions.max_transfer_size) {
useBounceBuffer = (fRestrictions.max_transfer_size
- dmaLength) & ~(fRestrictions.alignment - 1);
}
}
addr_t base = (addr_t)vecs[0].iov_base;
size_t length = vecs[0].iov_len;
if ((base & (fRestrictions.alignment - 1)) != 0) {
addr_t diff = base - (base & ~(fRestrictions.alignment - 1));
length += diff;
}
physicalBounceBuffer += useBounceBuffer;
bounceLeft -= useBounceBuffer;
}
dmaLength = max_c(totalLength, fBlockSize);
dmaLength = (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);
dmaLength = min_c(dmaLength, fBounceBufferSize);
dmaBuffer->SetToBounceBuffer(dmaLength);
vecOffset += length;
operation->SetRange(offset, dmaLength);
} else
needsBounceBuffer = false;
// TODO: we might be able to join the vec with its preceding vec
// (but then we'd need to take the segment size into account again)
dmaBuffer->AddVec((void*)base, length);
dmaLength += length;
}
operation->SetPartialOperation(partialOperation);
operation->SetRequest(request);
// If total length not block aligned, use bounce buffer for padding.
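// E.g. with a block size of 512, a dmaLength of 1028 gets padded to 1536;
// the missing 508 bytes are covered by a bounce buffer segment.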
if ((dmaLength & (fBlockSize - 1)) != 0) {
dprintf(" dmaLength not block aligned: %lu\n", dmaLength);
size_t length = (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);
// If total length > max transfer size or segment count > max segment
// count, truncate.
if (length > fRestrictions.max_transfer_size
|| dmaBuffer->VecCount() == fRestrictions.max_segment_count
|| bounceLeft < length - dmaLength) {
// cut off the part of dmaLength that is not block-aligned
dprintf(" can't align length due to max transfer size, segment count "
"restrictions, or lacking bounce buffer space\n");
size_t toCut = dmaLength
& (max_c(fBlockSize, fRestrictions.alignment) - 1);
dmaLength -= toCut;
if (dmaLength == 0) {
// This can only happen when we have too many small segments
// and hit the max segment count. In this case we just use the
// bounce buffer for as much as possible of the total length.
dmaBuffer->SetVecCount(0);
addr_t base = dmaBuffer->PhysicalBounceBuffer();
dmaLength = min_c(totalLength, fBounceBufferSize)
& ~(max_c(fBlockSize, fRestrictions.alignment) - 1);
_RestrictBoundaryAndSegmentSize(base, dmaLength);
dmaBuffer->AddVec((void*)base, dmaLength);
} else {
int32 dmaVecCount = dmaBuffer->VecCount();
for (int32 i = dmaVecCount - 1; toCut > 0 && i >= 0; i--) {
iovec& vec = dmaBuffer->VecAt(i);
size_t length = vec.iov_len;
if (length <= toCut) {
dmaVecCount--;
toCut -= length;
} else {
vec.iov_len -= toCut;
break;
}
}
dmaBuffer->SetVecCount(dmaVecCount);
}
} else {
dprintf(" adding %lu bytes final bounce buffer\n", length - dmaLength);
dmaBuffer->AddVec((void*)physicalBounceBuffer, length - dmaLength);
dmaLength = length;
}
}
operation->SetBuffer(dmaBuffer);
operation->SetOriginalRange(originalOffset,
min_c(offset + dmaLength, request->Offset() + request->Length())
- originalOffset);
operation->SetRange(offset, dmaLength);
operation->SetPartial(partialBegin,
offset + dmaLength > request->Offset() + request->Length());
status_t error = operation->SetRequest(request);
if (error != B_OK)
return error;
request->Advance(operation->OriginalLength());
return B_OK;
@@ -331,6 +454,9 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
void
DMAResource::RecycleBuffer(DMABuffer* buffer)
{
if (buffer == NULL)
return;
MutexLocker _(fLock);
fDMABuffers.Add(buffer);
}

File: dma_resources.h

@@ -31,7 +31,8 @@ struct dma_restrictions {
class DMABuffer : public DoublyLinkedListLinkImpl<DMABuffer> {
public:
static DMABuffer* Create(size_t count, void* bounceBuffer,
addr_t physicalBounceBuffer);
addr_t physicalBounceBuffer,
size_t bounceBufferSize);
iovec* Vecs() { return fVecs; }
iovec& VecAt(size_t index) { return fVecs[index]; }
@@ -43,6 +44,8 @@ public:
void* BounceBuffer() const { return fBounceBuffer; }
addr_t PhysicalBounceBuffer() const
{ return fPhysicalBounceBuffer; }
size_t BounceBufferSize() const
{ return fBounceBufferSize; }
void SetToBounceBuffer(size_t length);
bool UsesBounceBuffer() const
@@ -53,6 +56,7 @@ public:
private:
void* fBounceBuffer;
addr_t fPhysicalBounceBuffer;
size_t fBounceBufferSize;
uint32 fVecCount;
iovec fVecs[1];
};
@@ -81,6 +85,8 @@ public:
private:
bool _NeedsBoundsBuffers() const;
void _RestrictBoundaryAndSegmentSize(addr_t base,
addr_t& length);
mutex fLock;
dma_restrictions fRestrictions;

File: io_requests.cpp

@@ -8,11 +8,28 @@
#include <string.h>
#include <team.h>
#include <vm.h>
#include "dma_resources.h"
// partial I/O operation phases
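// A write whose begin or end is not block-aligned runs in up to three
// phases: PHASE_READ_BEGIN reads the block containing the partial begin
// and merges the request's data into the bounce buffer, PHASE_READ_END
// does the same for the partial end, and PHASE_DO_ALL then performs the
// actual write. Reads and fully block-aligned writes start (and finish)
// in PHASE_DO_ALL.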
enum {
PHASE_READ_BEGIN = 0,
PHASE_READ_END = 1,
PHASE_DO_ALL = 2
};
IORequestChunk::IORequestChunk()
:
fParent(NULL),
fStatus(1)
{
}
IORequestChunk::~IORequestChunk()
{
}
@@ -21,10 +38,38 @@ IORequestChunk::~IORequestChunk()
// #pragma mark -
IOBuffer*
IOBuffer::Create(size_t count)
{
IOBuffer* buffer = (IOBuffer*)malloc(
sizeof(IOBuffer) + sizeof(iovec) * (count - 1));
if (buffer == NULL)
return NULL;
buffer->fCapacity = count;
buffer->fVecCount = 0;
buffer->fUser = false;
buffer->fPhysical = false;
return buffer;
}
void
IOBuffer::SetVecs(const iovec* vecs, uint32 count, size_t length, uint32 flags)
{
memcpy(fVecs, vecs, sizeof(iovec) * count);
fVecCount = count;
fLength = length;
fUser = (flags & B_USER_IO_REQUEST) != 0;
fPhysical = (flags & B_PHYSICAL_IO_REQUEST) != 0;
}
status_t
IOBuffer::LockMemory(bool isWrite)
{
for (uint32 i = 0; i < fCount; i++) {
for (uint32 i = 0; i < fVecCount; i++) {
status_t status = lock_memory(fVecs[i].iov_base, fVecs[i].iov_len,
isWrite ? 0 : B_READ_DEVICE);
if (status != B_OK) {
@@ -50,7 +95,7 @@ IOBuffer::_UnlockMemory(size_t count, bool isWrite)
void
IOBuffer::UnlockMemory(bool isWrite)
{
_UnlockMemory(fCount, isWrite);
_UnlockMemory(fVecCount, isWrite);
}
@@ -60,32 +105,84 @@ IOBuffer::UnlockMemory(bool isWrite)
bool
IOOperation::Finish()
{
dprintf("IOOperation::Finish()\n");
if (fStatus == B_OK) {
if (IsPartialOperation() && IsWrite()) {
// partial write: copy partial request to bounce buffer
status_t error = fParent->CopyData(OriginalOffset(),
(uint8*)fDMABuffer->BounceBuffer()
+ (Offset() - OriginalOffset()),
OriginalLength());
if (error == B_OK) {
// We're done with the first phase only (read-in block). Now
// do the actual write.
SetPartialOperation(false);
SetStatus(1);
// TODO: Is there a race condition, if the request is
// aborted at the same time?
return false;
}
if (fParent->IsWrite()) {
dprintf(" is write\n");
if (fPhase == PHASE_READ_BEGIN) {
dprintf(" phase read begin\n");
// partial write: copy partial begin to bounce buffer
bool skipReadEndPhase;
status_t error = _CopyPartialBegin(true, skipReadEndPhase);
if (error == B_OK) {
// We're done with the first phase only (read in begin).
// Get ready for next phase...
fPhase = HasPartialEnd() && !skipReadEndPhase
? PHASE_READ_END : PHASE_DO_ALL;
SetStatus(1);
// TODO: Is there a race condition, if the request is
// aborted at the same time?
return false;
}
SetStatus(error);
SetStatus(error);
} else if (fPhase == PHASE_READ_END) {
dprintf(" phase read end\n");
// partial write: copy partial end to bounce buffer
status_t error = _CopyPartialEnd(true);
if (error == B_OK) {
// We're done with the second phase only (read in end).
// Get ready for next phase...
fPhase = PHASE_DO_ALL;
SetStatus(1);
// TODO: Is there a race condition, if the request is
// aborted at the same time?
return false;
}
SetStatus(error);
}
}
}
if (IsRead() && UsesBounceBuffer()) {
// copy the bounce buffer to the final location
status_t error = fParent->CopyData((uint8*)fDMABuffer->BounceBuffer()
+ (Offset() - OriginalOffset()), OriginalOffset(),
OriginalLength());
if (fParent->IsRead() && UsesBounceBuffer()) {
dprintf(" read with bounce buffer\n");
// copy the bounce buffer segments to the final location
uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBuffer();
addr_t bounceBufferStart = fDMABuffer->PhysicalBounceBuffer();
addr_t bounceBufferEnd = bounceBufferStart
+ fDMABuffer->BounceBufferSize();
const iovec* vecs = fDMABuffer->Vecs();
uint32 vecCount = fDMABuffer->VecCount();
uint32 i = 0;
off_t offset = Offset();
status_t error = B_OK;
bool partialBlockOnly = false;
if (HasPartialBegin()) {
error = _CopyPartialBegin(false, partialBlockOnly);
offset += vecs[0].iov_len;
i++;
}
if (error == B_OK && HasPartialEnd() && !partialBlockOnly) {
error = _CopyPartialEnd(false);
vecCount--;
}
for (; error == B_OK && i < vecCount; i++) {
const iovec& vec = vecs[i];
addr_t base = (addr_t)vec.iov_base;
if (base >= bounceBufferStart && base < bounceBufferEnd) {
error = fParent->CopyData(
bounceBuffer + (base - bounceBufferStart), offset,
vec.iov_len);
}
offset += vec.iov_len;
}
if (error != B_OK)
SetStatus(error);
}
@@ -98,17 +195,67 @@ IOOperation::Finish()
}
void
/*! Note: SetPartial() must be called first!
*/
status_t
IOOperation::SetRequest(IORequest* request)
{
if (fParent != NULL)
fParent->RemoveOperation(this);
fParent = request;
// set initial phase
fPhase = PHASE_DO_ALL;
if (fParent->IsWrite()) {
if (HasPartialBegin())
fPhase = PHASE_READ_BEGIN;
else if (HasPartialEnd())
fPhase = PHASE_READ_END;
// Copy data to bounce buffer segments, save the partial begin/end vec,
// which will be copied after their respective read phase.
if (UsesBounceBuffer()) {
dprintf(" write with bounce buffer\n");
uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBuffer();
addr_t bounceBufferStart = fDMABuffer->PhysicalBounceBuffer();
addr_t bounceBufferEnd = bounceBufferStart
+ fDMABuffer->BounceBufferSize();
const iovec* vecs = fDMABuffer->Vecs();
uint32 vecCount = fDMABuffer->VecCount();
uint32 i = 0;
off_t offset = Offset();
if (HasPartialBegin()) {
offset += vecs[0].iov_len;
i++;
}
if (HasPartialEnd())
vecCount--;
for (; i < vecCount; i++) {
const iovec& vec = vecs[i];
addr_t base = (addr_t)vec.iov_base;
if (base >= bounceBufferStart && base < bounceBufferEnd) {
status_t error = fParent->CopyData(offset,
bounceBuffer + (base - bounceBufferStart), vec.iov_len);
if (error != B_OK)
return error;
}
offset += vec.iov_len;
}
}
}
fStatus = 1;
if (fParent != NULL)
fParent->AddOperation(this);
return B_OK;
}
@@ -128,17 +275,46 @@ IOOperation::SetRange(off_t offset, size_t length)
}
void
IOOperation::SetPartialOperation(bool partialOperation)
iovec*
IOOperation::Vecs() const
{
fIsPartitialOperation = partialOperation;
switch (fPhase) {
case PHASE_READ_END:
return fDMABuffer->Vecs() + (fDMABuffer->VecCount() - 1);
case PHASE_READ_BEGIN:
case PHASE_DO_ALL:
default:
return fDMABuffer->Vecs();
}
}
uint32
IOOperation::VecCount() const
{
switch (fPhase) {
case PHASE_READ_BEGIN:
case PHASE_READ_END:
return 1;
case PHASE_DO_ALL:
default:
return fDMABuffer->VecCount();
}
}
void
IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{
fPartialBegin = partialBegin;
fPartialEnd = partialEnd;
}
bool
IOOperation::IsWrite() const
{
return fParent->IsWrite();
return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
}
@@ -149,12 +325,118 @@ IOOperation::IsRead()
}
status_t
IOOperation::_CopyPartialBegin(bool isWrite, bool& partialBlockOnly)
{
size_t relativeOffset = OriginalOffset() - Offset();
size_t length = fDMABuffer->VecAt(0).iov_len;
partialBlockOnly = relativeOffset + OriginalLength() <= length;
if (partialBlockOnly)
length = relativeOffset + OriginalLength();
if (isWrite) {
return fParent->CopyData(OriginalOffset(),
(uint8*)fDMABuffer->BounceBuffer() + relativeOffset,
length - relativeOffset);
} else {
return fParent->CopyData(
(uint8*)fDMABuffer->BounceBuffer() + relativeOffset,
OriginalOffset(), length - relativeOffset);
}
}
status_t
IOOperation::_CopyPartialEnd(bool isWrite)
{
const iovec& lastVec = fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
off_t lastVecPos = Offset() + Length() - lastVec.iov_len;
if (isWrite) {
return fParent->CopyData(lastVecPos,
(uint8*)fDMABuffer->BounceBuffer()
+ ((addr_t)lastVec.iov_base
- fDMABuffer->PhysicalBounceBuffer()),
OriginalOffset() + OriginalLength() - lastVecPos);
} else {
return fParent->CopyData((uint8*)fDMABuffer->BounceBuffer()
+ ((addr_t)lastVec.iov_base
- fDMABuffer->PhysicalBounceBuffer()),
lastVecPos, OriginalOffset() + OriginalLength() - lastVecPos);
}
}
// #pragma mark -
IORequest::IORequest()
{
}
IORequest::~IORequest()
{
}
status_t
IORequest::Init(off_t offset, void* buffer, size_t length, bool write,
uint32 flags)
{
iovec vec;
vec.iov_base = buffer;
vec.iov_len = length;
return Init(offset, &vec, 1, length, write, flags);
}
status_t
IORequest::Init(off_t offset, iovec* vecs, size_t count, size_t length,
bool write, uint32 flags)
{
fBuffer = IOBuffer::Create(count);
if (fBuffer == NULL)
return B_NO_MEMORY;
fBuffer->SetVecs(vecs, count, length, flags);
fOffset = offset;
fLength = length;
fFlags = flags;
fTeam = team_get_current_team_id();
fIsWrite = write;
// these are for iteration
fVecIndex = 0;
fVecOffset = 0;
fRemainingBytes = length;
return B_OK;
}
void
IORequest::ChunkFinished(IORequestChunk* chunk, status_t status)
{
// TODO: we would need to update status atomically
if (fStatus <= 0) {
// we're already done
return;
}
fStatus = status;
if (fParent != NULL)
fParent->ChunkFinished(this, Status());
}
void
IORequest::Advance(size_t bySize)
{
dprintf("IORequest::Advance(%lu): remaining: %lu -> %lu\n", bySize,
fRemainingBytes, fRemainingBytes - bySize);
fRemainingBytes -= bySize;
iovec* vecs = fBuffer->Vecs();
@@ -201,9 +483,12 @@ IORequest::CopyData(const void* buffer, off_t offset, size_t size)
status_t
IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
{
if (size == 0)
return B_OK;
uint8* buffer = (uint8*)_buffer;
if (offset < fOffset || offset + size > fOffset + size) {
if (offset < fOffset || offset + size > fOffset + fLength) {
panic("IORequest::_CopyData(): invalid range: (%lld, %lu)", offset,
size);
return B_BAD_VALUE;
@@ -255,6 +540,7 @@ IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
IORequest::_CopySimple(void* bounceBuffer, void* external, size_t size,
bool copyIn)
{
dprintf(" IORequest::_CopySimple(%p, %p, %lu, %d)\n", bounceBuffer, external, size, copyIn);
if (copyIn)
memcpy(bounceBuffer, external, size);
else
@@ -277,7 +563,7 @@ IORequest::_CopyPhysical(void* _bounceBuffer, void* _external, size_t size,
return error;
size_t toCopy = min_c(size, B_PAGE_SIZE);
_CopySimple(bounceBuffer, (void*)external, toCopy, copyIn);
_CopySimple(bounceBuffer, (void*)virtualAddress, toCopy, copyIn);
vm_put_physical_page(virtualAddress);

File: io_requests.h

@@ -15,9 +15,8 @@
#include "dma_resources.h"
#define IO_BUFFER_PHYSICAL 0x01 /* buffer points to physical memory */
#define IO_BUFFER_USER 0x02 /* buffer points to user memory */
#define B_PHYSICAL_IO_REQUEST 0x01 /* buffer points to physical memory */
#define B_USER_IO_REQUEST 0x02 /* buffer points to user memory */
struct DMABuffer;
struct IOOperation;
@@ -31,16 +30,20 @@ public:
bool IsPhysical() const { return fPhysical; }
bool IsUser() const { return fUser; }
void SetVecs(const iovec* vecs, uint32 count,
size_t length, uint32 flags);
void SetPhysical(bool physical)
{ fPhysical = physical; }
void SetUser(bool user) { fUser = user; }
void SetLength(size_t length) { fLength = length; }
void SetVecCount(uint32 count) { fVecCount = count; }
size_t Length() const { return fLength; }
iovec* Vecs() { return fVecs; }
iovec& VecAt(size_t index) { return fVecs[index]; }
size_t VecCount() const { return fCount; }
size_t VecCount() const { return fVecCount; }
size_t Capacity() const { return fCapacity; }
status_t LockMemory(bool isWrite);
@@ -54,7 +57,7 @@ private:
bool fUser;
bool fPhysical;
size_t fLength;
size_t fCount;
size_t fVecCount;
size_t fCapacity;
iovec fVecs[1];
};
@@ -65,6 +68,7 @@ class IORequest;
class IORequestChunk {
public:
IORequestChunk();
virtual ~IORequestChunk();
// virtual status_t Wait(bigtime_t timeout = B_INFINITE_TIMEOUT);
@@ -96,7 +100,7 @@ public:
bool Finish();
// returns true, if it can be recycled
void SetRequest(IORequest* request);
status_t SetRequest(IORequest* request);
void SetOriginalRange(off_t offset, size_t length);
// also sets range
void SetRange(off_t offset, size_t length);
@@ -108,23 +112,37 @@ public:
size_t OriginalLength() const
{ return fOriginalLength; }
void SetPartialOperation(bool partialOperation);
bool IsPartialOperation() const
{ return fIsPartitialOperation; }
iovec* Vecs() const;
uint32 VecCount() const;
void SetPartial(bool partialBegin, bool partialEnd);
bool HasPartialBegin() const
{ return fPartialBegin; }
bool HasPartialEnd() const
{ return fPartialEnd; }
bool IsWrite() const;
bool IsRead() const;
bool UsesBounceBuffer() const
{ return fDMABuffer->UsesBounceBuffer(); }
DMABuffer* Buffer() const { return fDMABuffer; }
void SetBuffer(DMABuffer* buffer)
{ fDMABuffer = buffer; }
protected:
status_t _CopyPartialBegin(bool isWrite,
bool& partialBlockOnly);
status_t _CopyPartialEnd(bool isWrite);
DMABuffer* fDMABuffer;
off_t fOffset;
size_t fLength;
off_t fOriginalOffset;
size_t fOriginalLength;
bool fIsPartitialOperation;
bool fUsesBoundsBuffer;
uint32 fPhase;
bool fPartialBegin;
bool fPartialEnd;
};
typedef IOOperation io_operation;
@@ -135,13 +153,13 @@ struct IORequest : IORequestChunk, DoublyLinkedListLinkImpl<IORequest> {
IORequest();
virtual ~IORequest();
virtual void ChunkFinished(IORequestChunk* chunk,
status_t status);
status_t Init(void* buffer, size_t length, bool write,
uint32 flags);
status_t Init(iovec* vecs, size_t count, size_t length,
status_t Init(off_t offset, void* buffer, size_t length,
bool write, uint32 flags);
status_t Init(off_t offset, iovec* vecs, size_t count,
size_t length, bool write, uint32 flags);
void ChunkFinished(IORequestChunk* chunk,
status_t status);
size_t RemainingBytes() const
{ return fRemainingBytes; }
@@ -153,6 +171,9 @@ struct IORequest : IORequestChunk, DoublyLinkedListLinkImpl<IORequest> {
off_t Offset() const { return fOffset; }
size_t Length() const { return fLength; }
uint32 VecIndex() const { return fVecIndex; }
size_t VecOffset() const { return fVecOffset; }
void Advance(size_t bySize);
void AddOperation(IOOperation* operation);

File: Jamfile

@@ -6,7 +6,15 @@ BinCommand <test>listdev :
;
KernelAddon <test_driver>config :
config.c
;
config.c
;
SubDirHdrs $(HAIKU_TOP) src system kernel device_manager ;
UsePrivateKernelHeaders ;
KernelAddon <test_driver>dma_resource_test :
dma_resource_test.cpp
#: $(HAIKU_LIBSUPC++)
;
SubInclude HAIKU_TOP src tests system kernel device_manager playground ;

File: dma_resource_test.cpp (new file)

@@ -0,0 +1,759 @@
/*
* Copyright 2008, Ingo Weinhold, ingo_weinhold@gmx.de.
* Copyright 2008, Axel Dörfler, axeld@pinc-software.de.
* Distributed under the terms of the MIT License.
*/
#include <stdio.h>
#include <string.h>
#include <device_manager.h>
#include "dma_resources.h"
#include "io_requests.h"
#define DMA_TEST_BLOCK_SIZE 512
struct device_manager_info* sDeviceManager;
static area_id sArea;
static size_t sAreaSize;
static void* sAreaAddress;
static DMAResource* sDMAResource;
class Test : public DoublyLinkedListLinkImpl<Test> {
public:
Test(off_t offset, uint8* base, uint8* physicalBase,
size_t length, bool isWrite, uint32 flags);
Test& AddSource(addr_t base, size_t length);
Test& NextResult(off_t offset, bool partialBegin,
bool partialEnd);
Test& AddTarget(addr_t base, size_t length,
bool usesBounceBuffer);
void Run(DMAResource& resource);
private:
void _Panic(const char* message,...);
off_t fOffset;
uint8* fBase;
uint8* fPhysicalBase;
size_t fLength;
bool fIsWrite;
uint32 fFlags;
iovec fSourceVecs[32];
uint32 fSourceCount;
struct target_t {
addr_t address;
size_t length;
bool uses_bounce_buffer;
};
struct result_t {
off_t offset;
target_t targets[32];
uint32 count;
bool partial_begin;
bool partial_end;
};
result_t fResults[32];
uint32 fResultCount;
};
typedef DoublyLinkedList<Test> TestList;
class TestSuite {
public:
TestSuite(const char* name, const dma_restrictions& restrictions,
size_t blockSize, uint8* base, uint8* physicalBase)
:
fBase(base),
fPhysicalBase(physicalBase)
{
dprintf("----- Run \"%s\" tests ---------------------------\n", name);
dprintf(" DMA restrictions: address %#lx - %#lx, align %lu, boundary "
"%lu,\n max transfer %lu, max segs %lu, max seg size %lu, "
"flags %lx\n\n", restrictions.low_address,
restrictions.high_address, restrictions.alignment,
restrictions.boundary, restrictions.max_transfer_size,
restrictions.max_segment_count, restrictions.max_segment_size,
restrictions.flags);
status_t status = fDMAResource.Init(restrictions, blockSize, 10);
if (status != B_OK)
panic("initializing DMA resource failed: %s\n", strerror(status));
}
~TestSuite()
{
while (Test* test = fTests.RemoveHead()) {
delete test;
}
}
Test& AddTest(off_t offset, size_t length, bool isWrite, uint32 flags)
{
Test* test = new(std::nothrow) Test(offset, fBase, fPhysicalBase,
length, isWrite, flags);
fTests.Add(test);
return *test;
}
void Run()
{
TestList::Iterator iterator = fTests.GetIterator();
uint32 count = 1;
while (Test* test = iterator.Next()) {
dprintf("test %lu...\n", count++);
test->Run(fDMAResource);
}
}
private:
DMAResource fDMAResource;
uint8* fBase;
uint8* fPhysicalBase;
TestList fTests;
};
Test::Test(off_t offset, uint8* base, uint8* physicalBase, size_t length,
bool isWrite, uint32 flags)
:
fOffset(offset),
fBase(base),
fPhysicalBase(physicalBase),
fLength(length),
fIsWrite(isWrite),
fFlags(flags),
fSourceCount(0),
fResultCount(0)
{
}
Test&
Test::AddSource(addr_t address, size_t length)
{
fSourceVecs[fSourceCount].iov_base
= (void*)(((fFlags & B_PHYSICAL_IO_REQUEST) == 0
? fBase : fPhysicalBase) + address);
fSourceVecs[fSourceCount].iov_len = length;
fSourceCount++;
return *this;
}
Test&
Test::NextResult(off_t offset, bool partialBegin, bool partialEnd)
{
fResults[fResultCount].offset = offset;
fResults[fResultCount].count = 0;
fResults[fResultCount].partial_begin = partialBegin;
fResults[fResultCount].partial_end = partialEnd;
fResultCount++;
return *this;
}
Test&
Test::AddTarget(addr_t base, size_t length, bool usesBounceBuffer)
{
struct result_t& result = fResults[fResultCount - 1];
struct target_t& target = result.targets[result.count++];
target.address = base;
target.length = length;
target.uses_bounce_buffer = usesBounceBuffer;
return *this;
}
void
Test::Run(DMAResource& resource)
{
IORequest request;
status_t status = request.Init(fOffset, fSourceVecs, fSourceCount,
fLength, fIsWrite, fFlags);
if (status != B_OK)
_Panic("request init failed: %s\n", strerror(status));
uint32 resultIndex = 0;
IOOperation operation;
while (request.RemainingBytes() > 0) {
if (resultIndex >= fResultCount)
_Panic("no results left");
status_t status = resource.TranslateNext(&request, &operation);
if (status != B_OK) {
_Panic("DMAResource::TranslateNext() failed: %s\n",
strerror(status));
break;
}
DMABuffer* buffer = operation.Buffer();
dprintf("IOOperation: offset %Ld, length %lu (%Ld/%lu)\n",
operation.Offset(), operation.Length(), operation.OriginalOffset(),
operation.OriginalLength());
dprintf(" DMABuffer %p, %lu vecs, bounce buffer: %p (%p) %s\n", buffer,
buffer->VecCount(), buffer->BounceBuffer(),
(void*)buffer->PhysicalBounceBuffer(),
buffer->UsesBounceBuffer() ? "used" : "unused");
for (uint32 i = 0; i < buffer->VecCount(); i++) {
dprintf(" [%lu] base %p, length %lu\n", i,
buffer->VecAt(i).iov_base, buffer->VecAt(i).iov_len);
}
dprintf(" remaining bytes: %lu\n", request.RemainingBytes());
// check results
const result_t& result = fResults[resultIndex];
if (result.count != buffer->VecCount())
panic("result count differs (expected %lu)\n", result.count);
for (uint32 i = 0; i < result.count; i++) {
const target_t& target = result.targets[i];
const iovec& vec = buffer->VecAt(i);
if (target.length != vec.iov_len)
_Panic("[%lu] length differs", i);
void* address;
if (target.uses_bounce_buffer) {
address = (void*)(target.address
+ (addr_t)buffer->PhysicalBounceBuffer());
} else
address = (void*)(target.address + fPhysicalBase);
if (address != vec.iov_base) {
_Panic("[%lu] address differs: %p, should be %p", i,
vec.iov_base, address);
}
}
operation.SetStatus(B_OK);
bool finished = operation.Finish();
bool isPartial = result.partial_begin || result.partial_end;
if (finished == (isPartial && fIsWrite))
_Panic("partial finished %s", finished ? "early" : "late");
if (!finished) {
dprintf(" operation not done yet!\n");
operation.SetStatus(B_OK);
isPartial = result.partial_begin && result.partial_end;
finished = operation.Finish();
if (finished == isPartial)
_Panic("partial finished %s", finished ? "early" : "late");
if (!finished) {
dprintf(" operation not done yet!\n");
operation.SetStatus(B_OK);
if (!operation.Finish())
_Panic("operation doesn't finish");
}
}
resultIndex++;
}
}
void
Test::_Panic(const char* message,...)
{
char buffer[1024];
va_list args;
va_start(args, message);
vsnprintf(buffer, sizeof(buffer), message, args);
va_end(args);
dprintf("test failed\n");
dprintf(" offset: %lld\n", fOffset);
dprintf(" base: %p (physical: %p)\n", fBase, fPhysicalBase);
dprintf(" length: %lu\n", fLength);
dprintf(" write: %d\n", fIsWrite);
dprintf(" flags: %#lx\n", fFlags);
dprintf(" sources:\n");
for (uint32 i = 0; i < fSourceCount; i++) {
dprintf(" [%p, %lu]\n", fSourceVecs[i].iov_base,
fSourceVecs[i].iov_len);
}
for (uint32 i = 0; i < fResultCount; i++) {
const result_t& result = fResults[i];
dprintf(" result %lu:\n", i);
dprintf(" offset: %lld\n", result.offset);
dprintf(" partial: %d/%d\n", result.partial_begin,
result.partial_end);
for (uint32 k = 0; k < result.count; k++) {
const target_t& target = result.targets[k];
dprintf(" [%p, %lu, %d]\n", (void*)target.address, target.length,
target.uses_bounce_buffer);
}
}
panic("%s", buffer);
}
static void
run_tests_no_restrictions(uint8* address, uint8* physicalAddress, size_t size)
{
const dma_restrictions restrictions = {
0x0, // low
0x0, // high
0, // alignment
0, // boundary
0, // max transfer
0, // max segment count
0, // max segment size
0 // flags
};
TestSuite suite("no restrictions", restrictions, 512, address,
physicalAddress);
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, false, false)
.AddTarget(0, 1024, false);
suite.AddTest(23, 1024, true, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, true, true)
.AddTarget(0, 23, true)
.AddTarget(0, 1024, false)
.AddTarget(23, 512 - 23, true)
;
suite.AddTest(0, 1028, true, B_USER_IO_REQUEST)
.AddSource(0, 512)
.AddSource(1024, 516)
.NextResult(0, false, true)
.AddTarget(0, 512, false)
.AddTarget(1024, 516, false)
.AddTarget(0, 508, true);
suite.Run();
}
static void
run_tests_address_restrictions(uint8* address, uint8* physicalAddress,
size_t size)
{
const dma_restrictions restrictions = {
(addr_t)physicalAddress + 512, // low
0, // high
0, // alignment
0, // boundary
0, // max transfer
0, // max segment count
0, // max segment size
0 // flags
};
TestSuite suite("address", restrictions, 512, address, physicalAddress);
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, false, false)
.AddTarget(0, 512, true)
.AddTarget(512, 512, false);
suite.Run();
}
static void
run_tests_alignment_restrictions(uint8* address, uint8* physicalAddress,
size_t size)
{
const dma_restrictions restrictions = {
0x0, // low
0x0, // high
32, // alignment
0, // boundary
0, // max transfer
0, // max segment count
0, // max segment size
0 // flags
};
TestSuite suite("alignment", restrictions, 512, address, physicalAddress);
suite.AddTest(0, 1024, false, B_PHYSICAL_IO_REQUEST)
.AddSource(16, 1024)
.NextResult(0, false, false)
.AddTarget(0, 1024, true);
suite.Run();
}
static void
run_tests_boundary_restrictions(uint8* address, uint8* physicalAddress,
size_t size)
{
const dma_restrictions restrictions = {
0x0, // low
0x0, // high
0, // alignment
1024, // boundary
0, // max transfer
0, // max segment count
0, // max segment size
0 // flags
};
TestSuite suite("boundary", restrictions, 512, address, physicalAddress);
suite.AddTest(0, 2000, false, B_USER_IO_REQUEST)
.AddSource(0, 2048)
.NextResult(0, false, false)
.AddTarget(0, 1024, false)
.AddTarget(1024, 976, false)
.AddTarget(0, 48, true);
suite.Run();
}
static void
run_tests_segment_restrictions(uint8* address, uint8* physicalAddress,
size_t size)
{
const dma_restrictions restrictions = {
0x0, // low
0x0, // high
0, // alignment
0, // boundary
0, // max transfer
4, // max segment count
1024, // max segment size
0 // flags
};
TestSuite suite("segment", restrictions, 512, address, physicalAddress);
#if 0
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, false)
.AddTarget(0, 1024, false);
#endif
suite.Run();
}
static void
run_tests_mean_restrictions(uint8* address, uint8* physicalAddress, size_t size)
{
const dma_restrictions restrictions = {
(addr_t)physicalAddress + 1024, // low
0x0, // high
32, // alignment
512, // boundary
2048, // max transfer
2, // max segment count
1024, // max segment size
0 // flags
};
TestSuite suite("mean", restrictions, 512, address, physicalAddress);
#if 0
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, false)
.AddTarget(0, 1024, false);
#endif
suite.Run();
}
static void
run_test()
{
size_t size = 1 * 1024 * 1024;
uint8* address;
area_id area = create_area("dma source", (void**)&address,
B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return;
physical_entry entry;
get_memory_map(address, size, &entry, 1);
dprintf("DMA Test area %p, physical %p\n", address, entry.address);
run_tests_no_restrictions(address, (uint8*)entry.address, size);
run_tests_address_restrictions(address, (uint8*)entry.address, size);
run_tests_alignment_restrictions(address, (uint8*)entry.address, size);
run_tests_boundary_restrictions(address, (uint8*)entry.address, size);
run_tests_segment_restrictions(address, (uint8*)entry.address, size);
run_tests_mean_restrictions(address, (uint8*)entry.address, size);
delete_area(area);
panic("done.");
}
// #pragma mark - driver
float
dma_test_supports_device(device_node *parent)
{
const char* bus = NULL;
if (sDeviceManager->get_attr_string(parent, B_DEVICE_BUS, &bus, false)
== B_OK && !strcmp(bus, "generic"))
return 0.8;
return -1;
}
status_t
dma_test_register_device(device_node *parent)
{
device_attr attrs[] = {
{B_DEVICE_PRETTY_NAME, B_STRING_TYPE, {string: "DMA Test"}},
{NULL}
};
return sDeviceManager->register_node(parent,
"drivers/disk/dma_test/driver_v1", attrs, NULL, NULL);
}
status_t
dma_test_init_driver(device_node *node, void **_driverCookie)
{
sAreaSize = 10 * 1024 * 1024;
sArea = create_area("dma test", &sAreaAddress, B_ANY_KERNEL_ADDRESS,
sAreaSize, B_LAZY_LOCK, B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (sArea < B_OK)
return sArea;
*_driverCookie = node;
run_test();
return B_OK;
}
void
dma_test_uninit_driver(void *driverCookie)
{
delete_area(sArea);
}
status_t
dma_test_register_child_devices(void *driverCookie)
{
return sDeviceManager->publish_device((device_node*)driverCookie,
"disk/virtual/dma_test/raw", "drivers/disk/dma_test/device_v1");
}
// #pragma mark - device
status_t
dma_test_init_device(void *driverCookie, void **_deviceCookie)
{
*_deviceCookie = driverCookie;
return B_OK;
}
void
dma_test_uninit_device(void *deviceCookie)
{
}
status_t
dma_test_open(void *deviceCookie, const char *path, int openMode,
void **_cookie)
{
return B_OK;
}
status_t
dma_test_close(void *cookie)
{
return B_OK;
}
status_t
dma_test_free(void *cookie)
{
return B_OK;
}
status_t
dma_test_read(void *cookie, off_t pos, void *buffer, size_t *_length)
{
size_t length = *_length;
if (pos >= sAreaSize)
return B_BAD_VALUE;
if (pos + length > sAreaSize)
length = sAreaSize - pos;
status_t status = user_memcpy(buffer, (uint8*)sAreaAddress + pos, length);
if (status == B_OK)
*_length = length;
return status;
}
status_t
dma_test_write(void *cookie, off_t pos, const void *buffer, size_t *_length)
{
size_t length = *_length;
if (pos >= sAreaSize)
return B_BAD_VALUE;
if (pos + length > sAreaSize)
length = sAreaSize - pos;
status_t status = user_memcpy((uint8*)sAreaAddress + pos, buffer, length);
if (status == B_OK)
*_length = length;
return status;
}
status_t
dma_test_io(void *cookie, io_request *request)
{
return B_BAD_VALUE;
}
status_t
dma_test_control(void *cookie, uint32 op, void *buffer, size_t length)
{
switch (op) {
case B_GET_DEVICE_SIZE:
return user_memcpy(buffer, &sAreaSize, sizeof(size_t));
case B_SET_NONBLOCKING_IO:
case B_SET_BLOCKING_IO:
return B_OK;
case B_GET_READ_STATUS:
case B_GET_WRITE_STATUS:
{
bool value = true;
return user_memcpy(buffer, &value, sizeof(bool));
}
case B_GET_GEOMETRY:
case B_GET_BIOS_GEOMETRY:
{
device_geometry geometry;
geometry.bytes_per_sector = DMA_TEST_BLOCK_SIZE;
geometry.sectors_per_track = 1;
geometry.cylinder_count = sAreaSize / DMA_TEST_BLOCK_SIZE;
geometry.head_count = 1;
geometry.device_type = B_DISK;
geometry.removable = true;
geometry.read_only = false;
geometry.write_once = false;
return user_memcpy(buffer, &geometry, sizeof(device_geometry));
}
case B_GET_MEDIA_STATUS:
{
status_t status = B_OK;
return user_memcpy(buffer, &status, sizeof(status_t));
}
case B_SET_UNINTERRUPTABLE_IO:
case B_SET_INTERRUPTABLE_IO:
case B_FLUSH_DRIVE_CACHE:
return B_OK;
}
return B_BAD_VALUE;
}
module_dependency module_dependencies[] = {
{B_DEVICE_MANAGER_MODULE_NAME, (module_info **)&sDeviceManager},
{}
};
const static struct driver_module_info sDMATestDriverModule = {
{
"drivers/disk/dma_test/driver_v1",
0,
NULL
},
dma_test_supports_device,
dma_test_register_device,
dma_test_init_driver,
dma_test_uninit_driver,
dma_test_register_child_devices
};
const static struct device_module_info sDMATestDeviceModule = {
{
"drivers/disk/dma_test/device_v1",
0,
NULL
},
dma_test_init_device,
dma_test_uninit_device,
NULL,
dma_test_open,
dma_test_close,
dma_test_free,
dma_test_read,
dma_test_write,
NULL, // io
dma_test_control,
NULL, // select
NULL // deselect
};
const module_info* modules[] = {
(module_info*)&sDMATestDriverModule,
(module_info*)&sDMATestDeviceModule,
NULL
};