* Enabled all DMA tests and wrote some more.

* Moved data buffer creation to a TestSuiteContext class.
* Added checks that the I/O operation does the correct thing, i.e. reads/writes
  the data at the right offset.
* Rearranged DMA translation: we now handle the partial write case correctly
  (the bounce buffer must always span the whole block), and we can now join
  adjacent bounce buffers.
* The new _AddBounceBuffer() method also respects boundary and segment size
  restrictions for bounce buffers (see the first sketch after this list).
* IOOperation now prepares the outgoing vecs/offset/length to contain the
  right data for the current phase (partial read begin/end/do-all); it also
  makes sure that the combined length of the vecs matches the whole request
  (see the second sketch after this list).
* All tests now pass; the I/O request implementation seems ready for
  integration.
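
The following is a simplified, self-contained sketch of the bounce buffer
joining and restriction logic described above (illustrative names only, not
the actual _AddBounceBuffer() signature; the real code is in the diff below).
It merges a new bounce extent with the previous vec when the two are
physically contiguous, while honoring a power-of-two boundary and maximum
segment size/count restrictions:

#include <stdint.h>
#include <stddef.h>
#include <vector>

struct Vec { uintptr_t base; size_t length; };

// Clip "length" so that [base, base + length) neither crosses "boundary"
// (assumed to be a power of two, 0 = unrestricted) nor exceeds
// "maxSegmentSize" (0 = unrestricted).
static void
restrict_extent(uintptr_t base, size_t& length, size_t boundary,
	size_t maxSegmentSize)
{
	if (boundary > 0) {
		size_t toBoundary = boundary - (base & (boundary - 1));
		if (length > toBoundary)
			length = toBoundary;
	}
	if (maxSegmentSize > 0 && length > maxSegmentSize)
		length = maxSegmentSize;
}

// Append "length" bytes of bounce buffer starting at physical address
// "bouncePhys" to the vec list, merging with the last vec when it ends
// exactly where the bounce extent begins. Returns the number of bytes
// actually added (less than requested when the segment count limit hits).
static size_t
add_bounce(std::vector<Vec>& vecs, uintptr_t& bouncePhys, size_t length,
	size_t boundary, size_t maxSegmentSize, size_t maxSegmentCount)
{
	size_t used = 0;

	if (!vecs.empty() && vecs.back().base + vecs.back().length == bouncePhys) {
		// the bounce extent is adjacent to the previous vec -- try to join
		size_t newLength = vecs.back().length + length;
		restrict_extent(vecs.back().base, newLength, boundary, maxSegmentSize);
		if (newLength > vecs.back().length) {
			size_t diff = newLength - vecs.back().length;
			vecs.back().length = newLength;
			bouncePhys += diff;
			length -= diff;
			used += diff;
		}
	}

	while (length > 0 && vecs.size() < maxSegmentCount) {
		// add further vecs, each clipped to boundary/segment size
		size_t vecLength = length;
		restrict_extent(bouncePhys, vecLength, boundary, maxSegmentSize);
		vecs.push_back({bouncePhys, vecLength});
		bouncePhys += vecLength;
		length -= vecLength;
		used += vecLength;
	}
	return used;
}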
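
And a minimal sketch of the phase handling for a block-unaligned write: the
operation first reads back the block containing the partial begin, then the
block containing the partial end, and only then performs the full transfer,
with Offset()/Length() depending on the current phase. The names
range_for_phase/phase_range are hypothetical; only the PHASE_* constants are
taken from the actual code (their values here are illustrative):

#include <sys/types.h>
#include <stddef.h>

enum { PHASE_READ_BEGIN, PHASE_READ_END, PHASE_DO_ALL };

struct phase_range { off_t offset; size_t length; };

// "offset"/"length" are the (block aligned) range the operation covers and
// are assumed to span at least one full block; blockSize is the device
// block size.
static phase_range
range_for_phase(int phase, off_t offset, size_t length, size_t blockSize)
{
	switch (phase) {
		case PHASE_READ_BEGIN:
			// read back only the first block (contains the partial begin)
			return { offset, blockSize };
		case PHASE_READ_END:
			// read back only the last block (contains the partial end)
			return { offset + (off_t)(length - blockSize), blockSize };
		case PHASE_DO_ALL:
		default:
			// the actual transfer covers the whole range
			return { offset, length };
	}
}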


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26556 a95241bf-73f2-0310-859d-f6bbb57e9c96
Axel Dörfler 2008-07-22 00:43:01 +00:00
parent bee04bfed9
commit 8faff60c7f
5 changed files with 959 additions and 264 deletions

View File

@ -12,6 +12,14 @@
#include "io_requests.h" #include "io_requests.h"
#define TRACE_DMA_RESOURCE
#ifdef TRACE_DMA_RESOURCE
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
const size_t kMaxBounceBufferSize = 4 * B_PAGE_SIZE; const size_t kMaxBounceBufferSize = 4 * B_PAGE_SIZE;
@ -58,6 +66,18 @@ DMABuffer::SetToBounceBuffer(size_t length)
} }
bool
DMABuffer::UsesBounceBufferAt(uint32 index)
{
if (index >= fVecCount)
return false;
return (addr_t)fVecs[index].iov_base >= fPhysicalBounceBuffer
&& (addr_t)fVecs[index].iov_base
< fPhysicalBounceBuffer + fBounceBufferSize;
}
// #pragma mark - // #pragma mark -
@ -95,10 +115,12 @@ DMAResource::Init(const dma_restrictions& restrictions, size_t blockSize,
fRestrictions.max_segment_size = ~(size_t)0; fRestrictions.max_segment_size = ~(size_t)0;
if (_NeedsBoundsBuffers()) { if (_NeedsBoundsBuffers()) {
// TODO: Enforce that the bounce buffer size won't cross boundaries. fBounceBufferSize = fRestrictions.max_segment_size
fBounceBufferSize = fRestrictions.max_segment_size; * min_c(fRestrictions.max_segment_count, 4);
if (fBounceBufferSize > kMaxBounceBufferSize) if (fBounceBufferSize > kMaxBounceBufferSize)
fBounceBufferSize = max_c(kMaxBounceBufferSize, fBlockSize); fBounceBufferSize = kMaxBounceBufferSize;
TRACE("DMAResource::Init(): chose bounce buffer size %lu\n",
fBounceBufferSize);
} }
fScratchVecs = (iovec*)malloc( fScratchVecs = (iovec*)malloc(
@ -183,6 +205,106 @@ DMAResource::_RestrictBoundaryAndSegmentSize(addr_t base, addr_t& length)
} }
void
DMAResource::_CutBuffer(DMABuffer& buffer, addr_t& physicalBounceBuffer,
size_t& bounceLeft, size_t toCut)
{
int32 vecCount = buffer.VecCount();
for (int32 i = vecCount - 1; toCut > 0 && i >= 0; i--) {
iovec& vec = buffer.VecAt(i);
size_t length = vec.iov_len;
bool inBounceBuffer = buffer.UsesBounceBufferAt(i);
if (length <= toCut) {
vecCount--;
toCut -= length;
if (inBounceBuffer) {
bounceLeft += length;
physicalBounceBuffer -= length;
}
} else {
vec.iov_len -= toCut;
if (inBounceBuffer) {
bounceLeft += toCut;
physicalBounceBuffer -= toCut;
}
break;
}
}
buffer.SetVecCount(vecCount);
}
/*! Adds \a length bytes from the bounce buffer to the DMABuffer \a buffer.
Takes care of boundary, and segment restrictions. \a length must be aligned.
If \a fixedLength is requested, this function will fail if it cannot
satisfy the request.
\return 0 if the request cannot be satisfied. There could have been some
additions to the DMA buffer, and you will need to cut them back.
TODO: is that what we want here?
\return >0 the number of bytes added to the buffer.
*/
size_t
DMAResource::_AddBounceBuffer(DMABuffer& buffer, addr_t& physicalBounceBuffer,
size_t& bounceLeft, size_t length, bool fixedLength)
{
if (bounceLeft < length) {
if (fixedLength)
return 0;
length = bounceLeft;
}
size_t bounceUsed = 0;
uint32 vecCount = buffer.VecCount();
if (vecCount > 0) {
// see if we can join the bounce buffer with the previously last vec
iovec& vec = buffer.VecAt(vecCount - 1);
addr_t vecBase = (addr_t)vec.iov_base;
size_t vecLength = vec.iov_len;
if (vecBase + vecLength == physicalBounceBuffer) {
vecLength += length;
_RestrictBoundaryAndSegmentSize(vecBase, vecLength);
size_t lengthDiff = vecLength - vec.iov_len;
length -= lengthDiff;
physicalBounceBuffer += lengthDiff;
bounceLeft -= lengthDiff;
bounceUsed += lengthDiff;
vec.iov_len = vecLength;
}
}
while (length > 0) {
// We need to add another bounce vec
if (vecCount == fRestrictions.max_segment_count)
return fixedLength ? 0 : bounceUsed;
addr_t vecLength = length;
_RestrictBoundaryAndSegmentSize(physicalBounceBuffer, vecLength);
buffer.AddVec((void*)physicalBounceBuffer, vecLength);
vecCount++;
physicalBounceBuffer += vecLength;
bounceLeft -= vecLength;
bounceUsed += vecLength;
length -= vecLength;
}
return bounceUsed;
}
status_t status_t
DMAResource::TranslateNext(IORequest* request, IOOperation* operation) DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
{ {
@ -208,21 +330,22 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
iovec* vecs = NULL; iovec* vecs = NULL;
uint32 segmentCount = 0; uint32 segmentCount = 0;
bool partialBegin = (offset & (fBlockSize - 1)) != 0; size_t partialBegin = offset & (fBlockSize - 1);
dprintf(" offset %Ld, block size %lu -> %s\n", offset, fBlockSize, partialBegin ? "partial" : "whole"); TRACE(" offset %Ld, block size %lu -> partial: %lu\n", offset, fBlockSize,
partialBegin);
if (buffer->IsVirtual()) { if (buffer->IsVirtual()) {
// Unless we need the bounce buffer anyway, we have to translate the // Unless we need the bounce buffer anyway, we have to translate the
// virtual addresses to physical addresses, so we can check the DMA // virtual addresses to physical addresses, so we can check the DMA
// restrictions. // restrictions.
dprintf(" IS VIRTUAL\n"); TRACE(" buffer is virtual\n");
// TODO: !partialOperation || totalLength >= fBlockSize
// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.
if (true) { if (true) {
// TODO: !partialOperation || totalLength >= fBlockSize
// TODO: Maybe enforce fBounceBufferSize >= 2 * fBlockSize.
size_t transferLeft = totalLength; size_t transferLeft = totalLength;
vecs = fScratchVecs; vecs = fScratchVecs;
dprintf(" CREATE PHYSICAL MAP %ld\n", buffer->VecCount()); TRACE(" create physical map (for %ld vecs)\n", buffer->VecCount());
for (uint32 i = vecIndex; i < buffer->VecCount(); i++) { for (uint32 i = vecIndex; i < buffer->VecCount(); i++) {
iovec& vec = buffer->VecAt(i); iovec& vec = buffer->VecAt(i);
addr_t base = (addr_t)vec.iov_base + vecOffset; addr_t base = (addr_t)vec.iov_base + vecOffset;
@ -230,7 +353,6 @@ dprintf(" CREATE PHYSICAL MAP %ld\n", buffer->VecCount());
vecOffset = 0; vecOffset = 0;
if (size > transferLeft) if (size > transferLeft)
size = transferLeft; size = transferLeft;
dprintf(" size = %lu\n", size);
while (size > 0 && segmentCount while (size > 0 && segmentCount
< fRestrictions.max_segment_count) { < fRestrictions.max_segment_count) {
@ -262,33 +384,44 @@ dprintf(" size = %lu\n", size);
fRestrictions.max_segment_count); fRestrictions.max_segment_count);
} }
dprintf(" physical count %lu\n", segmentCount); #ifdef TRACE_DMA_RESOURCE
for (uint32 i = 0; i < segmentCount; i++) { TRACE(" physical count %lu\n", segmentCount);
dprintf(" [%lu] %p, %lu\n", i, vecs[i].iov_base, vecs[i].iov_len); for (uint32 i = 0; i < segmentCount; i++) {
} TRACE(" [%lu] %p, %lu\n", i, vecs[i].iov_base, vecs[i].iov_len);
}
#endif
// check alignment, boundaries, etc. and set vecs in DMA buffer // check alignment, boundaries, etc. and set vecs in DMA buffer
size_t dmaLength = 0; size_t dmaLength = 0;
addr_t physicalBounceBuffer = dmaBuffer->PhysicalBounceBuffer(); addr_t physicalBounceBuffer = dmaBuffer->PhysicalBounceBuffer();
size_t bounceLeft = fBounceBufferSize; size_t bounceLeft = fBounceBufferSize;
size_t transferLeft = totalLength;
// If the offset isn't block-aligned, use the bounce buffer to bridge the // If the offset isn't block-aligned, use the bounce buffer to bridge the
// gap to the start of the vec. // gap to the start of the vec.
if (partialBegin) { if (partialBegin > 0) {
off_t diff = offset & (fBlockSize - 1); size_t length;
addr_t base = physicalBounceBuffer; if (request->IsWrite()) {
size_t length = (diff + fRestrictions.alignment - 1) // we always need to read in a whole block for the partial write
& ~(fRestrictions.alignment - 1); length = fBlockSize;
} else {
length = (partialBegin + fRestrictions.alignment - 1)
& ~(fRestrictions.alignment - 1);
}
physicalBounceBuffer += length; if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft,
bounceLeft -= length; length, true) == 0) {
TRACE(" adding partial begin failed, length %lu!\n", length);
return B_BAD_VALUE;
}
dmaBuffer->AddVec((void*)base, length);
dmaLength += length; dmaLength += length;
vecOffset += length - diff; vecOffset += length - partialBegin;
offset -= diff; offset -= partialBegin;
dprintf(" partial begin, using bounce buffer: offset: %lld, length: %lu\n", offset, length); TRACE(" partial begin, using bounce buffer: offset: %lld, length: "
"%lu\n", offset, length);
} }
for (uint32 i = vecIndex; i < segmentCount;) { for (uint32 i = vecIndex; i < segmentCount;) {
@ -304,99 +437,110 @@ dprintf(" partial begin, using bounce buffer: offset: %lld, length: %lu\n", off
addr_t base = (addr_t)vec.iov_base + vecOffset; addr_t base = (addr_t)vec.iov_base + vecOffset;
size_t length = vec.iov_len - vecOffset; size_t length = vec.iov_len - vecOffset;
if (length > transferLeft)
length = transferLeft;
// Cut the vec according to transfer size, segment size, and boundary. // Cut the vec according to transfer size, segment size, and boundary.
if (dmaLength + length > fRestrictions.max_transfer_size) if (dmaLength + length > fRestrictions.max_transfer_size) {
{
length = fRestrictions.max_transfer_size - dmaLength; length = fRestrictions.max_transfer_size - dmaLength;
dprintf(" vec %lu: restricting length to %lu due to transfer size limit\n", i, length); TRACE(" vec %lu: restricting length to %lu due to transfer size "
} "limit\n", i, length);
}
_RestrictBoundaryAndSegmentSize(base, length); _RestrictBoundaryAndSegmentSize(base, length);
size_t useBounceBuffer = 0; size_t useBounceBufferSize = 0;
// Check low address: use bounce buffer for range to low address. // Check low address: use bounce buffer for range to low address.
// Check alignment: if not aligned, use bounce buffer for complete vec. // Check alignment: if not aligned, use bounce buffer for complete vec.
if (base < fRestrictions.low_address) if (base < fRestrictions.low_address) {
{ useBounceBufferSize = fRestrictions.low_address - base;
useBounceBuffer = fRestrictions.low_address - base; TRACE(" vec %lu: below low address, using bounce buffer: %lu\n", i,
dprintf(" vec %lu: below low address, using bounce buffer: %lu\n", i, useBounceBuffer); useBounceBufferSize);
} } else if (base & (fRestrictions.alignment - 1)) {
else if (base & (fRestrictions.alignment - 1)) useBounceBufferSize = length;
{ TRACE(" vec %lu: misalignment, using bounce buffer: %lu\n", i,
useBounceBuffer = length; useBounceBufferSize);
dprintf(" vec %lu: misalignment, using bounce buffer: %lu\n", i, useBounceBuffer); }
}
// TODO: Enforce high address restriction! // Enforce high address restriction
if (base > fRestrictions.high_address)
useBounceBufferSize = length;
else if (base + length > fRestrictions.high_address)
length = fRestrictions.high_address - base;
// Align length as well
if (useBounceBufferSize == 0)
length &= ~(fRestrictions.alignment - 1);
// If length is 0, use bounce buffer for complete vec. // If length is 0, use bounce buffer for complete vec.
if (length == 0) { if (length == 0) {
length = vec.iov_len - vecOffset; length = vec.iov_len - vecOffset;
useBounceBuffer = length; useBounceBufferSize = length;
dprintf(" vec %lu: 0 length, using bounce buffer: %lu\n", i, useBounceBuffer); TRACE(" vec %lu: 0 length, using bounce buffer: %lu\n", i,
useBounceBufferSize);
} }
if (useBounceBuffer > 0) { if (useBounceBufferSize > 0) {
if (bounceLeft == 0) { // alignment could still be wrong (we round up here)
dprintf(" vec %lu: out of bounce buffer space\n", i); useBounceBufferSize = (useBounceBufferSize
+ fRestrictions.alignment - 1) & ~(fRestrictions.alignment - 1);
length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
bounceLeft, useBounceBufferSize, false);
if (length == 0) {
TRACE(" vec %lu: out of bounce buffer space\n", i);
// We don't have any bounce buffer space left, we need to move // We don't have any bounce buffer space left, we need to move
// this request to the next I/O operation. // this request to the next I/O operation.
break; break;
} }
TRACE(" vec %lu: final bounce length: %lu\n", i, length);
base = physicalBounceBuffer; } else {
TRACE(" vec %lu: final length restriction: %lu\n", i, length);
if (useBounceBuffer > length) dmaBuffer->AddVec((void*)base, length);
useBounceBuffer = length;
if (useBounceBuffer > bounceLeft)
useBounceBuffer = bounceLeft;
length = useBounceBuffer;
} }
// check boundary and max segment size.
_RestrictBoundaryAndSegmentSize(base, length);
dprintf(" vec %lu: final length restriction: %lu\n", i, length);
if (useBounceBuffer) {
// alignment could still be wrong
if (useBounceBuffer & (fRestrictions.alignment - 1)) {
useBounceBuffer
= (useBounceBuffer + fRestrictions.alignment - 1)
& ~(fRestrictions.alignment - 1);
if (dmaLength + useBounceBuffer
> fRestrictions.max_transfer_size) {
useBounceBuffer = (fRestrictions.max_transfer_size
- dmaLength) & ~(fRestrictions.alignment - 1);
}
}
physicalBounceBuffer += useBounceBuffer;
bounceLeft -= useBounceBuffer;
}
vecOffset += length;
// TODO: we might be able to join the vec with its preceding vec
// (but then we'd need to take the segment size into account again)
dmaBuffer->AddVec((void*)base, length);
dmaLength += length; dmaLength += length;
vecOffset += length;
transferLeft -= length;
} }
// If total length not block aligned, use bounce buffer for padding. // If we're writing partially, we always need to have a block sized bounce
if ((dmaLength & (fBlockSize - 1)) != 0) { // buffer (or else we would overwrite memory to be written on the read in
dprintf(" dmaLength not block aligned: %lu\n", dmaLength); // the first phase).
size_t length = (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1); if (request->IsWrite() && (dmaLength & (fBlockSize - 1)) != 0) {
size_t diff = dmaLength & (fBlockSize - 1);
TRACE(" partial end write: %lu, diff %lu\n", dmaLength, diff);
_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff);
dmaLength -= diff;
if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
bounceLeft, fBlockSize, true) == 0) {
// If we cannot write anything, we can't process the request at all
TRACE(" adding bounce buffer failed!!!\n");
if (dmaLength == 0)
return B_BAD_VALUE;
} else
dmaLength += fBlockSize;
}
// If total length not block aligned, use bounce buffer for padding (read
// case only).
while ((dmaLength & (fBlockSize - 1)) != 0) {
TRACE(" dmaLength not block aligned: %lu\n", dmaLength);
size_t length = (dmaLength + fBlockSize - 1) & ~(fBlockSize - 1);
// If total length > max transfer size, segment count > max segment // If total length > max transfer size, segment count > max segment
// count, truncate. // count, truncate.
// TODO: sometimes we can replace the last vec with the bounce buffer
// to let it match the restrictions.
if (length > fRestrictions.max_transfer_size if (length > fRestrictions.max_transfer_size
|| dmaBuffer->VecCount() == fRestrictions.max_segment_count || dmaBuffer->VecCount() == fRestrictions.max_segment_count
|| bounceLeft < length - dmaLength) { || bounceLeft < length - dmaLength) {
// cut the part of dma length // cut the part of dma length
dprintf(" can't align length due to max transfer size, segment count " TRACE(" can't align length due to max transfer size, segment "
"restrictions, or lacking bounce buffer space\n"); "count restrictions, or lacking bounce buffer space\n");
size_t toCut = dmaLength size_t toCut = dmaLength
& (max_c(fBlockSize, fRestrictions.alignment) - 1); & (max_c(fBlockSize, fRestrictions.alignment) - 1);
dmaLength -= toCut; dmaLength -= toCut;
@ -410,36 +554,33 @@ dprintf(" can't align length due to max transfer size, segment count "
& ~(max_c(fBlockSize, fRestrictions.alignment) - 1); & ~(max_c(fBlockSize, fRestrictions.alignment) - 1);
_RestrictBoundaryAndSegmentSize(base, dmaLength); _RestrictBoundaryAndSegmentSize(base, dmaLength);
dmaBuffer->AddVec((void*)base, dmaLength); dmaBuffer->AddVec((void*)base, dmaLength);
} else {
int32 dmaVecCount = dmaBuffer->VecCount();
for (int32 i = dmaVecCount - 1; toCut > 0 && i >= 0; i--) {
iovec& vec = dmaBuffer->VecAt(i);
size_t length = vec.iov_len;
if (length <= toCut) {
dmaVecCount--;
toCut -= length;
} else {
vec.iov_len -= toCut;
break;
}
}
dmaBuffer->SetVecCount(dmaVecCount); physicalBounceBuffer = base + dmaLength;
bounceLeft = fBounceBufferSize - dmaLength;
} else {
_CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, toCut);
} }
} else { } else {
dprintf(" adding %lu bytes final bounce buffer\n", length - dmaLength); TRACE(" adding %lu bytes final bounce buffer\n",
dmaBuffer->AddVec((void*)physicalBounceBuffer, length - dmaLength); length - dmaLength);
dmaLength = length; length -= dmaLength;
length = _AddBounceBuffer(*dmaBuffer, physicalBounceBuffer,
bounceLeft, length, true);
if (length == 0)
panic("don't do this to me!");
dmaLength += length;
} }
} }
off_t requestEnd = request->Offset() + request->Length();
operation->SetBuffer(dmaBuffer); operation->SetBuffer(dmaBuffer);
operation->SetBlockSize(fBlockSize);
operation->SetOriginalRange(originalOffset, operation->SetOriginalRange(originalOffset,
min_c(offset + dmaLength, request->Offset() + request->Length()) min_c(offset + dmaLength, requestEnd) - originalOffset);
- originalOffset);
operation->SetRange(offset, dmaLength); operation->SetRange(offset, dmaLength);
operation->SetPartial(partialBegin, operation->SetPartial(partialBegin != 0, offset + dmaLength > requestEnd);
offset + dmaLength > request->Offset() + request->Length()); operation->SetUsesBounceBuffer(bounceLeft < fBounceBufferSize);
status_t error = operation->SetRequest(request); status_t error = operation->SetRequest(request);
if (error != B_OK) if (error != B_OK)

View File

@ -47,11 +47,8 @@ public:
size_t BounceBufferSize() const size_t BounceBufferSize() const
{ return fBounceBufferSize; } { return fBounceBufferSize; }
bool UsesBounceBufferAt(uint32 index);
void SetToBounceBuffer(size_t length); void SetToBounceBuffer(size_t length);
bool UsesBounceBuffer() const
{ return fVecCount >= 1
&& (addr_t)fVecs[0].iov_base
== fPhysicalBounceBuffer; }
private: private:
void* fBounceBuffer; void* fBounceBuffer;
@ -87,6 +84,13 @@ private:
bool _NeedsBoundsBuffers() const; bool _NeedsBoundsBuffers() const;
void _RestrictBoundaryAndSegmentSize(addr_t base, void _RestrictBoundaryAndSegmentSize(addr_t base,
addr_t& length); addr_t& length);
void _CutBuffer(DMABuffer& buffer,
addr_t& physicalBounceBuffer,
size_t& bounceLeft, size_t toCut);
size_t _AddBounceBuffer(DMABuffer& buffer,
addr_t& physicalBounceBuffer,
size_t& bounceLeft, size_t length,
bool fixedLength);
mutex fLock; mutex fLock;
dma_restrictions fRestrictions; dma_restrictions fRestrictions;

View File

@ -14,6 +14,14 @@
#include "dma_resources.h" #include "dma_resources.h"
#define TRACE_IO_REQUEST
#ifdef TRACE_IO_REQUEST
# define TRACE(x...) dprintf(x)
#else
# define TRACE(x...) ;
#endif
// partial I/O operation phases // partial I/O operation phases
enum { enum {
PHASE_READ_BEGIN = 0, PHASE_READ_BEGIN = 0,
@ -105,12 +113,15 @@ IOBuffer::UnlockMemory(bool isWrite)
bool bool
IOOperation::Finish() IOOperation::Finish()
{ {
dprintf("IOOperation::Finish()\n"); TRACE("IOOperation::Finish()\n");
if (fStatus == B_OK) { if (fStatus == B_OK) {
if (fParent->IsWrite()) { if (fParent->IsWrite()) {
dprintf(" is write\n"); TRACE(" is write\n");
if (fPhase == PHASE_READ_BEGIN) { if (fPhase == PHASE_READ_BEGIN) {
dprintf(" phase read begin\n"); TRACE(" phase read begin\n");
// repair phase adjusted vec
fDMABuffer->VecAt(fSavedVecIndex).iov_len = fSavedVecLength;
// partial write: copy partial begin to bounce buffer // partial write: copy partial begin to bounce buffer
bool skipReadEndPhase; bool skipReadEndPhase;
status_t error = _CopyPartialBegin(true, skipReadEndPhase); status_t error = _CopyPartialBegin(true, skipReadEndPhase);
@ -119,6 +130,7 @@ dprintf(" phase read begin\n");
// Get ready for next phase... // Get ready for next phase...
fPhase = HasPartialEnd() && !skipReadEndPhase fPhase = HasPartialEnd() && !skipReadEndPhase
? PHASE_READ_END : PHASE_DO_ALL; ? PHASE_READ_END : PHASE_DO_ALL;
_PrepareVecs();
SetStatus(1); SetStatus(1);
// TODO: Is there a race condition, if the request is // TODO: Is there a race condition, if the request is
// aborted at the same time? // aborted at the same time?
@ -127,7 +139,13 @@ dprintf(" phase read begin\n");
SetStatus(error); SetStatus(error);
} else if (fPhase == PHASE_READ_END) { } else if (fPhase == PHASE_READ_END) {
dprintf(" phase read end\n"); TRACE(" phase read end\n");
// repair phase adjusted vec
iovec& vec = fDMABuffer->VecAt(fSavedVecIndex);
vec.iov_base = (uint8*)vec.iov_base
+ vec.iov_len - fSavedVecLength;
vec.iov_len = fSavedVecLength;
// partial write: copy partial end to bounce buffer // partial write: copy partial end to bounce buffer
status_t error = _CopyPartialEnd(true); status_t error = _CopyPartialEnd(true);
if (error == B_OK) { if (error == B_OK) {
@ -146,7 +164,7 @@ dprintf(" phase read end\n");
} }
if (fParent->IsRead() && UsesBounceBuffer()) { if (fParent->IsRead() && UsesBounceBuffer()) {
dprintf(" read with bounce buffer\n"); TRACE(" read with bounce buffer\n");
// copy the bounce buffer segments to the final location // copy the bounce buffer segments to the final location
uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBuffer(); uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBuffer();
addr_t bounceBufferStart = fDMABuffer->PhysicalBounceBuffer(); addr_t bounceBufferStart = fDMABuffer->PhysicalBounceBuffer();
@ -155,32 +173,42 @@ dprintf(" read with bounce buffer\n");
const iovec* vecs = fDMABuffer->Vecs(); const iovec* vecs = fDMABuffer->Vecs();
uint32 vecCount = fDMABuffer->VecCount(); uint32 vecCount = fDMABuffer->VecCount();
uint32 i = 0;
off_t offset = Offset();
status_t error = B_OK; status_t error = B_OK;
bool partialBlockOnly = false;
if (HasPartialBegin()) {
error = _CopyPartialBegin(false, partialBlockOnly);
offset += vecs[0].iov_len;
i++;
}
if (error == B_OK && HasPartialEnd() && !partialBlockOnly) { off_t offset = fOffset;
error = _CopyPartialEnd(false); off_t startOffset = fOriginalOffset;
vecCount--; off_t endOffset = fOriginalOffset + fOriginalLength;
}
for (; error == B_OK && i < vecCount; i++) { for (uint32 i = 0; error == B_OK && i < vecCount; i++) {
const iovec& vec = vecs[i]; const iovec& vec = vecs[i];
addr_t base = (addr_t)vec.iov_base; addr_t base = (addr_t)vec.iov_base;
size_t length = vec.iov_len;
if (offset < startOffset) {
if (offset + length <= startOffset) {
offset += length;
continue;
}
size_t diff = startOffset - offset;
base += diff;
length -= diff;
}
if (offset + length > endOffset) {
if (offset >= endOffset)
break;
length = endOffset - offset;
}
if (base >= bounceBufferStart && base < bounceBufferEnd) { if (base >= bounceBufferStart && base < bounceBufferEnd) {
error = fParent->CopyData( error = fParent->CopyData(
bounceBuffer + (base - bounceBufferStart), offset, bounceBuffer + (base - bounceBufferStart), offset, length);
vec.iov_len);
} }
offset += vec.iov_len;
offset += length;
} }
if (error != B_OK) if (error != B_OK)
@ -208,15 +236,10 @@ IOOperation::SetRequest(IORequest* request)
// set initial phase // set initial phase
fPhase = PHASE_DO_ALL; fPhase = PHASE_DO_ALL;
if (fParent->IsWrite()) { if (fParent->IsWrite()) {
if (HasPartialBegin())
fPhase = PHASE_READ_BEGIN;
else if (HasPartialEnd())
fPhase = PHASE_READ_END;
// Copy data to bounce buffer segments, save the partial begin/end vec, // Copy data to bounce buffer segments, save the partial begin/end vec,
// which will be copied after their respective read phase. // which will be copied after their respective read phase.
if (UsesBounceBuffer()) { if (UsesBounceBuffer()) {
dprintf(" write with bounce buffer\n"); TRACE(" write with bounce buffer\n");
uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBuffer(); uint8* bounceBuffer = (uint8*)fDMABuffer->BounceBuffer();
addr_t bounceBufferStart = fDMABuffer->PhysicalBounceBuffer(); addr_t bounceBufferStart = fDMABuffer->PhysicalBounceBuffer();
addr_t bounceBufferEnd = bounceBufferStart addr_t bounceBufferEnd = bounceBufferStart
@ -224,30 +247,67 @@ dprintf(" write with bounce buffer\n");
const iovec* vecs = fDMABuffer->Vecs(); const iovec* vecs = fDMABuffer->Vecs();
uint32 vecCount = fDMABuffer->VecCount(); uint32 vecCount = fDMABuffer->VecCount();
size_t vecOffset = 0;
uint32 i = 0; uint32 i = 0;
off_t offset = Offset(); off_t offset = fOffset;
off_t endOffset = fOffset + fLength;
if (HasPartialBegin()) { if (HasPartialBegin()) {
offset += vecs[0].iov_len; // skip first block
i++; size_t toSkip = fBlockSize;
while (toSkip > 0) {
if (vecs[i].iov_len <= toSkip) {
toSkip -= vecs[i].iov_len;
i++;
} else {
vecOffset = toSkip;
break;
}
}
offset += fBlockSize;
} }
if (HasPartialEnd()) if (HasPartialEnd()) {
vecCount--; // skip last block
size_t toSkip = fBlockSize;
while (toSkip > 0) {
if (vecs[vecCount - 1].iov_len <= toSkip) {
toSkip -= vecs[vecCount - 1].iov_len;
vecCount--;
} else
break;
}
endOffset -= fBlockSize;
}
for (; i < vecCount; i++) { for (; i < vecCount; i++) {
const iovec& vec = vecs[i]; const iovec& vec = vecs[i];
addr_t base = (addr_t)vec.iov_base; addr_t base = (addr_t)vec.iov_base + vecOffset;
size_t length = vec.iov_len - vecOffset;
vecOffset = 0;
if (base >= bounceBufferStart && base < bounceBufferEnd) { if (base >= bounceBufferStart && base < bounceBufferEnd) {
if (offset + length > endOffset)
length = endOffset - offset;
status_t error = fParent->CopyData(offset, status_t error = fParent->CopyData(offset,
bounceBuffer + (base - bounceBufferStart), vec.iov_len); bounceBuffer + (base - bounceBufferStart), length);
if (error != B_OK) if (error != B_OK)
return error; return error;
} }
offset += vec.iov_len;
offset += length;
} }
} }
if (HasPartialBegin())
fPhase = PHASE_READ_BEGIN;
else if (HasPartialEnd())
fPhase = PHASE_READ_END;
_PrepareVecs();
} }
fStatus = 1; fStatus = 1;
@ -275,12 +335,26 @@ IOOperation::SetRange(off_t offset, size_t length)
} }
off_t
IOOperation::Offset() const
{
return fPhase == PHASE_READ_END ? fOffset + fLength - fBlockSize : fOffset;
}
size_t
IOOperation::Length() const
{
return fPhase == PHASE_DO_ALL ? fLength : fBlockSize;
}
iovec* iovec*
IOOperation::Vecs() const IOOperation::Vecs() const
{ {
switch (fPhase) { switch (fPhase) {
case PHASE_READ_END: case PHASE_READ_END:
return fDMABuffer->Vecs() + (fDMABuffer->VecCount() - 1); return fDMABuffer->Vecs() + fSavedVecIndex;
case PHASE_READ_BEGIN: case PHASE_READ_BEGIN:
case PHASE_DO_ALL: case PHASE_DO_ALL:
default: default:
@ -294,8 +368,9 @@ IOOperation::VecCount() const
{ {
switch (fPhase) { switch (fPhase) {
case PHASE_READ_BEGIN: case PHASE_READ_BEGIN:
return fSavedVecIndex + 1;
case PHASE_READ_END: case PHASE_READ_END:
return 1; return fDMABuffer->VecCount() - fSavedVecIndex;
case PHASE_DO_ALL: case PHASE_DO_ALL:
default: default:
return fDMABuffer->VecCount(); return fDMABuffer->VecCount();
@ -306,6 +381,7 @@ IOOperation::VecCount() const
void void
IOOperation::SetPartial(bool partialBegin, bool partialEnd) IOOperation::SetPartial(bool partialBegin, bool partialEnd)
{ {
TRACE("partial begin %d, end %d\n", partialBegin, partialEnd);
fPartialBegin = partialBegin; fPartialBegin = partialBegin;
fPartialEnd = partialEnd; fPartialEnd = partialEnd;
} }
@ -314,7 +390,7 @@ IOOperation::SetPartial(bool partialBegin, bool partialEnd)
bool bool
IOOperation::IsWrite() const IOOperation::IsWrite() const
{ {
return fParent->IsWrite() && fPhase != PHASE_DO_ALL; return fParent->IsWrite() && fPhase == PHASE_DO_ALL;
} }
@ -325,24 +401,64 @@ IOOperation::IsRead() const
} }
status_t void
IOOperation::_CopyPartialBegin(bool isWrite, bool& partialBlockOnly) IOOperation::_PrepareVecs()
{ {
size_t relativeOffset = OriginalOffset() - Offset(); // we need to prepare the vecs for consumption by the drivers
size_t length = fDMABuffer->VecAt(0).iov_len; if (fPhase == PHASE_READ_BEGIN) {
iovec* vecs = fDMABuffer->Vecs();
uint32 vecCount = fDMABuffer->VecCount();
size_t vecLength = fBlockSize;
for (uint32 i = 0; i < vecCount; i++) {
iovec& vec = vecs[i];
if (vec.iov_len >= vecLength) {
fSavedVecIndex = i;
fSavedVecLength = vec.iov_len;
vec.iov_len = vecLength;
break;
}
vecLength -= vec.iov_len;
}
} else if (fPhase == PHASE_READ_END) {
iovec* vecs = fDMABuffer->Vecs();
uint32 vecCount = fDMABuffer->VecCount();
size_t vecLength = fBlockSize;
for (int32 i = vecCount - 1; i >= 0; i--) {
iovec& vec = vecs[i];
if (vec.iov_len >= vecLength) {
fSavedVecIndex = i;
fSavedVecLength = vec.iov_len;
vec.iov_base = (uint8*)vec.iov_base
+ vec.iov_len - vecLength;
vec.iov_len = vecLength;
break;
}
vecLength -= vec.iov_len;
}
}
}
partialBlockOnly = relativeOffset + OriginalLength() <= length;
if (partialBlockOnly) status_t
length = relativeOffset + OriginalLength(); IOOperation::_CopyPartialBegin(bool isWrite, bool& singleBlockOnly)
{
size_t relativeOffset = OriginalOffset() - fOffset;
size_t length = fBlockSize - relativeOffset;
singleBlockOnly = length >= OriginalLength();
if (singleBlockOnly)
length = OriginalLength();
TRACE("_CopyPartialBegin(%s, single only %d)\n",
isWrite ? "write" : "read", singleBlockOnly);
if (isWrite) { if (isWrite) {
return fParent->CopyData(OriginalOffset(), return fParent->CopyData(OriginalOffset(),
(uint8*)fDMABuffer->BounceBuffer() + relativeOffset, (uint8*)fDMABuffer->BounceBuffer() + relativeOffset, length);
length - relativeOffset);
} else { } else {
return fParent->CopyData( return fParent->CopyData(
(uint8*)fDMABuffer->BounceBuffer() + relativeOffset, (uint8*)fDMABuffer->BounceBuffer() + relativeOffset,
OriginalOffset(), length - relativeOffset); OriginalOffset(), length);
} }
} }
@ -350,20 +466,20 @@ IOOperation::_CopyPartialBegin(bool isWrite, bool& partialBlockOnly)
status_t status_t
IOOperation::_CopyPartialEnd(bool isWrite) IOOperation::_CopyPartialEnd(bool isWrite)
{ {
TRACE("_CopyPartialEnd(%s)\n", isWrite ? "write" : "read");
const iovec& lastVec = fDMABuffer->VecAt(fDMABuffer->VecCount() - 1); const iovec& lastVec = fDMABuffer->VecAt(fDMABuffer->VecCount() - 1);
off_t lastVecPos = Offset() + Length() - lastVec.iov_len; off_t lastVecPos = fOffset + fLength - fBlockSize;
if (isWrite) { uint8* base = (uint8*)fDMABuffer->BounceBuffer() + ((addr_t)lastVec.iov_base
return fParent->CopyData(lastVecPos, + lastVec.iov_len - fBlockSize - fDMABuffer->PhysicalBounceBuffer());
(uint8*)fDMABuffer->BounceBuffer() // NOTE: this won't work if we don't use the bounce buffer contiguously
+ ((addr_t)lastVec.iov_base // (because of boundary alignments).
- fDMABuffer->PhysicalBounceBuffer()), size_t length = OriginalOffset() + OriginalLength() - lastVecPos;
OriginalOffset() + OriginalLength() - lastVecPos);
} else { if (isWrite)
return fParent->CopyData((uint8*)fDMABuffer->BounceBuffer() return fParent->CopyData(lastVecPos, base, length);
+ ((addr_t)lastVec.iov_base
- fDMABuffer->PhysicalBounceBuffer()), return fParent->CopyData(base, lastVecPos, length);
lastVecPos, OriginalOffset() + OriginalLength() - lastVecPos);
}
} }
@ -435,8 +551,8 @@ IORequest::ChunkFinished(IORequestChunk* chunk, status_t status)
void void
IORequest::Advance(size_t bySize) IORequest::Advance(size_t bySize)
{ {
dprintf("IORequest::Advance(%lu): remaining: %lu -> %lu\n", bySize, TRACE("IORequest::Advance(%lu): remaining: %lu -> %lu\n", bySize,
fRemainingBytes, fRemainingBytes - bySize); fRemainingBytes, fRemainingBytes - bySize);
fRemainingBytes -= bySize; fRemainingBytes -= bySize;
iovec* vecs = fBuffer->Vecs(); iovec* vecs = fBuffer->Vecs();
@ -540,7 +656,8 @@ IORequest::_CopyData(void* _buffer, off_t offset, size_t size, bool copyIn)
IORequest::_CopySimple(void* bounceBuffer, void* external, size_t size, IORequest::_CopySimple(void* bounceBuffer, void* external, size_t size,
bool copyIn) bool copyIn)
{ {
dprintf(" IORequest::_CopySimple(%p, %p, %lu, %d)\n", bounceBuffer, external, size, copyIn); TRACE(" IORequest::_CopySimple(%p, %p, %lu, %d)\n", bounceBuffer, external,
size, copyIn);
if (copyIn) if (copyIn)
memcpy(bounceBuffer, external, size); memcpy(bounceBuffer, external, size);
else else

View File

@ -105,8 +105,9 @@ public:
// also sets range // also sets range
void SetRange(off_t offset, size_t length); void SetRange(off_t offset, size_t length);
off_t Offset() const { return fOffset; } // TODO: Fix Offset() and Length() for partial write phases!
size_t Length() const { return fLength; } off_t Offset() const;
size_t Length() const;
off_t OriginalOffset() const off_t OriginalOffset() const
{ return fOriginalOffset; } { return fOriginalOffset; }
size_t OriginalLength() const size_t OriginalLength() const
@ -123,26 +124,36 @@ public:
bool IsWrite() const; bool IsWrite() const;
bool IsRead() const; bool IsRead() const;
void SetBlockSize(size_t blockSize)
{ fBlockSize = blockSize; }
bool UsesBounceBuffer() const bool UsesBounceBuffer() const
{ return fDMABuffer->UsesBounceBuffer(); } { return fUsesBounceBuffer; }
void SetUsesBounceBuffer(bool uses)
{ fUsesBounceBuffer = uses; }
DMABuffer* Buffer() const { return fDMABuffer; } DMABuffer* Buffer() const { return fDMABuffer; }
void SetBuffer(DMABuffer* buffer) void SetBuffer(DMABuffer* buffer)
{ fDMABuffer = buffer; } { fDMABuffer = buffer; }
protected: protected:
void _PrepareVecs();
status_t _CopyPartialBegin(bool isWrite, status_t _CopyPartialBegin(bool isWrite,
bool& partialBlockOnly); bool& partialBlockOnly);
status_t _CopyPartialEnd(bool isWrite); status_t _CopyPartialEnd(bool isWrite);
DMABuffer* fDMABuffer; DMABuffer* fDMABuffer;
off_t fOffset; off_t fOffset;
size_t fLength;
off_t fOriginalOffset; off_t fOriginalOffset;
size_t fLength;
size_t fOriginalLength; size_t fOriginalLength;
uint32 fPhase; size_t fBlockSize;
uint16 fSavedVecIndex;
uint16 fSavedVecLength;
uint8 fPhase;
bool fPartialBegin; bool fPartialBegin;
bool fPartialEnd; bool fPartialEnd;
bool fUsesBounceBuffer;
}; };
typedef IOOperation io_operation; typedef IOOperation io_operation;

View File

@ -9,6 +9,8 @@
#include <device_manager.h> #include <device_manager.h>
#include <vm.h>
#include "dma_resources.h" #include "dma_resources.h"
#include "io_requests.h" #include "io_requests.h"
@ -16,18 +18,36 @@
#define DMA_TEST_BLOCK_SIZE 512 #define DMA_TEST_BLOCK_SIZE 512
struct device_manager_info* sDeviceManager; class TestSuite;
static area_id sArea; class TestSuiteContext {
static size_t sAreaSize; public:
static void* sAreaAddress; TestSuiteContext();
static DMAResource* sDMAResource; ~TestSuiteContext();
status_t Init(size_t size);
addr_t DataBase() const { return fDataBase; }
addr_t PhysicalDataBase() const
{ return fPhysicalDataBase; }
addr_t CompareBase() const { return fCompareBase; }
size_t Size() const { return fSize; }
private:
area_id fDataArea;
addr_t fDataBase;
addr_t fPhysicalDataBase;
area_id fCompareArea;
addr_t fCompareBase;
size_t fSize;
};
class Test : public DoublyLinkedListLinkImpl<Test> { class Test : public DoublyLinkedListLinkImpl<Test> {
public: public:
Test(off_t offset, uint8* base, uint8* physicalBase, Test(TestSuite& suite, off_t offset, size_t length,
size_t length, bool isWrite, uint32 flags); bool isWrite, uint32 flags);
Test& AddSource(addr_t base, size_t length); Test& AddSource(addr_t base, size_t length);
Test& NextResult(off_t offset, bool partialBegin, Test& NextResult(off_t offset, bool partialBegin,
@ -38,11 +58,17 @@ public:
void Run(DMAResource& resource); void Run(DMAResource& resource);
private: private:
addr_t _SourceToVirtual(addr_t source);
addr_t _SourceToCompare(addr_t source);
void _Prepare();
void _CheckCompare();
void _CheckWrite();
void _CheckResults();
status_t _DoIO(IOOperation& operation);
void _Panic(const char* message,...); void _Panic(const char* message,...);
TestSuite& fSuite;
off_t fOffset; off_t fOffset;
uint8* fBase;
uint8* fPhysicalBase;
size_t fLength; size_t fLength;
bool fIsWrite; bool fIsWrite;
uint32 fFlags; uint32 fFlags;
@ -70,11 +96,10 @@ typedef DoublyLinkedList<Test> TestList;
class TestSuite { class TestSuite {
public: public:
TestSuite(const char* name, const dma_restrictions& restrictions, TestSuite(TestSuiteContext& context, const char* name,
size_t blockSize, uint8* base, uint8* physicalBase) const dma_restrictions& restrictions, size_t blockSize)
: :
fBase(base), fContext(context)
fPhysicalBase(physicalBase)
{ {
dprintf("----- Run \"%s\" tests ---------------------------\n", name); dprintf("----- Run \"%s\" tests ---------------------------\n", name);
dprintf(" DMA restrictions: address %#lx - %#lx, align %lu, boundary " dprintf(" DMA restrictions: address %#lx - %#lx, align %lu, boundary "
@ -99,8 +124,8 @@ public:
Test& AddTest(off_t offset, size_t length, bool isWrite, uint32 flags) Test& AddTest(off_t offset, size_t length, bool isWrite, uint32 flags)
{ {
Test* test = new(std::nothrow) Test(offset, fBase, fPhysicalBase, Test* test = new(std::nothrow) Test(*this, offset, length, isWrite,
length, isWrite, flags); flags);
fTests.Add(test); fTests.Add(test);
return *test; return *test;
@ -116,20 +141,79 @@ public:
} }
} }
addr_t DataBase() const { return fContext.DataBase(); }
addr_t PhysicalDataBase() const { return fContext.PhysicalDataBase(); }
addr_t CompareBase() const { return fContext.CompareBase(); }
size_t Size() const { return fContext.Size(); }
private: private:
TestSuiteContext& fContext;
DMAResource fDMAResource; DMAResource fDMAResource;
uint8* fBase; uint8* fBase;
uint8* fPhysicalBase; uint8* fPhysicalBase;
size_t fSize;
TestList fTests; TestList fTests;
}; };
Test::Test(off_t offset, uint8* base, uint8* physicalBase, size_t length, struct device_manager_info* sDeviceManager;
bool isWrite, uint32 flags)
static area_id sArea;
static size_t sAreaSize;
static void* sAreaAddress;
static DMAResource* sDMAResource;
TestSuiteContext::TestSuiteContext()
: :
fDataArea(-1),
fCompareArea(-1),
fSize(0)
{
}
TestSuiteContext::~TestSuiteContext()
{
delete_area(fDataArea);
delete_area(fCompareArea);
}
status_t
TestSuiteContext::Init(size_t size)
{
fDataArea = create_area("data buffer", (void**)&fDataBase,
B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (fDataArea < B_OK)
return fDataArea;
physical_entry entry;
get_memory_map((void*)fDataBase, size, &entry, 1);
dprintf("DMA Test area %p, physical %p\n", (void*)fDataBase, entry.address);
fPhysicalDataBase = (addr_t)entry.address;
fCompareArea = create_area("compare buffer", (void**)&fCompareBase,
B_ANY_KERNEL_ADDRESS, size, B_FULL_LOCK,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (fCompareArea < B_OK)
return fCompareArea;
fSize = size;
return B_OK;
}
// #pragma mark -
Test::Test(TestSuite& suite, off_t offset, size_t length, bool isWrite,
uint32 flags)
:
fSuite(suite),
fOffset(offset), fOffset(offset),
fBase(base),
fPhysicalBase(physicalBase),
fLength(length), fLength(length),
fIsWrite(isWrite), fIsWrite(isWrite),
fFlags(flags), fFlags(flags),
@ -144,7 +228,7 @@ Test::AddSource(addr_t address, size_t length)
{ {
fSourceVecs[fSourceCount].iov_base fSourceVecs[fSourceCount].iov_base
= (void*)(((fFlags & B_PHYSICAL_IO_REQUEST) == 0 = (void*)(((fFlags & B_PHYSICAL_IO_REQUEST) == 0
? fBase : fPhysicalBase) + address); ? fSuite.DataBase() : fSuite.PhysicalDataBase()) + address);
fSourceVecs[fSourceCount].iov_len = length; fSourceVecs[fSourceCount].iov_len = length;
fSourceCount++; fSourceCount++;
@ -179,9 +263,220 @@ Test::AddTarget(addr_t base, size_t length, bool usesBounceBuffer)
} }
addr_t
Test::_SourceToVirtual(addr_t source)
{
if ((fFlags & B_PHYSICAL_IO_REQUEST) != 0)
return source - fSuite.PhysicalDataBase() + fSuite.DataBase();
return source;
}
addr_t
Test::_SourceToCompare(addr_t source)
{
if ((fFlags & B_PHYSICAL_IO_REQUEST) != 0)
return source - fSuite.PhysicalDataBase() + fSuite.CompareBase();
return source - fSuite.DataBase() + fSuite.CompareBase();
}
void
Test::_Prepare()
{
// prepare disk
uint8* disk = (uint8*)sAreaAddress;
for (size_t i = 0; i < sAreaSize; i++) {
disk[i] = i % 26 + 'a';
}
// prepare data
memset((void*)fSuite.DataBase(), 0xcc, fSuite.Size());
if (fIsWrite) {
off_t offset = fOffset;
size_t length = fLength;
for (uint32 i = 0; i < fSourceCount; i++) {
uint8* data = (uint8*)_SourceToVirtual(
(addr_t)fSourceVecs[i].iov_base);
size_t vecLength = min_c(fSourceVecs[i].iov_len, length);
for (uint32 j = 0; j < vecLength; j++) {
data[j] = (offset + j) % 10 + '0';
}
offset += vecLength;
length -= vecLength;
}
}
// prepare compare data
memset((void*)fSuite.CompareBase(), 0xcc, fSuite.Size());
if (fIsWrite) {
// copy data from source
off_t offset = fOffset;
size_t length = fLength;
for (uint32 i = 0; i < fSourceCount; i++) {
uint8* compare = (uint8*)_SourceToCompare(
(addr_t)fSourceVecs[i].iov_base);
size_t vecLength = min_c(fSourceVecs[i].iov_len, length);
memcpy(compare,
(void*)_SourceToVirtual((addr_t)fSourceVecs[i].iov_base),
vecLength);
offset += vecLength;
length -= vecLength;
}
} else {
// copy data from drive
off_t offset = fOffset;
size_t length = fLength;
for (uint32 i = 0; i < fSourceCount; i++) {
uint8* compare = (uint8*)_SourceToCompare(
(addr_t)fSourceVecs[i].iov_base);
size_t vecLength = min_c(fSourceVecs[i].iov_len, length);
memcpy(compare, disk + offset, vecLength);
offset += vecLength;
length -= vecLength;
}
}
if (fIsWrite)
_CheckCompare();
}
void
Test::_CheckCompare()
{
uint8* data = (uint8*)fSuite.DataBase();
uint8* compare = (uint8*)fSuite.CompareBase();
for (size_t i = 0; i < fSuite.Size(); i++) {
if (data[i] != compare[i]) {
dprintf("offset %lu differs, %s:\n", i,
fIsWrite ? "write" : "read");
i &= ~63;
dump_block((char*)&data[i], min_c(64, fSuite.Size() - i), " ");
dprintf("should be:\n");
dump_block((char*)&compare[i], min_c(64, fSuite.Size() - i), " ");
_Panic("Data %s differs", fIsWrite ? "write" : "read");
}
}
}
void
Test::_CheckWrite()
{
_CheckCompare();
// check if we overwrote parts we shouldn't have
uint8* disk = (uint8*)sAreaAddress;
for (size_t i = 0; i < sAreaSize; i++) {
if (i >= fOffset && i < fOffset + fLength)
continue;
if (disk[i] != i % 26 + 'a') {
dprintf("disk[i] %c, expected %c, i %lu, fLength + fOffset %Ld\n",
disk[i], i % 26 + 'a', i, fLength + fOffset);
dprintf("offset %lu differs, touched innocent data:\n", i);
i &= ~63;
dump_block((char*)&disk[i], min_c(64, fSuite.Size() - i), " ");
_Panic("Data %s differs", fIsWrite ? "write" : "read");
}
}
// check if the data we wanted to have on disk ended up there
off_t offset = fOffset;
size_t length = fLength;
for (uint32 i = 0; i < fSourceCount; i++) {
uint8* data = (uint8*)_SourceToVirtual(
(addr_t)fSourceVecs[i].iov_base);
size_t vecLength = min_c(fSourceVecs[i].iov_len, length);
for (uint32 j = 0; j < vecLength; j++) {
if (disk[offset + j] != data[j]) {
dprintf("offset %lu differs, found on disk:\n", j);
j &= ~63;
dump_block((char*)&disk[offset + j],
min_c(64, fSuite.Size() - i), " ");
dprintf("should be:\n");
dump_block((char*)&data[j], min_c(64, fSuite.Size() - j), " ");
_Panic("Data write differs");
}
}
offset += vecLength;
length -= vecLength;
}
}
void
Test::_CheckResults()
{
if (fIsWrite)
_CheckWrite();
else
_CheckCompare();
}
status_t
Test::_DoIO(IOOperation& operation)
{
uint8* disk = (uint8*)sAreaAddress;
off_t offset = operation.Offset();
for (uint32 i = 0; i < operation.VecCount(); i++) {
const iovec& vec = operation.Vecs()[i];
addr_t base = (addr_t)vec.iov_base;
size_t length = vec.iov_len;
size_t pageOffset = base & ~(B_PAGE_SIZE - 1);
while (length > 0) {
size_t toCopy = min_c(length, B_PAGE_SIZE - pageOffset);
uint8* virtualAddress;
vm_get_physical_page(base - pageOffset, (addr_t*)&virtualAddress,
PHYSICAL_PAGE_NO_WAIT);
if (operation.IsWrite())
memcpy(disk + offset, virtualAddress + pageOffset, toCopy);
else
memcpy(virtualAddress + pageOffset, disk + offset, toCopy);
length -= toCopy;
offset += toCopy;
pageOffset = 0;
}
}
return B_OK;
}
void void
Test::Run(DMAResource& resource) Test::Run(DMAResource& resource)
{ {
_Prepare();
IORequest request; IORequest request;
status_t status = request.Init(fOffset, fSourceVecs, fSourceCount, status_t status = request.Init(fOffset, fSourceVecs, fSourceCount,
fLength, fIsWrite, fFlags); fLength, fIsWrite, fFlags);
@ -210,10 +505,11 @@ Test::Run(DMAResource& resource)
dprintf(" DMABuffer %p, %lu vecs, bounce buffer: %p (%p) %s\n", buffer, dprintf(" DMABuffer %p, %lu vecs, bounce buffer: %p (%p) %s\n", buffer,
buffer->VecCount(), buffer->BounceBuffer(), buffer->VecCount(), buffer->BounceBuffer(),
(void*)buffer->PhysicalBounceBuffer(), (void*)buffer->PhysicalBounceBuffer(),
buffer->UsesBounceBuffer() ? "used" : "unused"); operation.UsesBounceBuffer() ? "used" : "unused");
for (uint32 i = 0; i < buffer->VecCount(); i++) { for (uint32 i = 0; i < buffer->VecCount(); i++) {
dprintf(" [%lu] base %p, length %lu\n", i, dprintf(" [%lu] base %p, length %lu%s\n", i,
buffer->VecAt(i).iov_base, buffer->VecAt(i).iov_len); buffer->VecAt(i).iov_base, buffer->VecAt(i).iov_len,
buffer->UsesBounceBufferAt(i) ? ", bounce" : "");
} }
dprintf(" remaining bytes: %lu\n", request.RemainingBytes()); dprintf(" remaining bytes: %lu\n", request.RemainingBytes());
@ -236,7 +532,7 @@ Test::Run(DMAResource& resource)
address = (void*)(target.address address = (void*)(target.address
+ (addr_t)buffer->PhysicalBounceBuffer()); + (addr_t)buffer->PhysicalBounceBuffer());
} else } else
address = (void*)(target.address + fPhysicalBase); address = (void*)(target.address + fSuite.PhysicalDataBase());
if (address != vec.iov_base) { if (address != vec.iov_base) {
_Panic("[%lu] address differs: %p, should be %p", i, _Panic("[%lu] address differs: %p, should be %p", i,
@ -244,6 +540,7 @@ Test::Run(DMAResource& resource)
} }
} }
_DoIO(operation);
operation.SetStatus(B_OK); operation.SetStatus(B_OK);
bool finished = operation.Finish(); bool finished = operation.Finish();
bool isPartial = result.partial_begin || result.partial_end; bool isPartial = result.partial_begin || result.partial_end;
@ -252,6 +549,7 @@ Test::Run(DMAResource& resource)
if (!finished) { if (!finished) {
dprintf(" operation not done yet!\n"); dprintf(" operation not done yet!\n");
_DoIO(operation);
operation.SetStatus(B_OK); operation.SetStatus(B_OK);
isPartial = result.partial_begin && result.partial_end; isPartial = result.partial_begin && result.partial_end;
@ -261,6 +559,7 @@ Test::Run(DMAResource& resource)
if (!finished) { if (!finished) {
dprintf(" operation not done yet!\n"); dprintf(" operation not done yet!\n");
_DoIO(operation);
operation.SetStatus(B_OK); operation.SetStatus(B_OK);
if (!operation.Finish()) if (!operation.Finish())
@ -270,6 +569,8 @@ Test::Run(DMAResource& resource)
resultIndex++; resultIndex++;
} }
_CheckResults();
} }
@ -285,7 +586,8 @@ Test::_Panic(const char* message,...)
dprintf("test failed\n"); dprintf("test failed\n");
dprintf(" offset: %lld\n", fOffset); dprintf(" offset: %lld\n", fOffset);
dprintf(" base: %p (physical: %p)\n", fBase, fPhysicalBase); dprintf(" base: %p (physical: %p)\n", (void*)fSuite.DataBase(),
(void*)fSuite.PhysicalDataBase());
dprintf(" length: %lu\n", fLength); dprintf(" length: %lu\n", fLength);
dprintf(" write: %d\n", fIsWrite); dprintf(" write: %d\n", fIsWrite);
dprintf(" flags: %#lx\n", fFlags); dprintf(" flags: %#lx\n", fFlags);
@ -313,7 +615,7 @@ Test::_Panic(const char* message,...)
static void static void
run_tests_no_restrictions(uint8* address, uint8* physicalAddress, size_t size) run_tests_no_restrictions(TestSuiteContext& context)
{ {
const dma_restrictions restrictions = { const dma_restrictions restrictions = {
0x0, // low 0x0, // low
@ -326,38 +628,64 @@ run_tests_no_restrictions(uint8* address, uint8* physicalAddress, size_t size)
0 // flags 0 // flags
}; };
TestSuite suite("no restrictions", restrictions, 512, address, TestSuite suite(context, "no restrictions", restrictions, 512);
physicalAddress);
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST) suite.AddTest(0, 1024, false, B_USER_IO_REQUEST)
.AddSource(0, 1024) .AddSource(0, 1024)
.NextResult(0, false, false) .NextResult(0, false, false)
.AddTarget(0, 1024, false); .AddTarget(0, 1024, false);
suite.AddTest(23, 1024, true, B_USER_IO_REQUEST)
// read partial begin/end
suite.AddTest(23, 1024, false, B_USER_IO_REQUEST)
.AddSource(0, 1024) .AddSource(0, 1024)
.NextResult(0, true, true) .NextResult(0, true, true)
.AddTarget(0, 23, true) .AddTarget(0, 23, true)
.AddTarget(0, 1024, false) .AddTarget(0, 1024, false)
.AddTarget(23, 512 - 23, true) .AddTarget(23, 512 - 23, true);
;
suite.AddTest(0, 1028, true, B_USER_IO_REQUEST) // read less than a block
suite.AddTest(23, 30, false, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, true, true)
.AddTarget(0, 23, true)
.AddTarget(0, 30, false)
.AddTarget(23, 512 - 53, true);
// write begin/end
suite.AddTest(23, 1024, true, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, true, true)
.AddTarget(0, 512, true)
.AddTarget(489, 512, false)
.AddTarget(512, 512, true);
// read partial end, length < iovec length
suite.AddTest(0, 1028, false, B_USER_IO_REQUEST)
.AddSource(0, 512) .AddSource(0, 512)
.AddSource(1024, 516) .AddSource(1024, 1024)
.NextResult(0, false, true) .NextResult(0, false, true)
.AddTarget(0, 512, false) .AddTarget(0, 512, false)
.AddTarget(1024, 516, false) .AddTarget(1024, 516, false)
.AddTarget(0, 508, true); .AddTarget(0, 508, true);
// write partial end, length < iovec length
suite.AddTest(0, 1028, true, B_USER_IO_REQUEST)
.AddSource(0, 512)
.AddSource(1024, 1024)
.NextResult(0, false, true)
.AddTarget(0, 512, false)
.AddTarget(1024, 512, false)
.AddTarget(0, 512, true);
suite.Run(); suite.Run();
} }
static void static void
run_tests_address_restrictions(uint8* address, uint8* physicalAddress, run_tests_address_restrictions(TestSuiteContext& context)
size_t size)
{ {
const dma_restrictions restrictions = { const dma_restrictions restrictions = {
(addr_t)physicalAddress + 512, // low context.PhysicalDataBase() + 512, // low
0, // high 0, // high
0, // alignment 0, // alignment
0, // boundary 0, // boundary
@ -367,7 +695,7 @@ run_tests_address_restrictions(uint8* address, uint8* physicalAddress,
0 // flags 0 // flags
}; };
TestSuite suite("address", restrictions, 512, address, physicalAddress); TestSuite suite(context, "address", restrictions, 512);
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST) suite.AddTest(0, 1024, false, B_USER_IO_REQUEST)
.AddSource(0, 1024) .AddSource(0, 1024)
@ -380,8 +708,7 @@ run_tests_address_restrictions(uint8* address, uint8* physicalAddress,
static void static void
run_tests_alignment_restrictions(uint8* address, uint8* physicalAddress, run_tests_alignment_restrictions(TestSuiteContext& context)
size_t size)
{ {
const dma_restrictions restrictions = { const dma_restrictions restrictions = {
0x0, // low 0x0, // low
@ -394,7 +721,7 @@ run_tests_alignment_restrictions(uint8* address, uint8* physicalAddress,
0 // flags 0 // flags
}; };
TestSuite suite("alignment", restrictions, 512, address, physicalAddress); TestSuite suite(context, "alignment", restrictions, 512);
suite.AddTest(0, 1024, false, B_PHYSICAL_IO_REQUEST) suite.AddTest(0, 1024, false, B_PHYSICAL_IO_REQUEST)
.AddSource(16, 1024) .AddSource(16, 1024)
@ -406,8 +733,7 @@ run_tests_alignment_restrictions(uint8* address, uint8* physicalAddress,
static void static void
run_tests_boundary_restrictions(uint8* address, uint8* physicalAddress, run_tests_boundary_restrictions(TestSuiteContext& context)
size_t size)
{ {
const dma_restrictions restrictions = { const dma_restrictions restrictions = {
0x0, // low 0x0, // low
@ -420,7 +746,7 @@ run_tests_boundary_restrictions(uint8* address, uint8* physicalAddress,
0 // flags 0 // flags
}; };
TestSuite suite("boundary", restrictions, 512, address, physicalAddress); TestSuite suite(context, "boundary", restrictions, 512);
suite.AddTest(0, 2000, false, B_USER_IO_REQUEST) suite.AddTest(0, 2000, false, B_USER_IO_REQUEST)
.AddSource(0, 2048) .AddSource(0, 2048)
@ -434,8 +760,7 @@ run_tests_boundary_restrictions(uint8* address, uint8* physicalAddress,
static void static void
run_tests_segment_restrictions(uint8* address, uint8* physicalAddress, run_tests_segment_restrictions(TestSuiteContext& context)
size_t size)
{ {
const dma_restrictions restrictions = { const dma_restrictions restrictions = {
0x0, // low 0x0, // low
@ -448,41 +773,144 @@ run_tests_segment_restrictions(uint8* address, uint8* physicalAddress,
0 // flags 0 // flags
}; };
TestSuite suite("segment", restrictions, 512, address, physicalAddress); TestSuite suite(context, "segment", restrictions, 512);
#if 0 suite.AddTest(0, 4096, false, B_USER_IO_REQUEST)
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST) .AddSource(0, 4096)
.AddSource(0, 1024) .NextResult(0, false, false)
.NextResult(0, false) .AddTarget(0, 1024, false)
.AddTarget(0, 1024, false); .AddTarget(1024, 1024, false)
#endif .AddTarget(2048, 1024, false)
.AddTarget(3072, 1024, false);
suite.Run(); suite.Run();
} }
static void static void
run_tests_mean_restrictions(uint8* address, uint8* physicalAddress, size_t size) run_tests_transfer_restrictions(TestSuiteContext& context)
{ {
const dma_restrictions restrictions = { const dma_restrictions restrictions = {
(addr_t)physicalAddress + 1024, // low 0x0, // low
0x0, // high 0x0, // high
32, // alignment 0, // alignment
512, // boundary 0, // boundary
2048, // max transfer 1024, // max transfer
2, // max segment count 0, // max segment count
1024, // max segment size 0, // max segment size
0 // flags 0 // flags
}; };
TestSuite suite("mean", restrictions, 512, address, physicalAddress); TestSuite suite(context, "transfer", restrictions, 512);
suite.AddTest(0, 4000, false, B_USER_IO_REQUEST)
.AddSource(0, 4096)
.NextResult(0, false, false)
.AddTarget(0, 1024, false)
.NextResult(0, false, false)
.AddTarget(1024, 1024, false)
.NextResult(0, false, false)
.AddTarget(2048, 1024, false)
.NextResult(0, false, false)
.AddTarget(3072, 1024 - 96, false)
.AddTarget(0, 96, true);
suite.Run();
}
static void
run_tests_interesting_restrictions(TestSuiteContext& context)
{
dma_restrictions restrictions = {
0x0, // low
0x0, // high
32, // alignment
512, // boundary
0, // max transfer
0, // max segment count
0, // max segment size
0 // flags
};
TestSuite suite(context, "interesting", restrictions, 512);
// read with partial begin/end
suite.AddTest(32, 1000, false, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, true, true)
.AddTarget(0, 32, true)
.AddTarget(0, 512, false)
.AddTarget(512, 480, false)
.AddTarget(32, 480, true)
.AddTarget(512, 32, true);
// write with partial begin/end
suite.AddTest(32, 1000, true, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, true, true)
.AddTarget(0, 512, true)
.AddTarget(480, 32, false)
.AddTarget(512, 480, false)
.AddTarget(512, 512, true);
suite.Run();
restrictions = (dma_restrictions){
0x0, // low
0x0, // high
32, // alignment
512, // boundary
0, // max transfer
4, // max segment count
0, // max segment size
0 // flags
};
TestSuite suite2(context, "interesting2", restrictions, 512);
suite2.AddTest(32, 1000, false, B_USER_IO_REQUEST)
.AddSource(0, 1024)
.NextResult(0, true, false)
.AddTarget(0, 32, true)
.AddTarget(0, 512, false)
.AddTarget(512, 480, false)
.NextResult(0, false, true)
.AddTarget(0, 512, true);
suite2.Run();
}
static void
run_tests_mean_restrictions(TestSuiteContext& context)
{
const dma_restrictions restrictions = {
context.PhysicalDataBase() + 1024, // low
0x0, // high
32, // alignment
1024, // boundary
0, // max transfer
2, // max segment count
512, // max segment size
0 // flags
};
TestSuite suite(context, "mean", restrictions, 512);
#if 0
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST) suite.AddTest(0, 1024, false, B_USER_IO_REQUEST)
.AddSource(0, 1024) .AddSource(0, 1024)
.NextResult(0, false) .NextResult(0, false, false)
.AddTarget(0, 1024, false); .AddTarget(0, 512, true)
#endif .AddTarget(512, 512, true);
suite.AddTest(0, 1024, false, B_USER_IO_REQUEST)
.AddSource(1024 + 32, 1024)
.NextResult(0, false, false)
.AddTarget(1024 + 32, 512, false)
.NextResult(0, false, false)
.AddTarget(1568, 480, false)
.AddTarget(1568 + 480, 32, false);
suite.Run(); suite.Run();
} }
@ -491,28 +919,21 @@ run_tests_mean_restrictions(uint8* address, uint8* physicalAddress, size_t size)
static void static void
run_test() run_test()
{ {
size_t size = 1 * 1024 * 1024; TestSuiteContext context;
uint8* address; status_t status = context.Init(4 * B_PAGE_SIZE);
area_id area = create_area("dma source", (void**)&address, if (status != B_OK)
B_ANY_KERNEL_ADDRESS, size, B_CONTIGUOUS,
B_KERNEL_READ_AREA | B_KERNEL_WRITE_AREA);
if (area < B_OK)
return; return;
physical_entry entry; run_tests_no_restrictions(context);
get_memory_map(address, size, &entry, 1); run_tests_address_restrictions(context);
run_tests_alignment_restrictions(context);
run_tests_boundary_restrictions(context);
run_tests_segment_restrictions(context);
run_tests_transfer_restrictions(context);
run_tests_interesting_restrictions(context);
run_tests_mean_restrictions(context);
dprintf("DMA Test area %p, physical %p\n", address, entry.address); panic("All tests passed!");
run_tests_no_restrictions(address, (uint8*)entry.address, size);
run_tests_address_restrictions(address, (uint8*)entry.address, size);
run_tests_alignment_restrictions(address, (uint8*)entry.address, size);
run_tests_boundary_restrictions(address, (uint8*)entry.address, size);
run_tests_segment_restrictions(address, (uint8*)entry.address, size);
run_tests_mean_restrictions(address, (uint8*)entry.address, size);
delete_area(area);
panic("done.");
} }
@ -554,6 +975,7 @@ dma_test_init_driver(device_node *node, void **_driverCookie)
return sArea; return sArea;
*_driverCookie = node; *_driverCookie = node;
run_test(); run_test();
return B_OK; return B_OK;
} }