From 2fc2b436d27500753d17fb4ede98f72c6c203467 Mon Sep 17 00:00:00 2001 From: Ingo Weinhold Date: Sun, 10 Aug 2008 00:44:30 +0000 Subject: [PATCH] Another hard code (DMAResource::TranslateNext()) fix: In the write case if after the main loop the transfer is already block aligned, we still have to check whether the last vec is a bounce buffer that is shorter than a complete block, but exceeds the original end of the request. If so, we have to cut back to the previous block and add a block-sized bounce buffer instead. Actually that's almost the same case as when the transfer length is not yet block aligned, and thus we let the same code handle it. Fixes bug #2584. git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@26910 a95241bf-73f2-0310-859d-f6bbb57e9c96 --- .../kernel/device_manager/dma_resources.cpp | 52 +++++++++++++------ 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/src/system/kernel/device_manager/dma_resources.cpp b/src/system/kernel/device_manager/dma_resources.cpp index 190a58724d..dae96f5670 100644 --- a/src/system/kernel/device_manager/dma_resources.cpp +++ b/src/system/kernel/device_manager/dma_resources.cpp @@ -574,21 +574,45 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation) // If we're writing partially, we always need to have a block sized bounce // buffer (or else we would overwrite memory to be written on the read in // the first phase). 
- if (request->IsWrite() && (dmaLength & (fBlockSize - 1)) != 0) { - size_t diff = dmaLength & (fBlockSize - 1); - TRACE(" partial end write: %lu, diff %lu\n", dmaLength, diff); + off_t requestEnd = request->Offset() + request->Length(); + if (request->IsWrite()) { + size_t diff = dmaLength & (fBlockSize - 1); - _CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff); - dmaLength -= diff; + // If the transfer length is block aligned and we're writing past the + // end of the given data, we still have to check whether the last + // vec is a bounce buffer segment shorter than the block size. If so, we + // have to cut back the complete block and use a bounce buffer for it + // entirely. + if (diff == 0 && offset + dmaLength > requestEnd) { + const iovec& dmaVec = dmaBuffer->VecAt(dmaBuffer->VecCount() - 1); + ASSERT((addr_t)dmaVec.iov_base >= dmaBuffer->PhysicalBounceBuffer() + && (addr_t)dmaVec.iov_base + < dmaBuffer->PhysicalBounceBuffer() + fBounceBufferSize); + // We can be certain that the last vec is a bounce buffer vec, + // since otherwise the DMA buffer couldn't exceed the end of the + // request data. + if (dmaVec.iov_len < fBlockSize) + diff = fBlockSize; + } - if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, - bounceLeft, fBlockSize, true) == 0) { - // If we cannot write anything, we can't process the request at all - TRACE(" adding bounce buffer failed!!!\n"); - if (dmaLength == 0) - return B_BAD_VALUE; - } else - dmaLength += fBlockSize; + if (diff != 0) { + // Not yet block aligned -- cut back to the previous block and add + // a block-sized bounce buffer segment. + TRACE(" partial end write: %lu, diff %lu\n", dmaLength, diff); + + _CutBuffer(*dmaBuffer, physicalBounceBuffer, bounceLeft, diff); + dmaLength -= diff; + + if (_AddBounceBuffer(*dmaBuffer, physicalBounceBuffer, + bounceLeft, fBlockSize, true) == 0) { + // If we cannot write anything, we can't process the request at + // all. 
+ TRACE(" adding bounce buffer failed!!!\n"); + if (dmaLength == 0) + return B_BAD_VALUE; + } else + dmaLength += fBlockSize; + } } // If total length not block aligned, use bounce buffer for padding (read @@ -638,8 +662,6 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation) } } - off_t requestEnd = request->Offset() + request->Length(); - operation->SetBuffer(dmaBuffer); operation->SetBlockSize(fBlockSize); operation->SetOriginalRange(originalOffset,