* DMAResource::TranslateNext(): Added parameter to limit the maximum
  operation length.
* IORequest: Added owner (IORequestOwner). Also added a SetUnfinished()
  method, which is invoked by the I/O scheduler after all operations of
  the request have been finished, but the request isn't done yet.
* Added debugger commands "io_request_owner" and "io_scheduler" printing
  information for an IORequestOwner and IOScheduler object respectively.
* Implemented an actual I/O scheduling algorithm. It's a simple round
  robin strategy (a queue per thread) with a unidirectional elevator
  serializing the operations. ATM priorities are ignored, the bandwidth
  isn't adjusted to the device, and there are TODOs all over the place.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@27216 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Ingo Weinhold 2008-08-28 01:49:18 +00:00
parent ca2a5874fa
commit 0316483f0a
8 changed files with 568 additions and 123 deletions

View File

@ -11,6 +11,8 @@
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <KernelExport.h>
#include <khash.h>
@ -28,10 +30,64 @@
#endif
void
IORequestOwner::Dump() const
{
kprintf("IORequestOwner at %p\n", this);
kprintf(" team: %ld\n", team);
kprintf(" thread: %ld\n", thread);
kprintf(" priority: %ld\n", priority);
kprintf(" requests:");
for (IORequestList::ConstIterator it = requests.GetIterator();
IORequest* request = it.Next();) {
kprintf(" %p", request);
}
kprintf("\n");
kprintf(" completed requests:");
for (IORequestList::ConstIterator it = completed_requests.GetIterator();
IORequest* request = it.Next();) {
kprintf(" %p", request);
}
kprintf("\n");
kprintf(" operations:");
for (IOOperationList::ConstIterator it = operations.GetIterator();
IOOperation* operation = it.Next();) {
kprintf(" %p", operation);
}
kprintf("\n");
}
// #pragma mark -
struct IOScheduler::RequestOwnerHashDefinition {
typedef thread_id KeyType;
typedef IORequestOwner ValueType;
size_t HashKey(thread_id key) const { return key; }
size_t Hash(const IORequestOwner* value) const { return value->thread; }
bool Compare(thread_id key, const IORequestOwner* value) const
{ return value->thread == key; }
HashTableLink<IORequestOwner>* GetLink(IORequestOwner* value) const
{ return value; }
};
struct IOScheduler::RequestOwnerHashTable
: OpenHashTable<RequestOwnerHashDefinition, false> {
};
IOScheduler::IOScheduler(DMAResource* resource)
:
fDMAResource(resource),
fWaiting(false)
fOperationArray(NULL),
fAllocatedRequestOwners(NULL),
fRequestOwners(NULL),
fPendingOperations(0)
{
mutex_init(&fLock, "I/O scheduler");
B_INITIALIZE_SPINLOCK(&fFinisherLock);
@ -47,6 +103,11 @@ IOScheduler::~IOScheduler()
while (IOOperation* operation = fUnusedOperations.RemoveHead())
delete operation;
delete[] fOperationArray;
delete fRequestOwners;
delete[] fAllocatedRequestOwners;
}
@ -66,6 +127,39 @@ IOScheduler::Init(const char* name)
fUnusedOperations.Add(operation);
}
fOperationArray = new(std::nothrow) IOOperation*[count];
if (fDMAResource != NULL)
fBlockSize = fDMAResource->BlockSize();
if (fBlockSize == 0)
fBlockSize = 512;
fAllocatedRequestOwnerCount = thread_max_threads();
fAllocatedRequestOwners
= new(std::nothrow) IORequestOwner[fAllocatedRequestOwnerCount];
if (fAllocatedRequestOwners == NULL)
return B_NO_MEMORY;
for (int32 i = 0; i < fAllocatedRequestOwnerCount; i++) {
IORequestOwner& owner = fAllocatedRequestOwners[i];
owner.team = -1;
owner.thread = -1;
fUnusedRequestOwners.Add(&owner);
}
fRequestOwners = new(std::nothrow) RequestOwnerHashTable;
if (fRequestOwners == NULL)
return B_NO_MEMORY;
status_t error = fRequestOwners->Init(fAllocatedRequestOwnerCount);
if (error != B_OK)
return error;
// TODO: Use a device speed dependent bandwidths!
fIterationBandwidth = fBlockSize * 8192;
fMinOwnerBandwidth = fBlockSize * 1024;
fMaxOwnerBandwidth = fBlockSize * 4096;
// start threads
char buffer[B_OS_NAME_LENGTH];
strlcpy(buffer, name, sizeof(buffer));
@ -106,7 +200,7 @@ IOScheduler::SetCallback(io_callback callback, void* data)
status_t
IOScheduler::ScheduleRequest(IORequest* request)
{
TRACE("IOScheduler::ScheduleRequest(%p)\n", request);
TRACE("%p->IOScheduler::ScheduleRequest(%p)\n", this, request);
IOBuffer* buffer = request->Buffer();
@ -121,8 +215,29 @@ IOScheduler::ScheduleRequest(IORequest* request)
return status;
}
MutexLocker _(fLock);
fUnscheduledRequests.Add(request);
MutexLocker locker(fLock);
struct thread* thread = thread_get_current_thread();
IORequestOwner* owner = _GetRequestOwner(thread->team->id, thread->id,
true);
if (owner == NULL) {
panic("IOScheduler: Out of request owners!\n");
locker.Unlock();
if (buffer->IsVirtual())
buffer->UnlockMemory(request->Team(), request->IsWrite());
return B_NO_MEMORY;
}
bool wasActive = owner->IsActive();
request->SetOwner(owner);
owner->requests.Add(request);
owner->priority = thread->priority;
// TODO: Use the I/O priority instead!
//dprintf(" request %p -> owner %p (thread %ld, active %d)\n", request, owner, owner->thread, wasActive);
if (!wasActive)
fActiveRequestOwners.Add(owner);
fNewRequestCondition.NotifyAll();
return B_OK;
@ -156,12 +271,22 @@ IOScheduler::OperationCompleted(IOOperation* operation, status_t status,
fCompletedOperations.Add(operation);
fFinishedOperationCondition.NotifyAll();
}
if (fWaiting) {
SpinLocker _2(gThreadSpinlock);
thread_interrupt(thread_get_thread_struct_locked(fSchedulerThread),
false);
void
IOScheduler::Dump() const
{
kprintf("IOScheduler at %p\n", this);
kprintf(" DMA resource: %p\n", fDMAResource);
kprintf(" active request owners:");
for (RequestOwnerList::ConstIterator it
= fActiveRequestOwners.GetIterator();
IORequestOwner* owner = it.Next();) {
kprintf(" %p", owner);
}
kprintf("\n");
}
@ -181,10 +306,10 @@ IOScheduler::_Finisher()
if (!operation->Finish()) {
TRACE(" operation: %p not finished yet\n", operation);
MutexLocker _(fLock);
operation->SetTransferredBytes(0);
// TODO: This must be done differently once the scheduler implements
// an actual scheduling policy (other than no-op).
fIOCallback(fIOCallbackData, operation);
operation->Parent()->Owner()->operations.Add(operation);
fPendingOperations--;
continue;
}
@ -205,6 +330,7 @@ IOScheduler::_Finisher()
if (fDMAResource != NULL)
fDMAResource->RecycleBuffer(operation->Buffer());
fPendingOperations--;
fUnusedOperations.Add(operation);
// If the request is done, we need to perform its notifications.
@ -212,16 +338,28 @@ IOScheduler::_Finisher()
if (request->Status() == B_OK && request->RemainingBytes() > 0) {
// The request has been processed OK so far, but it isn't really
// finished yet.
fUnscheduledRequests.Add(request);
fNewRequestCondition.NotifyAll();
} else if (request->HasCallbacks()) {
// The request has callbacks that may take some time to perform,
// so we hand it over to the request notifier.
fFinishedRequests.Add(request);
fFinishedRequestCondition.NotifyAll();
request->SetUnfinished();
} else {
// No callbacks -- finish the request right now.
request->NotifyFinished();
// Remove the request from the request owner.
IORequestOwner* owner = request->Owner();
owner->requests.MoveFrom(&owner->completed_requests);
owner->requests.Remove(request);
request->SetOwner(NULL);
if (!owner->IsActive()) {
fActiveRequestOwners.Remove(owner);
fUnusedRequestOwners.Add(owner);
}
if (request->HasCallbacks()) {
// The request has callbacks that may take some time to
// perform, so we hand it over to the request notifier.
fFinishedRequests.Add(request);
fFinishedRequestCondition.NotifyAll();
} else {
// No callbacks -- finish the request right now.
request->NotifyFinished();
}
}
}
}
@ -237,90 +375,24 @@ IOScheduler::_FinisherWorkPending()
}
IOOperation*
IOScheduler::_GetOperation(bool wait)
{
while (true) {
MutexLocker locker(fLock);
IOOperation* operation = fUnusedOperations.RemoveHead();
if (operation != NULL || !wait)
return operation;
// Wait for new operations. First check whether any finisher work has
// to be done.
InterruptsSpinLocker finisherLocker(fFinisherLock);
if (_FinisherWorkPending()) {
finisherLocker.Unlock();
locker.Unlock();
_Finisher();
continue;
}
ConditionVariableEntry entry;
fFinishedOperationCondition.Add(&entry);
fWaiting = true;
finisherLocker.Unlock();
locker.Unlock();
entry.Wait(B_CAN_INTERRUPT);
fWaiting = false;
_Finisher();
}
}
IORequest*
IOScheduler::_GetNextUnscheduledRequest(bool wait)
{
while (true) {
MutexLocker locker(fLock);
IORequest* request = fUnscheduledRequests.RemoveHead();
if (request != NULL || !wait)
return request;
// Wait for new requests. First check whether any finisher work has
// to be done.
InterruptsSpinLocker finisherLocker(fFinisherLock);
if (_FinisherWorkPending()) {
finisherLocker.Unlock();
locker.Unlock();
_Finisher();
continue;
}
// Wait for new requests.
ConditionVariableEntry entry;
fNewRequestCondition.Add(&entry);
fWaiting = true;
finisherLocker.Unlock();
locker.Unlock();
entry.Wait(B_CAN_INTERRUPT);
fWaiting = false;
_Finisher();
}
}
bool
IOScheduler::_PrepareRequestOperations(IORequest* request,
IOOperationList& operations, int32& operationsPrepared)
IOOperationList& operations, int32& operationsPrepared, off_t quantum,
off_t& usedBandwidth)
{
//dprintf("IOScheduler::_PrepareRequestOperations(%p)\n", request);
usedBandwidth = 0;
if (fDMAResource != NULL) {
while (request->RemainingBytes() > 0) {
IOOperation* operation = _GetOperation(operations.IsEmpty());
while (quantum >= fBlockSize && request->RemainingBytes() > 0) {
IOOperation* operation = fUnusedOperations.RemoveHead();
if (operation == NULL)
return false;
status_t status = fDMAResource->TranslateNext(request,
operation);
status_t status = fDMAResource->TranslateNext(request, operation,
quantum);
if (status != B_OK) {
operation->SetParent(NULL);
MutexLocker locker(fLock);
fUnusedOperations.Add(operation);
// B_BUSY means some resource (DMABuffers or
@ -332,18 +404,38 @@ IOScheduler::_PrepareRequestOperations(IORequest* request,
AbortRequest(request, status);
return true;
}
//dprintf(" prepared operation %p\n", operation);
off_t bandwidth = operation->Length();
quantum -= bandwidth;
usedBandwidth += bandwidth;
operations.Add(operation);
operationsPrepared++;
}
} else {
// TODO: If the device has block size restrictions, we might need to use a
// bounce buffer.
IOOperation* operation = _GetOperation(true);
// TODO: Prepare() can fail!
operation->Prepare(request);
// TODO: If the device has block size restrictions, we might need to use
// a bounce buffer.
IOOperation* operation = fUnusedOperations.RemoveHead();
if (operation == NULL)
return false;
status_t status = operation->Prepare(request);
if (status != B_OK) {
operation->SetParent(NULL);
fUnusedOperations.Add(operation);
AbortRequest(request, status);
return true;
}
operation->SetOriginalRange(request->Offset(), request->Length());
request->Advance(request->Length());
off_t bandwidth = operation->Length();
quantum -= bandwidth;
usedBandwidth += bandwidth;
operations.Add(operation);
operationsPrepared++;
}
@ -351,34 +443,246 @@ IOScheduler::_PrepareRequestOperations(IORequest* request,
}
off_t
IOScheduler::_ComputeRequestOwnerBandwidth(int32 priority) const
{
// TODO: Use a priority dependent quantum!
return fMinOwnerBandwidth;
}
void
IOScheduler::_NextActiveRequestOwner(IORequestOwner*& owner, off_t& quantum)
{
while (true) {
if (owner != NULL)
owner = fActiveRequestOwners.GetNext(owner);
if (owner == NULL)
owner = fActiveRequestOwners.Head();
if (owner != NULL) {
quantum = _ComputeRequestOwnerBandwidth(owner->priority);
return;
}
// Wait for new requests owners. First check whether any finisher work
// has to be done.
InterruptsSpinLocker finisherLocker(fFinisherLock);
if (_FinisherWorkPending()) {
finisherLocker.Unlock();
mutex_unlock(&fLock);
_Finisher();
mutex_lock(&fLock);
continue;
}
// Wait for new requests.
ConditionVariableEntry entry;
fNewRequestCondition.Add(&entry);
finisherLocker.Unlock();
mutex_unlock(&fLock);
entry.Wait(B_CAN_INTERRUPT);
_Finisher();
mutex_lock(&fLock);
}
}
struct OperationComparator {
inline bool operator()(const IOOperation* a, const IOOperation* b)
{
off_t offsetA = a->Offset();
off_t offsetB = b->Offset();
return offsetA < offsetB
|| offsetA == offsetB && a->Length() > b->Length();
}
};
void
IOScheduler::_SortOperations(IOOperationList& operations, off_t& lastOffset)
{
// TODO: _Scheduler() could directly add the operations to the array.
// move operations to an array and sort it
int32 count = 0;
while (IOOperation* operation = operations.RemoveHead())
fOperationArray[count++] = operation;
std::sort(fOperationArray, fOperationArray + count, OperationComparator());
// move the sorted operations to a temporary list we can work with
//dprintf("operations after sorting:\n");
IOOperationList sortedOperations;
for (int32 i = 0; i < count; i++)
//{
//dprintf(" %3ld: %p: offset: %lld, length: %lu\n", i, fOperationArray[i], fOperationArray[i]->Offset(), fOperationArray[i]->Length());
sortedOperations.Add(fOperationArray[i]);
//}
// Sort the operations so that no two adjacent operations overlap. This
// might result in several elevator runs.
while (!sortedOperations.IsEmpty()) {
IOOperation* operation = sortedOperations.Head();
while (operation != NULL) {
IOOperation* nextOperation = sortedOperations.GetNext(operation);
if (operation->Offset() >= lastOffset) {
sortedOperations.Remove(operation);
//dprintf(" adding operation %p\n", operation);
operations.Add(operation);
lastOffset = operation->Offset() + operation->Length();
}
operation = nextOperation;
}
if (!sortedOperations.IsEmpty())
lastOffset = 0;
}
}
status_t
IOScheduler::_Scheduler()
{
// TODO: This is a no-op scheduler. Implement something useful!
IORequestOwner marker;
marker.thread = -1;
{
MutexLocker locker(fLock);
fActiveRequestOwners.Add(&marker, false);
}
off_t lastOffset = 0;
IORequestOwner* owner = NULL;
off_t quantum = 0;
while (true) {
//dprintf("IOScheduler::_Scheduler(): next iteration: request owner: %p, quantum: %lld\n", owner, quantum);
MutexLocker locker(fLock);
IOOperationList operations;
int32 operationCount = 0;
bool resourcesAvailable = true;
off_t iterationBandwidth = fIterationBandwidth;
while (IORequest* request
= _GetNextUnscheduledRequest(operationCount == 0)) {
TRACE("IOScheduler::_Scheduler(): request: %p\n", request);
int32 requestOperations = 0;
if (!_PrepareRequestOperations(request, operations,
requestOperations) && requestOperations == 0) {
// no operation prepared at all -- re-add the request for the
// next round
MutexLocker locker(fLock);
fUnscheduledRequests.Add(request, false);
}
operationCount += requestOperations;
if (owner == NULL) {
owner = fActiveRequestOwners.GetPrevious(&marker);
quantum = 0;
fActiveRequestOwners.Remove(&marker);
}
if (owner == NULL || quantum < fBlockSize)
_NextActiveRequestOwner(owner, quantum);
while (resourcesAvailable && iterationBandwidth >= fBlockSize) {
//dprintf("IOScheduler::_Scheduler(): request owner: %p (thread %ld)\n",
//owner, owner->thread);
// Prepare operations for the owner.
// There might still be unfinished ones.
while (IOOperation* operation = owner->operations.RemoveHead()) {
// TODO: We might actually grant the owner more bandwidth than
// it deserves.
// TODO: We should make sure that after the first read operation
// of a partial write, no other write operation to the same
// location is scheduled!
operations.Add(operation);
operationCount++;
off_t bandwidth = operation->Length();
quantum -= bandwidth;
iterationBandwidth -= bandwidth;
if (quantum < fBlockSize || iterationBandwidth < fBlockSize)
break;
}
while (resourcesAvailable && quantum >= fBlockSize
&& iterationBandwidth >= fBlockSize) {
IORequest* request = owner->requests.Head();
if (request == NULL) {
resourcesAvailable = false;
if (operationCount == 0)
panic("no more requests");
break;
}
off_t bandwidth = 0;
resourcesAvailable = _PrepareRequestOperations(request,
operations, operationCount, quantum, bandwidth);
quantum -= bandwidth;
iterationBandwidth -= bandwidth;
if (request->RemainingBytes() == 0 || request->Status() <= 0) {
// If the request has been completed, move it to the
// completed list, so we don't pick it up again.
owner->requests.Remove(request);
owner->completed_requests.Add(request);
}
}
// Get the next owner.
if (resourcesAvailable)
_NextActiveRequestOwner(owner, quantum);
}
// If the current owner doesn't have anymore requests, we have to
// insert our marker, since the owner will be gone in the next
// iteration.
if (owner->requests.IsEmpty()) {
fActiveRequestOwners.Insert(owner, &marker);
owner = NULL;
}
if (operations.IsEmpty())
continue;
fPendingOperations = operationCount;
locker.Unlock();
// sort the operations
_SortOperations(operations, lastOffset);
// execute the operations
#ifdef TRACE_IO_SCHEDULER
int32 i = 0;
#endif
while (IOOperation* operation = operations.RemoveHead()) {
TRACE("IOScheduler::_Scheduler(): calling callback for "
"operation: %p\n", operation);
"operation %ld: %p\n", i++, operation);
fIOCallback(fIOCallbackData, operation);
_Finisher();
}
// wait for all operations to finish
while (true) {
locker.Lock();
if (fPendingOperations == 0)
break;
// Before waiting first check whether any finisher work has to be
// done.
InterruptsSpinLocker finisherLocker(fFinisherLock);
if (_FinisherWorkPending()) {
finisherLocker.Unlock();
locker.Unlock();
_Finisher();
continue;
}
// wait for finished operations
ConditionVariableEntry entry;
fFinishedOperationCondition.Add(&entry);
finisherLocker.Unlock();
locker.Unlock();
entry.Wait(B_CAN_INTERRUPT);
_Finisher();
}
}
@ -406,7 +710,6 @@ IOScheduler::_RequestNotifier()
if (request == NULL) {
ConditionVariableEntry entry;
fFinishedRequestCondition.Add(&entry);
fWaiting = true;
locker.Unlock();
@ -430,6 +733,38 @@ IOScheduler::_RequestNotifierThread(void *_self)
}
IORequestOwner*
IOScheduler::_GetRequestOwner(team_id team, thread_id thread, bool allocate)
{
// lookup in table
IORequestOwner* owner = fRequestOwners->Lookup(thread);
if (owner != NULL && !owner->IsActive())
fUnusedRequestOwners.Remove(owner);
if (owner != NULL || !allocate)
return owner;
// not in table -- allocate an unused one
RequestOwnerList existingOwners;
while ((owner = fUnusedRequestOwners.RemoveHead()) != NULL) {
if (owner->thread < 0
|| thread_get_thread_struct(owner->thread) == NULL) {
if (owner->thread >= 0)
fRequestOwners->RemoveUnchecked(owner);
owner->team = team;
owner->thread = thread;
fRequestOwners->InsertUnchecked(owner);
break;
}
existingOwners.Add(owner);
}
fUnusedRequestOwners.MoveFrom(&existingOwners);
return owner;
}
/*static*/ status_t
IOScheduler::_IOCallbackWrapper(void* data, io_operation* operation)
{

View File

@ -11,6 +11,7 @@
#include <condition_variable.h>
#include <lock.h>
#include <util/DoublyLinkedList.h>
#include <util/OpenHashTable.h>
#include "dma_resources.h"
#include "io_requests.h"
@ -24,6 +25,24 @@ public:
typedef status_t (*io_callback)(void* data, io_operation* operation);
struct IORequestOwner : DoublyLinkedListLinkImpl<IORequestOwner>,
HashTableLink<IORequestOwner> {
team_id team;
thread_id thread;
int32 priority;
IORequestList requests;
IORequestList completed_requests;
IOOperationList operations;
bool IsActive() const
{ return !requests.IsEmpty()
|| !completed_requests.IsEmpty()
|| !operations.IsEmpty(); }
void Dump() const;
};
class IOScheduler {
public:
IOScheduler(DMAResource* resource);
@ -44,19 +63,38 @@ public:
// has been completed successfully or failed
// for some reason
void Dump() const;
private:
typedef DoublyLinkedList<IORequestOwner> RequestOwnerList;
struct RequestOwnerHashDefinition;
struct RequestOwnerHashTable;
void _Finisher();
bool _FinisherWorkPending();
IOOperation* _GetOperation(bool wait);
IORequest* _GetNextUnscheduledRequest(bool wait);
off_t _ComputeRequestOwnerBandwidth(
int32 priority) const;
void _NextActiveRequestOwner(IORequestOwner*& owner,
off_t& quantum);
bool _PrepareRequestOperations(IORequest* request,
IOOperationList& operations,
int32& operationsPrepared);
bool _PrepareRequestOperations(IORequest* request,
IOOperationList& operations,
int32& operationsPrepared, off_t quantum,
off_t& usedBandwidth);
void _SortOperations(IOOperationList& operations,
off_t& lastOffset);
status_t _Scheduler();
static status_t _SchedulerThread(void* self);
status_t _RequestNotifier();
static status_t _RequestNotifierThread(void* self);
void _AddRequestOwner(IORequestOwner* owner);
IORequestOwner* _GetRequestOwner(team_id team, thread_id thread,
bool allocate);
static status_t _IOCallbackWrapper(void* data,
io_operation* operation);
@ -73,9 +111,19 @@ private:
ConditionVariable fNewRequestCondition;
ConditionVariable fFinishedOperationCondition;
ConditionVariable fFinishedRequestCondition;
IOOperation** fOperationArray;
IOOperationList fUnusedOperations;
IOOperationList fCompletedOperations;
bool fWaiting;
IORequestOwner* fAllocatedRequestOwners;
int32 fAllocatedRequestOwnerCount;
RequestOwnerList fActiveRequestOwners;
RequestOwnerList fUnusedRequestOwners;
RequestOwnerHashTable* fRequestOwners;
size_t fBlockSize;
int32 fPendingOperations;
off_t fIterationBandwidth;
off_t fMinOwnerBandwidth;
off_t fMaxOwnerBandwidth;
};
#endif // IO_SCHEDULER_H

View File

@ -32,6 +32,7 @@
#include "id_generator.h"
#include "io_requests.h"
#include "io_resources.h"
#include "IOScheduler.h"
//#define TRACE_DEVICE_MANAGER
@ -268,6 +269,34 @@ dump_attribute(device_attr* attr, int32 level)
}
static int
dump_io_scheduler(int argc, char** argv)
{
if (argc != 2) {
print_debugger_command_usage(argv[0]);
return 0;
}
IOScheduler* scheduler = (IOScheduler*)parse_expression(argv[1]);
scheduler->Dump();
return 0;
}
static int
dump_io_request_owner(int argc, char** argv)
{
if (argc != 2) {
print_debugger_command_usage(argv[0]);
return 0;
}
IORequestOwner* owner = (IORequestOwner*)parse_expression(argv[1]);
owner->Dump();
return 0;
}
static int
dump_io_request(int argc, char** argv)
{
@ -2212,6 +2241,14 @@ device_manager_init(struct kernel_args* args)
add_debugger_command("dm_tree", &dump_device_nodes,
"dump device node tree");
add_debugger_command_etc("io_scheduler", &dump_io_scheduler,
"Dump an I/O scheduler",
"<scheduler>\n"
"Dumps I/O scheduler at address <scheduler>.\n", 0);
add_debugger_command_etc("io_request_owner", &dump_io_request_owner,
"Dump an I/O request owner",
"<owner>\n"
"Dumps I/O request owner at address <owner>.\n", 0);
add_debugger_command("io_request", &dump_io_request, "dump an I/O request");
add_debugger_command("io_operation", &dump_io_operation,
"dump an I/O operation");

View File

@ -379,12 +379,14 @@ DMAResource::_AddBounceBuffer(DMABuffer& buffer, addr_t& physicalBounceBuffer,
status_t
DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
DMAResource::TranslateNext(IORequest* request, IOOperation* operation,
size_t maxOperationLength)
{
IOBuffer* buffer = request->Buffer();
off_t originalOffset = request->Offset() + request->Length()
- request->RemainingBytes();
off_t offset = originalOffset;
size_t partialBegin = offset & (fBlockSize - 1);
// current iteration state
uint32 vecIndex = request->VecIndex();
@ -392,6 +394,11 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
size_t totalLength = min_c(request->RemainingBytes(),
fRestrictions.max_transfer_size);
if (maxOperationLength > 0
&& maxOperationLength < totalLength + partialBegin) {
totalLength = maxOperationLength - partialBegin;
}
MutexLocker locker(fLock);
DMABuffer* dmaBuffer = fDMABuffers.RemoveHead();
@ -403,7 +410,6 @@ DMAResource::TranslateNext(IORequest* request, IOOperation* operation)
iovec* vecs = NULL;
uint32 segmentCount = 0;
size_t partialBegin = offset & (fBlockSize - 1);
TRACE(" offset %Ld, remaining size: %lu, block size %lu -> partial: %lu\n",
offset, request->RemainingBytes(), fBlockSize, partialBegin);

View File

@ -94,9 +94,11 @@ public:
status_t CreateBounceBuffer(DMABounceBuffer** _buffer);
status_t TranslateNext(IORequest* request,
IOOperation* operation);
IOOperation* operation,
size_t maxOperationLength);
void RecycleBuffer(DMABuffer* buffer);
size_t BlockSize() const { return fBlockSize; }
uint32 BufferCount() const { return fBufferCount; }
private:

View File

@ -634,6 +634,7 @@ IORequest::Init(off_t offset, size_t firstVecOffset, const iovec* vecs,
fBuffer->SetVecs(firstVecOffset, vecs, count, length, flags);
fOwner = NULL;
fOffset = offset;
fLength = length;
fRelativeParentOffset = 0;
@ -937,6 +938,14 @@ IORequest::SubRequestFinished(IORequest* request, status_t status,
}
void
IORequest::SetUnfinished()
{
MutexLocker _(fLock);
ResetStatus();
}
void
IORequest::SetTransferredBytes(bool partialTransfer, size_t transferredBytes)
{
@ -1162,6 +1171,7 @@ IORequest::Dump() const
{
kprintf("io_request at %p\n", this);
kprintf(" owner: %p\n", fOwner);
kprintf(" parent: %p\n", fParent);
kprintf(" status: %s\n", strerror(fStatus));
kprintf(" mutex: %p\n", &fLock);

View File

@ -81,6 +81,7 @@ private:
class IORequest;
class IORequestOwner;
class IORequestChunk {
@ -219,6 +220,10 @@ struct IORequest : IORequestChunk, DoublyLinkedListLinkImpl<IORequest> {
const iovec* vecs, size_t count,
size_t length, bool write, uint32 flags);
void SetOwner(IORequestOwner* owner)
{ fOwner = owner; }
IORequestOwner* Owner() const { return fOwner; }
status_t CreateSubRequest(off_t parentOffset,
off_t offset, size_t length,
IORequest*& subRequest);
@ -248,6 +253,7 @@ struct IORequest : IORequestChunk, DoublyLinkedListLinkImpl<IORequest> {
void SubRequestFinished(IORequest* request,
status_t status, bool partialTransfer,
size_t transferEndOffset);
void SetUnfinished();
size_t RemainingBytes() const
{ return fRemainingBytes; }
@ -299,6 +305,7 @@ private:
size_t size, team_id team, bool copyIn);
mutex fLock;
IORequestOwner* fOwner;
IOBuffer* fBuffer;
off_t fOffset;
size_t fLength;

View File

@ -577,7 +577,7 @@ Test::Run(DMAResource& resource)
if (resultIndex >= fResultCount)
_Panic("no results left");
status_t status = resource.TranslateNext(&request, &operation);
status_t status = resource.TranslateNext(&request, &operation, 0);
if (status != B_OK) {
_Panic("DMAResource::TranslateNext() failed: %s\n",
strerror(status));