removed command queuing and refactored device configuration

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23310 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Marcus Overhagen 2008-01-09 15:37:49 +00:00
parent 3d2595d16a
commit cc91279875
9 changed files with 103 additions and 635 deletions

View File

@ -198,7 +198,6 @@ ata_dpc_DMA(ide_qrequest *qrequest)
if (dma_success && !dev_err) {
// reset error count if DMA worked
device->DMA_failures = 0;
device->CQ_failures = 0;
qrequest->request->data_resid = 0;
finish_checksense(qrequest);
} else {
@ -251,27 +250,6 @@ create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
if (length > 0xffff)
goto err;
if (qrequest->queuable) {
// queued LBA48
device->tf_param_mask = ide_mask_features_48
| ide_mask_sector_count
| ide_mask_LBA_low_48
| ide_mask_LBA_mid_48
| ide_mask_LBA_high_48;
device->tf.queued48.sector_count_0_7 = length & 0xff;
device->tf.queued48.sector_count_8_15 = (length >> 8) & 0xff;
device->tf.queued48.tag = qrequest->tag;
device->tf.queued48.lba_0_7 = pos & 0xff;
device->tf.queued48.lba_8_15 = (pos >> 8) & 0xff;
device->tf.queued48.lba_16_23 = (pos >> 16) & 0xff;
device->tf.queued48.lba_24_31 = (pos >> 24) & 0xff;
device->tf.queued48.lba_32_39 = (pos >> 32) & 0xff;
device->tf.queued48.lba_40_47 = (pos >> 40) & 0xff;
device->tf.queued48.command = write ? IDE_CMD_WRITE_DMA_QUEUED
: IDE_CMD_READ_DMA_QUEUED;
return true;
} else {
// non-queued LBA48
device->tf_param_mask = ide_mask_sector_count_48
| ide_mask_LBA_low_48
@ -288,7 +266,6 @@ create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
device->tf.lba48.lba_40_47 = (pos >> 40) & 0xff;
device->tf.lba48.command = cmd_48[qrequest->uses_dma][write];
return true;
}
} else {
// normal LBA
SHOW_FLOW0(3, "using LBA");
@ -296,26 +273,6 @@ create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
if (length > 0x100)
goto err;
if (qrequest->queuable) {
// queued LBA
SHOW_FLOW( 3, "creating DMA queued command, tag=%d", qrequest->tag );
device->tf_param_mask = ide_mask_features
| ide_mask_sector_count
| ide_mask_LBA_low
| ide_mask_LBA_mid
| ide_mask_LBA_high
| ide_mask_device_head;
device->tf.queued.sector_count = length & 0xff;
device->tf.queued.tag = qrequest->tag;
device->tf.queued.lba_0_7 = pos & 0xff;
device->tf.queued.lba_8_15 = (pos >> 8) & 0xff;
device->tf.queued.lba_16_23 = (pos >> 16) & 0xff;
device->tf.queued.lba_24_27 = (pos >> 24) & 0xf;
device->tf.queued.command = write ? IDE_CMD_WRITE_DMA_QUEUED
: IDE_CMD_READ_DMA_QUEUED;
return true;
} else {
// non-queued LBA
SHOW_FLOW0( 3, "creating normal DMA/PIO command" );
device->tf_param_mask = ide_mask_sector_count
@ -331,7 +288,6 @@ create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
device->tf.lba.lba_24_27 = (pos >> 24) & 0xf;
device->tf.lba.command = cmd_28[qrequest->uses_dma][write];
return true;
}
}
} else {
// CHS mode
@ -402,6 +358,7 @@ ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
if (!prepare_dma(device, qrequest)) {
// fall back to PIO on error
/*
// if command queueing is used and there is another command
// already running, we cannot fallback to PIO immediately -> declare
// command as not queuable and resubmit it, so the scsi bus manager
@ -412,6 +369,7 @@ ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
finish_retry(qrequest);
return;
}
*/
qrequest->uses_dma = false;
}
@ -439,33 +397,6 @@ ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
goto err_send;
if (qrequest->uses_dma) {
// if queuing used, we have to ask device first whether it wants
// to postpone the command
// XXX: using the bus release IRQ we don't have to busy wait for
// a response, but I heard that IBM drives have problems with
// that IRQ; to be evaluated
if (qrequest->queuable) {
if (!wait_for_drdy(device))
goto err_send;
if (check_rw_error(device, qrequest))
goto err_send;
if (device_released_bus(device)) {
// device enqueued command, so we have to wait;
// in access_finished, we'll ask device whether it wants to
// continue some other command
bus->active_qrequest = NULL;
access_finished(bus, device);
// we may have rejected commands meanwhile, so tell
// the SIM that it can resend them now
scsi->cont_send_bus(bus->scsi_cookie);
return;
}
//SHOW_ERROR0( 2, "device executes command instantly" );
}
start_dma_wait_no_lock(device, qrequest);
} else {
@ -736,13 +667,10 @@ configure_rmsn(ide_device_info *device)
static bool
configure_command_queueing(ide_device_info *device)
disable_command_queueing(ide_device_info *device)
{
device->CQ_enabled = device->CQ_supported = false;
if (!device->bus->can_CQ
|| !device->infoblock.DMA_QUEUED_supported)
return initialize_qreq_array(device, 1);
if (!device->infoblock.DMA_QUEUED_supported)
return true;
if (device->infoblock.RELEASE_irq_supported
&& !device_set_feature( device, IDE_CMD_SET_FEATURES_DISABLE_REL_INT))
@ -752,23 +680,18 @@ configure_command_queueing(ide_device_info *device)
&& !device_set_feature(device, IDE_CMD_SET_FEATURES_DISABLE_SERV_INT))
dprintf("Cannot disable service irq\n");
device->CQ_enabled = device->CQ_supported = true;
SHOW_INFO0(2, "Enabled command queueing");
// official IBM docs talk about 31 queue entries, though
// their disks report 32; let's hope their docs are wrong
return initialize_qreq_array(device, device->infoblock.queue_depth + 1);
return true;
}
bool
prep_ata(ide_device_info *device)
status_t
configure_ata_device(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
uint32 chs_capacity;
SHOW_FLOW0(3, "");
TRACE("configure_ata_device\n");
device->is_atapi = false;
device->exec_io = ata_exec_io;
@ -780,12 +703,10 @@ prep_ata(ide_device_info *device)
// we merge it to "CFA bit set" for easier (later) testing
if (*(uint16 *)infoblock == 0x848a)
infoblock->CFA_supported = true;
else
return false;
else
return B_ERROR;
}
SHOW_FLOW0(3, "1");
if (!infoblock->_54_58_valid) {
// normally, current_xxx contains active CHS mapping,
// but if BIOS didn't call INITIALIZE DEVICE PARAMETERS
@ -820,22 +741,12 @@ prep_ata(ide_device_info *device)
if (device->use_48bits)
device->total_sectors = infoblock->LBA48_total_sectors;
SHOW_FLOW0(3, "2");
if (!configure_dma(device)
|| !configure_command_queueing(device)
|| !disable_command_queueing(device)
|| !configure_rmsn(device))
return false;
return B_ERROR;
SHOW_FLOW0(3, "3");
return true;
}
void
enable_CQ(ide_device_info *device, bool enable)
{
return B_OK;
}
@ -865,7 +776,7 @@ ata_read_infoblock(ide_device_info *device, bool isAtapi)
device->tf_param_mask = 0;
device->tf.write.command = isAtapi ? IDE_CMD_IDENTIFY_PACKET_DEVICE : IDE_CMD_IDENTIFY_DEVICE;
if (!send_command(device, NULL, isAtapi ? false : true, 20, ide_state_sync_waiting)) {
if (!send_command(device, NULL, isAtapi ? false : true, 20, ide_state_accessing)) {
TRACE("ata_read_infoblock: send_command failed\n");
goto error;
}
@ -880,7 +791,7 @@ ata_read_infoblock(ide_device_info *device, bool isAtapi)
sizeof(device->infoblock) / sizeof(uint16), false);
if (!wait_for_drqdown(device)) {
TRACE("scan_device_int: wait_for_drqdown failed\n");
TRACE("ata_read_infoblock: wait_for_drqdown failed\n");
goto error;
}

View File

@ -463,18 +463,18 @@ atapi_exec_io(ide_device_info *device, ide_qrequest *qrequest)
/*! Prepare device info for ATAPI device */
bool
prep_atapi(ide_device_info *device)
status_t
configure_atapi_device(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
SHOW_FLOW0(3, "");
dprintf("configure_atapi_device\n");
device->is_atapi = true;
device->exec_io = atapi_exec_io;
if (infoblock->_0.atapi.ATAPI != 2)
return false;
return B_ERROR;
switch(infoblock->_0.atapi.drq_speed) {
case 0:
@ -486,7 +486,7 @@ prep_atapi(ide_device_info *device)
device->atapi.packet_irq_timeout = IDE_STD_TIMEOUT;
break;
default:
return false;
return B_ERROR;
}
SHOW_FLOW(3, "drq speed: %d", infoblock->_0.atapi.drq_speed);
@ -504,13 +504,8 @@ prep_atapi(ide_device_info *device)
// (SCSI bus manager sets LUN there automatically)
device->tf.packet.lun = 0;
if (!initialize_qreq_array(device, 1)
|| !configure_dma(device))
return false;
if (!configure_dma(device))
return B_ERROR;
// currently, we don't support queuing, but I haven't found any
// ATAPI device that supports queuing anyway, so this is no loss
device->CQ_enabled = device->CQ_supported = false;
return true;
return B_OK;
}

View File

@ -96,10 +96,10 @@ reset_bus(ide_bus_info *bus, bool *devicePresent0, uint32 *sigDev0, bool *device
goto error;
if (tf.read.error != 0x01 && tf.read.error != 0x81)
dprintf("ATA: device 0 failed, error code is 0x%02\n", tf.read.error);
dprintf("ATA: device 0 failed, error code is 0x%02x\n", tf.read.error);
if (tf.read.error >= 0x80)
dprintf("ATA: device 0 indicates that device 1 failed, error code is 0x%02\n", tf.read.error);
dprintf("ATA: device 0 indicates that device 1 failed, error code is 0x%02x\n", tf.read.error);
*sigDev0 = tf.lba.sector_count;
*sigDev0 |= ((uint32)tf.lba.lba_0_7) << 8;
@ -126,7 +126,7 @@ reset_bus(ide_bus_info *bus, bool *devicePresent0, uint32 *sigDev0, bool *device
goto error;
if (tf.read.error != 0x01)
dprintf("ATA: device 1 failed, error code is 0x%02\n", tf.read.error);
dprintf("ATA: device 1 failed, error code is 0x%02x\n", tf.read.error);
*sigDev1 = tf.lba.sector_count;
*sigDev1 |= ((uint32)tf.lba.lba_0_7) << 8;
@ -136,7 +136,7 @@ reset_bus(ide_bus_info *bus, bool *devicePresent0, uint32 *sigDev0, bool *device
*sigDev1 = 0;
}
dprintf("ATA: reset_bus success, device 0 signature: 0x%08lx, device 1 signature: 0x%08lx\n", *sigDev0, *sigDev1);
dprintf("ATA: reset_bus done\n");
return B_OK;
@ -370,99 +370,3 @@ ide_wait(ide_device_info *device, int mask, int not_mask,
}
}
/** tell device to continue queued command
* on return, no waiting is active!
* tag - will contain tag of command to be continued
* return: true - request continued
* false - something went wrong; sense set
*/
bool
device_start_service(ide_device_info *device, int *tag)
{
	ide_bus_info *bus = device->bus;

	FAST_LOG1(bus->log, ev_ide_device_start_service, device->is_device1);

	// set up a SERVICE command in the task file; LBA mode is required
	// for the queued-command register layout
	device->tf.write.command = IDE_CMD_SERVICE;
	device->tf.queued.mode = ide_mode_lba;

	if (bus->active_device != device) {
		// don't apply any precautions in terms of IRQ
		// -> the bus is in accessing state, so IRQs are ignored anyway
		if (bus->controller->write_command_block_regs(bus->channel_cookie,
			&device->tf, ide_mask_device_head) != B_OK)
			// on error, pretend that this device asks for service
			// -> the disappeared controller will be recognized soon ;)
			return true;

		bus->active_device = device;

		// give one clock (400 ns) to take notice
		spin(1);
	}

	// here we go...
	if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf,
			ide_mask_command) != B_OK)
		goto err;

	// we need to wait for the device as we want to read the tag
	if (!ide_wait(device, ide_status_drdy, ide_status_bsy, false, 1000000))
		return false;

	// read tag
	// (the tag lives in the sector-count register for queued commands)
	if (bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
			ide_mask_sector_count) != B_OK)
		goto err;

	if (device->tf.queued.release) {
		// bus release is the wrong answer to a service request
		set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
		return false;
	}

	*tag = device->tf.queued.tag;

	FAST_LOG2(bus->log, ev_ide_device_start_service2, device->is_device1, *tag);
	return true;

err:
	// controller register access failed - report it as an internal
	// hardware failure via the emulated sense data
	set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
	return false;
}
/** check device whether it wants to continue queued request */
bool
check_service_req(ide_device_info *device)
{
	ide_bus_info *bus = device->bus;
	int status;

	// fast bailout if there is no request pending
	if (device->num_running_reqs == 0)
		return false;

	if (bus->active_device != device) {
		// don't apply any precautions in terms of IRQ
		// -> the bus is in accessing state, so IRQs are ignored anyway
		if (bus->controller->write_command_block_regs(bus->channel_cookie,
			&device->tf, ide_mask_device_head) != B_OK)
			// on error, pretend that this device asks for service
			// -> the disappeared controller will be recognized soon ;)
			return true;

		bus->active_device = device;

		// give one clock (400 ns) to take notice
		spin(1);
	}

	// NOTE(review): altstatus (rather than status) is presumably read so a
	// pending IRQ is not acknowledged as a side effect - confirm against
	// the controller module contract
	status = bus->controller->get_altstatus(bus->channel_cookie);

	// the service bit indicates the device wants to continue a queued command
	return (status & ide_status_service) != 0;
}

View File

@ -56,16 +56,13 @@ destroy_device(ide_device_info *device)
// paranoia
device->exec_io = NULL;
cancel_timer(&device->reconnect_timer.te);
scsi->free_dpc(device->reconnect_timeout_dpc);
cleanup_device_links(device);
destroy_qreq_array(device);
uninit_synced_pc(&device->reconnect_timeout_synced_pc);
if (device->qreqActive)
dprintf("destroy_device: Warning request still active\n");
free(device->qreqFree);
free(device);
}
@ -120,20 +117,17 @@ create_device(ide_bus_info *bus, bool is_device1)
setup_device_links(bus, device);
device->DMA_failures = 0;
device->CQ_failures = 0;
device->num_failed_send = 0;
device->combined_sense = 0;
device->num_running_reqs = 0;
device->qreqActive = NULL;
device->qreqFree = (ide_qrequest *)malloc(sizeof(ide_qrequest));
memset(device->qreqFree, 0, sizeof(ide_qrequest));
device->qreqFree->running = false;
device->qreqFree->device = device;
device->qreqFree->request = NULL;
device->reconnect_timer.device = device;
init_synced_pc(&device->reconnect_timeout_synced_pc,
reconnect_timeout_worker);
if (scsi->alloc_dpc(&device->reconnect_timeout_dpc) != B_OK)
goto err;
device->total_sectors = 0;
return device;
@ -200,3 +194,15 @@ scan_device(ide_device_info *device, bool isAtapi)
prep_infoblock(device);
return B_OK;
}
/** Configure a freshly scanned device, dispatching to the
 *  ATA or ATAPI specific configuration routine.
 *  Returns B_OK on success, an error code otherwise.
 */
status_t
configure_device(ide_device_info *device, bool isAtapi)
{
	dprintf("ATA: configure_device\n");

	return isAtapi
		? configure_atapi_device(device)
		: configure_ata_device(device);
}

View File

@ -91,18 +91,13 @@ typedef struct ide_device_info {
uint8 use_LBA : 1; // true for LBA, false for CHS
uint8 use_48bits : 1; // true for LBA48
uint8 is_atapi : 1; // true for ATAPI, false for ATA
uint8 CQ_supported : 1; // Command Queuing supported
uint8 CQ_enabled : 1; // Command Queuing enabled
uint8 DMA_supported : 1; // DMA supported
uint8 DMA_enabled : 1; // DMA enabled
uint8 is_device1 : 1; // true for slave, false for master
uint8 queue_depth; // maximum Command Queueing depth
uint8 last_lun; // last LUN
uint8 DMA_failures; // DMA failures in a row
uint8 CQ_failures; // Command Queuing failures during _last_ command
uint8 num_failed_send; // number of consecutive send problems
// next two error codes are copied to request on finish_request & co.
@ -112,9 +107,8 @@ typedef struct ide_device_info {
// pending error codes
uint32 combined_sense; // emulated sense of device
struct ide_qrequest *qreq_array; // array of ide requests
struct ide_qrequest *free_qrequests; // free list
int num_running_reqs; // number of running requests
struct ide_qrequest *qreqActive;
struct ide_qrequest *qreqFree;
struct ide_device_info *other_device; // other device on same bus
@ -139,11 +133,6 @@ typedef struct ide_device_info {
uint8 device_type; // atapi device type
bool reconnect_timer_installed; // true, if reconnect timer is running
ide_device_timer_info reconnect_timer; // reconnect timeout
scsi_dpc_cookie reconnect_timeout_dpc; // dpc fired by timeout
ide_synced_pc reconnect_timeout_synced_pc; // spc fired by dpc
// pio from here on
int left_sg_elem; // remaining sg elements
const physical_entry *cur_sg_elem; // active sg element
@ -175,8 +164,6 @@ typedef struct ide_qrequest {
uint8 running : 1; // true if "on bus"
uint8 uses_dma : 1; // true if using dma
uint8 packet_irq : 1; // true if waiting for command packet irq
uint8 queuable : 1; // true if command queuing is used
uint8 tag; // command queuing tag
} ide_qrequest;
@ -323,8 +310,6 @@ bool ata_is_device_present(ide_bus_info *bus, int device);
bool check_rw_error(ide_device_info *device, ide_qrequest *qrequest);
bool check_output(ide_device_info *device, bool drdy_required, int error_mask, bool is_write);
bool prep_ata(ide_device_info *device);
void enable_CQ(ide_device_info *device, bool enable);
void ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
uint64 pos, size_t length, bool write);
@ -335,11 +320,10 @@ void ata_exec_io(ide_device_info *device, ide_qrequest *qrequest);
status_t ata_read_infoblock(ide_device_info *device, bool isAtapi);
status_t configure_ata_device(ide_device_info *device);
// atapi.c
bool prep_atapi(ide_device_info *device);
status_t configure_atapi_device(ide_device_info *device);
void send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write);
void packet_dpc(ide_qrequest *qrequest);
void atapi_exec_io(ide_device_info *device, ide_qrequest *qrequest);
@ -359,7 +343,6 @@ bool wait_for_drdy(ide_device_info *device);
// timeout in seconds
bool send_command(ide_device_info *device, ide_qrequest *qrequest,
bool need_drdy, uint32 timeout, ide_bus_state new_state);
bool device_start_service( ide_device_info *device, int *tag);
//bool reset_device(ide_device_info *device, ide_qrequest *ignore);
//bool reset_bus(ide_device_info *device, ide_qrequest *ignore);
@ -379,7 +362,7 @@ status_t scan_device(ide_device_info *device, bool isAtapi);
void destroy_device(ide_device_info *device);
ide_device_info *create_device(ide_bus_info *bus, bool is_device1);
status_t configure_device(ide_device_info *device, bool isAtapi);
// dma.c
@ -407,17 +390,6 @@ status_t read_PIO_block(ide_qrequest *qrequest, int length);
status_t write_PIO_block(ide_qrequest *qrequest, int length);
// queuing.c
bool send_abort_queue(ide_device_info *device);
bool try_service(ide_device_info *device);
void reconnect_timeout_worker(ide_bus_info *bus, void *arg);
int32 reconnect_timeout(timer *arg);
bool initialize_qreq_array(ide_device_info *device, int queue_depth);
void destroy_qreq_array(ide_device_info *device);
// sync.c

View File

@ -78,20 +78,10 @@ static void disconnect_worker(ide_bus_info *bus, void *arg);
static void set_check_condition(ide_qrequest *qrequest);
/** check whether this request can be within device */
static inline bool
is_queuable(ide_device_info *device, scsi_ccb *request)
{
	// command queueing support has been removed - no request is queuable
	(void)device;
	(void)request;
	return false;
}
static void
sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
{
ide_device_info *device;
bool queuable;
ide_qrequest *qrequest;
//ide_request_priv *priv;
@ -116,8 +106,6 @@ sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
if (request->target_lun > device->last_lun)
goto err_inv_device;
queuable = is_queuable(device, request);
// grab the bus
ACQUIRE_BEN(&bus->status_report_ben);
IDE_LOCK(bus);
@ -126,27 +114,26 @@ sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
goto err_bus_busy;
// bail out if device can't accept further requests
if (device->free_qrequests == NULL
|| (device->num_running_reqs > 0 && !queuable))
if (device->qreqFree == NULL)
goto err_device_busy;
bus->state = ide_state_accessing;
++bus->num_running_reqs;
IDE_UNLOCK(bus);
RELEASE_BEN(&bus->status_report_ben);
// as we own the bus, noone can bother us
qrequest = device->free_qrequests;
device->free_qrequests = qrequest->next;
qrequest = device->qreqFree;
device->qreqFree = NULL;
device->qreqActive = qrequest;
qrequest->request = request;
qrequest->queuable = queuable;
qrequest->running = true;
qrequest->uses_dma = false;
++device->num_running_reqs;
++bus->num_running_reqs;
bus->active_qrequest = qrequest;
bus->active_qrequest = qrequest; // XXX whats this!?!?!
FAST_LOGN(bus->log, ev_ide_scsi_io_exec, 4, (uint32)qrequest,
(uint32)request, bus->num_running_reqs, device->num_running_reqs);
@ -247,20 +234,32 @@ scan_bus(ide_bus_info *bus)
if (bus->disconnected)
return;
status = reset_bus(bus, &devicePresent[0], &deviceSignature[0], &devicePresent[1], &deviceSignature[1]);
for (i = 0; i < bus->max_devices; ++i) {
if (bus->devices[i])
destroy_device(bus->devices[i]);
}
if (status == B_OK && devicePresent[i]) {
isAtapi = deviceSignature[i] == 0xeb140101;
dprintf("ATA: scan_bus: bus %p, creating device %d\n", bus, i);
device = create_device(bus, i /* isDevice1 */);
if (scan_device(device, isAtapi) != B_OK) {
dprintf("ATA: scan_bus: bus %p, scanning failed, destroying device %d\n", bus, i);
destroy_device(device);
}
status = reset_bus(bus, &devicePresent[0], &deviceSignature[0], &devicePresent[1], &deviceSignature[1]);
for (i = 0; i < bus->max_devices; ++i) {
if (!devicePresent[i])
continue;
isAtapi = deviceSignature[i] == 0xeb140101;
dprintf("ATA: scan_bus: bus %p, creating device %d, signature is 0x%08lx\n",
bus, i, deviceSignature[i]);
device = create_device(bus, i /* isDevice1 */);
if (scan_device(device, isAtapi) != B_OK) {
dprintf("ATA: scan_bus: bus %p, scanning failed, destroying device %d\n", bus, i);
destroy_device(device);
continue;
}
if (configure_device(device, isAtapi) != B_OK) {
dprintf("ATA: scan_bus: bus %p, configure failed, destroying device %d\n", bus, i);
destroy_device(device);
}
}
@ -370,7 +369,6 @@ finish_request(ide_qrequest *qrequest, bool resubmit)
ide_device_info *device = qrequest->device;
ide_bus_info *bus = device->bus;
scsi_ccb *request;
uint num_running;
FAST_LOG2(bus->log, ev_ide_finish_request, (uint32)qrequest, resubmit);
SHOW_FLOW0(3, "");
@ -380,13 +378,11 @@ finish_request(ide_qrequest *qrequest, bool resubmit)
request = qrequest->request;
qrequest->running = false;
qrequest->next = device->free_qrequests;
device->free_qrequests = qrequest;
// num_running is not really correct as the XPT is interested
// in the number of concurrent requests when it was *started* !
num_running = device->num_running_reqs--;
--bus->num_running_reqs;
device->qreqFree = device->qreqActive;
device->qreqActive = NULL;
--bus->num_running_reqs; // XXX borked!!!
// paranoia
bus->active_qrequest = NULL;
@ -405,7 +401,7 @@ finish_request(ide_qrequest *qrequest, bool resubmit)
if (resubmit)
scsi->resubmit(request);
else
scsi->finished(request, num_running);
scsi->finished(request, 1);
RELEASE_BEN(&bus->status_report_ben);
}
@ -472,7 +468,7 @@ finish_reset_queue(ide_qrequest *qrequest)
scsi->block_bus(bus->scsi_cookie);
finish_checksense(qrequest);
send_abort_queue(qrequest->device);
// send_abort_queue(qrequest->device); // XXX fix this
scsi->unblock_bus(bus->scsi_cookie);
}
@ -487,16 +483,14 @@ finish_norelease(ide_qrequest *qrequest, bool resubmit)
{
ide_device_info *device = qrequest->device;
ide_bus_info *bus = device->bus;
uint num_requests;
FAST_LOG2(bus->log, ev_ide_finish_norelease, (uint32)qrequest, resubmit);
qrequest->running = false;
qrequest->next = device->free_qrequests;
device->free_qrequests = qrequest;
num_requests = device->num_running_reqs++;
--bus->num_running_reqs;
device->qreqFree = device->qreqActive;
device->qreqActive = 0;
if (bus->active_qrequest == qrequest)
bus->active_qrequest = NULL;
@ -506,7 +500,7 @@ finish_norelease(ide_qrequest *qrequest, bool resubmit)
if (resubmit)
scsi->resubmit(qrequest->request);
else
scsi->finished(qrequest->request, num_requests);
scsi->finished(qrequest->request, 1);
RELEASE_BEN(&bus->status_report_ben);
}
@ -531,6 +525,8 @@ finish_all_requests(ide_device_info *device, ide_qrequest *ignore,
// the entire bus instead (it won't take that long anyway)
scsi->block_bus(device->bus->scsi_cookie);
// XXX fix this
/*
for (i = 0; i < device->queue_depth; ++i) {
ide_qrequest *qrequest = &device->qreq_array[i];
@ -539,7 +535,7 @@ finish_all_requests(ide_device_info *device, ide_qrequest *ignore,
finish_norelease(qrequest, resubmit);
}
}
*/
scsi->unblock_bus(device->bus->scsi_cookie);
}
@ -635,22 +631,6 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
SHOW_FLOW(2, "can_dma: %d", bus->can_DMA);
if (bus->can_DMA) {
if (pnp->get_attr_uint8(node, IDE_CONTROLLER_CAN_CQ_ITEM, &bus->can_CQ, true) != B_OK) {
// per default, command queuing is supported unless the driver
// reports problems (queuing should be transparent to
// controller, but for sure there is some buggy, over-optimizing
// controller out there)
bus->can_CQ = true;
}
} else {
// I am not sure if it's a problem of the driver or the drive (probably the
// former), but we're generally disabling command queueing in case of PIO
// transfers. Since those should be rare on a real system (as is CQ support
// in the drive), it's not really worth investigating, though.
bus->can_CQ = false;
}
parent = pnp->get_parent(node);
status = pnp->init_driver(parent, bus, (driver_module_info **)&bus->controller,

View File

@ -21,306 +21,6 @@
#define MAX_CQ_FAILURES 3
/** convert tag to request */
static inline ide_qrequest *
tag2request(ide_device_info *device, int tag)
{
	ide_qrequest *qrequest = &device->qreq_array[tag];

	// only a request that is actually on the bus may be continued
	return qrequest->running ? qrequest : NULL;
}
/** service device
*
* (expects locked bus and bus in "accessing" state)
* returns true if servicing a command (implies having bus unlocked)
* returns false on error
*/
static bool
service_device(ide_device_info *device)
{
	ide_qrequest *qrequest;
	int tag;

	SHOW_FLOW0( 3, "Start servicing" );

	// delete timeout first
	// we must unlock bus before cancelling timer: if the timeout has
	// just been fired we have to wait for it, but in turn it waits
	// for the ide bus -> deadlock
	IDE_UNLOCK(device->bus);
	cancel_timer(&device->reconnect_timer.te);

	// between IDE_UNLOCK and cancel_timer the request may got
	// discarded due to timeout, so it's not a hardware problem
	// if servicing fails

	// further, the device discards the entire queue if anything goes
	// wrong, thus we call send_abort_queue on each error
	// (we could also discard the queue without telling the device,
	// but we prefer setting the device into a safe state)

	// ask device to continue
	if (!device_start_service(device, &tag)) {
		send_abort_queue(device);
		goto err;
	}

	SHOW_FLOW0( 3, "device starts service" );

	// get tag of request
	qrequest = tag2request(device, tag);
	if (qrequest == NULL) {
		// the device returned a tag we have no running request for
		send_abort_queue(device);
		goto err;
	}

	SHOW_FLOW( 3, "continue request %p with tag %d", qrequest, tag );

	device->bus->active_qrequest = qrequest;

	// from here on, queuing is ATA read/write specific, so you have to
	// modify that if you want to support ATAPI queuing!
	if (check_rw_error(device, qrequest)) {
		// if a read/write error occured, the request really failed
		finish_reset_queue(qrequest);
		goto err;
	}

	// all ATA commands continue with a DMA request
	if (!prepare_dma(device, qrequest)) {
		// this is effectively impossible: before the command was initially
		// sent, prepare_dma had been called and obviously didn't fail,
		// so why should it fail now?
		device->subsys_status = SCSI_HBA_ERR;
		finish_reset_queue(qrequest);
		goto err;
	}

	SHOW_FLOW0( 3, "launch DMA" );

	start_dma_wait_no_lock(device, qrequest);

	// success implies the bus is left unlocked (DMA wait owns it now)
	return true;

err:
	// don't start timeout - all requests have been discarded at this point
	// re-acquire the lock, restoring the caller's locking invariant
	IDE_LOCK(device->bus);
	return false;
}
/** check if some device on bus wants to continue queued requests;
*
* (expects locked bus and bus in "accessing" state)
* returns true if servicing a command (implies having bus unlocked)
* returns false if nothing to service
*/
bool
try_service(ide_device_info *device)
{
	bool this_device_needs_service;
	ide_device_info *other_device;

	other_device = device->other_device;

	// first check whether current device requests service
	// (the current device is selected anyway, so asking it is fast)
	this_device_needs_service = check_service_req(device);

	// service other device first as it was certainly waiting
	// longer then the current device
	if (other_device != device && check_service_req(other_device)) {
		if (service_device(other_device)) {
			// we handed over control; start timeout for device
			// (see below about fairness)
			if (device->num_running_reqs > 0) {
				if (!device->reconnect_timer_installed) {
					device->reconnect_timer_installed = true;
					add_timer(&device->reconnect_timer.te, reconnect_timeout,
						IDE_RELEASE_TIMEOUT, B_ONE_SHOT_RELATIVE_TIMER);
				}
			}
			return true;
		}
	}

	// service our device second
	if (this_device_needs_service) {
		if (service_device(device))
			return true;
	}

	// if device has pending reqs, start timeout.
	// this may sound strange as we cannot be blamed if the
	// other device blocks us. But: the timeout is delayed until
	// the bus is idle, so once the other device finishes its
	// access, we have a chance of servicing all the pending
	// commands before the timeout handler is executed
	if (device->num_running_reqs > 0) {
		if (!device->reconnect_timer_installed) {
			device->reconnect_timer_installed = true;
			add_timer(&device->reconnect_timer.te, reconnect_timeout,
				IDE_RELEASE_TIMEOUT, B_ONE_SHOT_RELATIVE_TIMER);
		}
	}

	// false means: nothing was serviced, the caller keeps the locked bus
	return false;
}
/** Allocate and initialize the request array of a device for the
 *  given queue depth, building the free list of requests.
 *  Returns false if the allocation failed.
 */
bool
initialize_qreq_array(ide_device_info *device, int queue_depth)
{
	int i;

	device->queue_depth = queue_depth;

	SHOW_FLOW( 3, "queue depth=%d", device->queue_depth );

	// calloc zeroes the array, matching the former malloc+memset
	device->qreq_array = (ide_qrequest *)calloc(queue_depth,
		sizeof(ide_qrequest));
	if (device->qreq_array == NULL)
		return false;

	// chain all entries into the free list, lowest tag first
	device->free_qrequests = NULL;

	for (i = queue_depth - 1; i >= 0; --i) {
		ide_qrequest *qrequest = &device->qreq_array[i];

		qrequest->next = device->free_qrequests;
		device->free_qrequests = qrequest;

		qrequest->running = false;
		qrequest->device = device;
		qrequest->tag = i;
		qrequest->request = NULL;
	}

	return true;
}
/** Release the request array of a device and reset all
 *  queue-related bookkeeping.
 */
void
destroy_qreq_array(ide_device_info *device)
{
	// free(NULL) is a no-op, so no guard is needed
	free(device->qreq_array);

	device->qreq_array = NULL;
	device->free_qrequests = NULL;
	device->num_running_reqs = 0;
	device->queue_depth = 0;
}
/** change maximum number of queuable requests */
/** Change the maximum number of queuable requests.
 *  On success the old array is freed; on failure the previous array and
 *  bookkeeping are fully restored, so the device always keeps a valid
 *  request array.
 */
static bool
change_qreq_array(ide_device_info *device, int queue_depth)
{
	ide_qrequest *qreq_array = device->qreq_array;
	ide_qrequest *old_free_qrequests = device->free_qrequests;
	// save the *current* depth for rollback; the previous code saved the
	// requested depth instead, which left queue_depth describing an array
	// that was never allocated after a failed resize
	int old_queue_depth = device->queue_depth;

	// be very cautious - even if no queuing is supported, we still need
	// one queue entry; if this allocation fails, we have a device that
	// cannot accept any command, which would be odd
	if (initialize_qreq_array( device, queue_depth)) {
		// success: nothing references the old array anymore
		free(qreq_array);
		return true;
	}

	// rollback: initialize_qreq_array failed without touching the old array
	device->qreq_array = qreq_array;
	device->num_running_reqs = 0;
	device->queue_depth = old_queue_depth;
	device->free_qrequests = old_free_qrequests;
	return false;
}
/** reconnect timeout worker
* must be called as a synced procedure call, i.e.
* the bus is allocated for us
*/
void
reconnect_timeout_worker(ide_bus_info *bus, void *arg)
{
	ide_device_info *device = (ide_device_info *)arg;

	// perhaps all requests have been successfully finished
	// when the synced pc was waiting; in this case, everything's fine
	// (this is _very_ important if the other device blocks the bus
	// for a long time - if this leads to a reconnect timeout, the
	// device has a last chance by servicing all requests without
	// delay, in which case this function gets delayed until all
	// pending requests are finished and the following test would
	// make sure that this false alarm gets ignored)
	if (device->num_running_reqs > 0) {
		// if one queued command fails, all of them fail
		send_abort_queue(device);

		// if too many timeouts occur, disable CQ
		// (shrinking the queue to depth 1 effectively turns queueing off)
		if (++device->CQ_failures > MAX_CQ_FAILURES) {
			device->CQ_enabled = false;

			change_qreq_array(device, 1);
		}
	}

	// we've blocked the bus in dpc - undo that
	scsi->unblock_bus(device->bus->scsi_cookie);
}
/** dpc callback for reconnect timeout */
static void
reconnect_timeout_dpc(void *arg)
{
	ide_device_info *device = (ide_device_info *)arg;

	// even though we are in the service thread,
	// the bus can be in use (e.g. by an ongoing PIO command),
	// so we have to issue a synced procedure call which
	// waits for the command to be finished

	// meanwhile, we don't want any command to be issued to this device
	// as we are going to discard the entire device queue;
	// sadly, we don't have a reliable XPT device handle, so we block
	// bus instead (as this is an error handler, so performance is
	// not crucial)
	// (the matching unblock_bus happens in reconnect_timeout_worker)
	scsi->block_bus(device->bus->scsi_cookie);
	schedule_synced_pc(device->bus, &device->reconnect_timeout_synced_pc, device);
}
/** timer function for reconnect timeout */
/** Timer hook for the reconnect timeout.
 *  Runs in interrupt/timer context, so the real work is handed off
 *  to the service thread as a DPC.
 */
int32
reconnect_timeout(timer *arg)
{
	ide_device_info *device = ((ide_device_timer_info *)arg)->device;

	// we are polite and let the service thread do the job
	scsi->schedule_dpc(device->bus->scsi_cookie,
		device->reconnect_timeout_dpc, reconnect_timeout_dpc, device);

	return B_INVOKE_SCHEDULER;
}
/** tell device to abort all queued requests

View File

@ -82,7 +82,7 @@ ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
memset(&control, 0, sizeof(control));
control.RLEC = false;
control.DQue = !device->CQ_enabled;
control.DQue = 1;//!device->CQ_enabled;
control.QErr = false;
// when a command fails we requeue all
// lost commands automagically
@ -112,7 +112,7 @@ ata_mode_select_control_page(ide_device_info *device, ide_qrequest *qrequest,
}
// we only support enabling/disabling command queuing
enable_CQ(device, !page->DQue);
// enable_CQ(device, !page->DQue);
return true;
}
@ -324,7 +324,7 @@ ata_inquiry(ide_device_info *device, ide_qrequest *qrequest)
data.additional_length = sizeof(scsi_res_inquiry) - 4;
data.soft_reset = false;
data.cmd_queue = device->queue_depth > 1;
data.cmd_queue = 0;//device->queue_depth > 1;
data.linked = false;
// these values are free-style

View File

@ -448,8 +448,8 @@ access_finished(ide_bus_info *bus, ide_device_info *device)
// normally, there is always an device; only exception is a
// bus without devices, not sure whether this can really happen though
if (device) {
if (try_service(device))
return;
// if (try_service(device))
// return;
}
// noone wants it, so execute pending synced_pc