Rewrote bus state functions, commented out much code to make it compile.

Moved bus_reset from basic_protocol.c into ata.c


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23324 a95241bf-73f2-0310-859d-f6bbb57e9c96
Marcus Overhagen 2008-01-09 19:26:05 +00:00
parent 952fdf7851
commit a094d41047
10 changed files with 355 additions and 588 deletions
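In short, this commit replaces the old ide_bus_state machine (idle / accessing / async_waiting / sync_waiting) with a simpler ata_bus_state (idle / busy / pio / dma) and routes commands through the new ata_send_command()/ata_wait_*() helpers in ata.c. Below is a minimal sketch of the intended flow using the names from the diff; the example function itself is hypothetical and only illustrates the state transitions, with error paths trimmed.

/* Hypothetical caller, not part of the commit: shows how the new
 * ata_bus_state transitions are meant to be driven. */
static status_t
example_identify(ide_device_info *device)
{
	ide_bus_info *bus = device->bus;

	/* sim_scsi_io() has already moved the bus from ata_state_idle to
	 * ata_state_busy before the command is prepared */
	device->tf_param_mask = 0;
	device->tf.write.command = IDE_CMD_IDENTIFY_DEVICE;

	/* ata_send_command() switches the bus to ata_state_pio (or ata_state_dma) */
	if (ata_send_command(device, NULL, true, 20, ata_state_pio) != B_OK)
		return B_ERROR;

	if (ata_wait_for_drq(bus) != B_OK)
		return B_ERROR;

	bus->controller->read_pio(bus->channel_cookie, (uint16 *)&device->infoblock,
		sizeof(device->infoblock) / sizeof(uint16), false);

	if (ata_wait_for_drqdown(bus) != B_OK)
		return B_ERROR;

	/* once the transfer is done, the bus goes back to ata_state_idle
	 * (error paths would have to do the same; omitted here) */
	IDE_LOCK(bus);
	bus->state = ata_state_idle;
	IDE_UNLOCK(bus);
	return B_OK;
}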

View File

@ -16,6 +16,7 @@
#include "ide_cmds.h"
#define TRACE dprintf
#define FLOW dprintf
void
ata_select_device(ide_bus_info *bus, int device)
@ -30,6 +31,17 @@ ata_select_device(ide_bus_info *bus, int device)
}
void
ata_select(ide_device_info *device)
{
// ata_select_device(device->bus, device->is_device1);
ASSERT(device->is_device1 == device->tf.chs.device);
device->bus->controller->write_command_block_regs(device->bus->channel_cookie, &device->tf, ide_mask_device_head);
}
bool
ata_is_device_present(ide_bus_info *bus, int device)
{
@ -51,21 +63,259 @@ ata_is_device_present(ide_bus_info *bus, int device)
}
/*
void
ata_select_device(ide_device_info *device)
{
ide_task_file tf;
tf.chs.head = 0;
tf.chs.mode = ide_mode_lba;
tf.chs.device = device->is_device1;
device->bus->controller->read_command_block_regs(device->bus->channel_cookie, &tf,
ide_mask_device_head);
spin(1); // wait 400 nsec
}
/** busy-wait for device
* set - bits of status register that must be set
* cleared - bits of status register that must be cleared
* check_err - abort if error bit is set
* timeout - waiting timeout
*/
status_t
ata_wait(ide_bus_info *bus, uint8 set, uint8 cleared,
bool check_err, bigtime_t timeout)
{
bigtime_t startTime = system_time();
bigtime_t elapsedTime;
uint8 status;
spin(1); // device needs 400ns to set status
for (;;) {
status = bus->controller->get_altstatus(bus->channel_cookie);
if (check_err && (status & ide_status_err) != 0)
return B_ERROR;
if ((status & set) == set && (status & cleared) == 0)
return B_OK;
elapsedTime = system_time() - startTime;
if (elapsedTime > timeout)
return B_TIMED_OUT;
if (elapsedTime < 5000)
spin(1);
else
snooze(5000);
}
}
// busy-wait for data request going high
status_t
ata_wait_for_drq(ide_bus_info *bus)
{
return ata_wait(bus, ide_status_drq, 0, true, 10000000);
}
// busy-wait for data request going low
status_t
ata_wait_for_drqdown(ide_bus_info *bus)
{
return ata_wait(bus, 0, ide_status_drq, true, 1000000);
}
// busy-wait for device ready
status_t
ata_wait_for_drdy(ide_bus_info *bus)
{
return ata_wait(bus, ide_status_drdy, ide_status_bsy, false, 5000000);
}
status_t
ata_send_command(ide_device_info *device, ide_qrequest *qrequest,
bool need_drdy, uint32 timeout, ata_bus_state new_state)
{
ide_bus_info *bus = device->bus;
ASSERT((device->tf_param_mask & ide_mask_command) == 0);
ASSERT(new_state == ata_state_pio || new_state == ata_state_dma);
ASSERT(bus->state == ata_state_busy);
ASSERT(new_state == ata_state_pio); // XXX only pio for now
FAST_LOGN(bus->log, ev_ide_send_command, 15, device->is_device1, (uint32)qrequest,
device->tf.raw.r[0], device->tf.raw.r[1], device->tf.raw.r[2],
device->tf.raw.r[3], device->tf.raw.r[4], device->tf.raw.r[5],
device->tf.raw.r[6],
device->tf.raw.r[7], device->tf.raw.r[8], device->tf.raw.r[9],
device->tf.raw.r[10], device->tf.raw.r[11]);
TRACE("ata_send_command: qrequest %p, request %p\n", qrequest, qrequest ? qrequest->request : NULL);
// disable Interrupts for PIO transfers
if (new_state == ata_state_pio) {
if (bus->controller->write_device_control(bus->channel_cookie, ide_devctrl_bit3 | ide_devctrl_nien) != B_OK)
goto err;
}
ata_select(device);
bus->active_device = device;
if (ata_wait(bus, 0, ide_status_bsy | ide_status_drq, false, 50000) != B_OK) {
// resetting the device here would discard the current configuration;
// it's better to let the SCSI bus manager request an external reset.
TRACE("device selection timeout\n");
device->subsys_status = SCSI_SEL_TIMEOUT;
return B_ERROR;
}
if (need_drdy && (bus->controller->get_altstatus(bus->channel_cookie) & ide_status_drdy) == 0) {
TRACE("drdy not set\n");
device->subsys_status = SCSI_SEQUENCE_FAIL;
return B_ERROR;
}
// write parameters
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, device->tf_param_mask) != B_OK)
goto err;
FLOW("Writing command 0x%02x", (int)device->tf.write.command);
IDE_LOCK(bus);
if (new_state == ata_state_dma) {
if (bus->controller->write_device_control(bus->channel_cookie, ide_devctrl_bit3) != B_OK)
goto err_clearint;
}
// start the command
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_command) != B_OK)
goto err_clearint;
ASSERT(bus->state == ata_state_busy);
bus->state = new_state;
IDE_UNLOCK(bus);
return B_OK;
err_clearint:
bus->controller->write_device_control(bus->channel_cookie, ide_devctrl_bit3 | ide_devctrl_nien);
err:
device->subsys_status = SCSI_HBA_ERR;
IDE_UNLOCK(bus);
return B_ERROR;
}
status_t
ata_reset_bus(ide_bus_info *bus, bool *_devicePresent0, uint32 *_sigDev0, bool *_devicePresent1, uint32 *_sigDev1)
{
ide_controller_interface *controller = bus->controller;
ide_channel_cookie channel = bus->channel_cookie;
bool devicePresent0;
bool devicePresent1;
ide_task_file tf;
status_t status;
dprintf("ATA: reset_bus %p\n", bus);
devicePresent0 = ata_is_device_present(bus, 0);
devicePresent1 = ata_is_device_present(bus, 1);
dprintf("ATA: reset_bus: ata_is_device_present device 0, present %d\n", devicePresent0);
dprintf("ATA: reset_bus: ata_is_device_present device 1, present %d\n", devicePresent1);
// disable interrupts and assert SRST for at least 5 usec
if (controller->write_device_control(channel, ide_devctrl_bit3 | ide_devctrl_nien | ide_devctrl_srst) != B_OK)
goto error;
spin(20);
// clear SRST and wait for at least 2 ms (we wait 150 ms like everyone else does)
if (controller->write_device_control(channel, ide_devctrl_bit3 | ide_devctrl_nien) != B_OK)
goto error;
snooze(150000);
if (devicePresent0) {
ata_select_device(bus, 0);
dprintf("altstatus device 0: %x\n", controller->get_altstatus(channel));
// wait up to 31 seconds for busy to clear, abort when error is set
status = ata_wait(bus, 0, ide_status_bsy, false, 31000000);
if (status != B_OK) {
dprintf("ATA: reset_bus: timeout\n");
goto error;
}
if (controller->read_command_block_regs(channel, &tf, ide_mask_sector_count |
ide_mask_LBA_low | ide_mask_LBA_mid | ide_mask_LBA_high | ide_mask_error) != B_OK)
goto error;
if (tf.read.error != 0x01 && tf.read.error != 0x81)
dprintf("ATA: device 0 failed, error code is 0x%02x\n", tf.read.error);
if (tf.read.error >= 0x80)
dprintf("ATA: device 0 indicates that device 1 failed, error code is 0x%02x\n", tf.read.error);
if (_sigDev0) {
*_sigDev0 = tf.lba.sector_count;
*_sigDev0 |= ((uint32)tf.lba.lba_0_7) << 8;
*_sigDev0 |= ((uint32)tf.lba.lba_8_15) << 16;
*_sigDev0 |= ((uint32)tf.lba.lba_16_23) << 24;
}
}
if (devicePresent1) {
ata_select_device(bus, 1);
dprintf("altstatus device 1: %x\n", controller->get_altstatus(channel));
// wait up to 31 seconds for busy to clear, abort when error is set
status = ata_wait(bus, 0, ide_status_bsy, false, 31000000);
if (status != B_OK) {
dprintf("ATA: reset_bus: timeout\n");
goto error;
}
if (controller->read_command_block_regs(channel, &tf, ide_mask_sector_count |
ide_mask_LBA_low | ide_mask_LBA_mid | ide_mask_LBA_high | ide_mask_error) != B_OK)
goto error;
if (tf.read.error != 0x01)
dprintf("ATA: device 1 failed, error code is 0x%02x\n", tf.read.error);
if (_sigDev1) {
*_sigDev1 = tf.lba.sector_count;
*_sigDev1 |= ((uint32)tf.lba.lba_0_7) << 8;
*_sigDev1 |= ((uint32)tf.lba.lba_8_15) << 16;
*_sigDev1 |= ((uint32)tf.lba.lba_16_23) << 24;
}
}
if (_devicePresent0)
*_devicePresent0 = devicePresent0;
if (_devicePresent1)
*_devicePresent1 = devicePresent1;
dprintf("ATA: reset_bus done\n");
return B_OK;
error:
dprintf("ATA: reset_bus failed\n");
return B_ERROR;
}
status_t
ata_reset_device(ide_device_info *device, bool *_devicePresent)
{
// XXX first try to reset the single device here
dprintf("ATA: ata_reset_device %p calling ata_reset_bus\n", device);
return ata_reset_bus(device->bus,
device->is_device1 ? NULL : _devicePresent, NULL,
device->is_device1 ? _devicePresent : NULL, NULL);
}
/** verify that device is ready for further PIO transmission */
@ -92,6 +342,7 @@ check_rw_status(ide_device_info *device, bool drqStatus)
}
#if 0
/** DPC called at
* - beginning of each PIO read/write block
* - end of PIO write transmission
@ -140,7 +391,7 @@ ata_dpc_PIO(ide_qrequest *qrequest)
// so we'd better start waiting early; as we are in the service thread,
// a DPC initiated by an IRQ cannot overtake us, so there is no need to
// block IRQs while the command is sent
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
start_waiting_nolock(device->bus, timeout, ata_state_async_waiting);
// having a too short data buffer shouldn't happen here
// anyway - we are prepared
@ -152,7 +403,7 @@ ata_dpc_PIO(ide_qrequest *qrequest)
} else {
if (device->left_blocks > 1) {
// start async waiting for next command (see above)
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
start_waiting_nolock(device->bus, timeout, ata_state_async_waiting);
}
// see write
@ -183,7 +434,7 @@ finish:
finish_checksense(qrequest);
}
#endif
/** DPC called when IRQ was fired at end of DMA transmission */
void
@ -358,19 +609,6 @@ ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
if (!prepare_dma(device, qrequest)) {
// fall back to PIO on error
/*
// if command queueing is used and there is another command
// already running, we cannot fallback to PIO immediately -> declare
// command as not queuable and resubmit it, so the scsi bus manager
// will block other requests on retry
// (XXX this is not fine if the caller wants to recycle the CCB)
if (device->num_running_reqs > 1) {
qrequest->request->flags &= ~SCSI_ORDERED_QTAG;
finish_retry(qrequest);
return;
}
*/
qrequest->uses_dma = false;
}
}
@ -388,12 +626,8 @@ ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
// in DMA mode, we continue with "accessing",
// on PIO read, we continue with "async waiting"
// on PIO write, we continue with "accessing"
if (!send_command(device, qrequest, !device->is_atapi, timeout,
(!qrequest->uses_dma && !qrequest->is_write) ?
ide_state_async_waiting : ide_state_accessing))
if (ata_send_command(device, qrequest, !device->is_atapi, timeout,
qrequest->uses_dma ? ata_state_dma : ata_state_pio) != B_OK)
goto err_send;
if (qrequest->uses_dma) {
@ -439,6 +673,7 @@ err_send:
bool
check_rw_error(ide_device_info *device, ide_qrequest *qrequest)
{
#if 0
ide_bus_info *bus = device->bus;
uint8 status;
@ -503,7 +738,7 @@ check_rw_error(ide_device_info *device, ide_qrequest *qrequest)
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return true;
}
#endif
return false;
}
@ -518,6 +753,7 @@ bool
check_output(ide_device_info *device, bool drdy_required,
int error_mask, bool is_write)
{
#if 0
ide_bus_info *bus = device->bus;
uint8 status;
@ -602,7 +838,7 @@ check_output(ide_device_info *device, bool drdy_required,
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
#endif
return true;
}
@ -614,23 +850,27 @@ check_output(ide_device_info *device, bool drdy_required,
static bool
device_set_feature(ide_device_info *device, int feature)
{
#if 0
device->tf_param_mask = ide_mask_features;
device->tf.write.features = feature;
device->tf.write.command = IDE_CMD_SET_FEATURES;
if (!send_command(device, NULL, true, 1, ide_state_sync_waiting))
if (!send_command(device, NULL, true, 1, ata_state_sync_waiting))
return false;
wait_for_sync(device->bus);
return check_output(device, true, ide_error_abrt, false);
#endif
return true;
}
static bool
configure_rmsn(ide_device_info *device)
{
#if 0
ide_bus_info *bus = device->bus;
int i;
@ -652,7 +892,7 @@ configure_rmsn(ide_device_info *device)
device->tf_param_mask = 0;
device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;
if (!send_command(device, NULL, true, 15, ide_state_sync_waiting))
if (!send_command(device, NULL, true, 15, ata_state_sync_waiting))
continue;
if (check_output(device, true,
@ -663,6 +903,8 @@ configure_rmsn(ide_device_info *device)
}
return false;
#endif
return true;
}
@ -758,25 +1000,12 @@ ata_read_infoblock(ide_device_info *device, bool isAtapi)
TRACE("ata_read_infoblock: bus %p, device %d, isAtapi %d\n", device->bus, device->is_device1, isAtapi);
// disable interrupts
bus->controller->write_device_control(bus->channel_cookie, ide_devctrl_bit3 | ide_devctrl_nien);
// initialize device selection flags,
// this is the only place where this bit gets initialized in the task file
if (bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_device_head) != B_OK) {
TRACE("ata_read_infoblock: read_command_block_regs failed\n");
goto error;
}
ata_select_device(device->bus, device->is_device1);
device->tf.lba.device = device->is_device1; // XXX fix me
ata_select(device);
device->tf_param_mask = 0;
device->tf.write.command = isAtapi ? IDE_CMD_IDENTIFY_PACKET_DEVICE : IDE_CMD_IDENTIFY_DEVICE;
if (!send_command(device, NULL, isAtapi ? false : true, 20, ide_state_accessing)) {
if (ata_send_command(device, NULL, isAtapi ? false : true, 20, ata_state_pio) != B_OK) {
TRACE("ata_read_infoblock: send_command failed\n");
goto error;
}
@ -790,14 +1019,13 @@ ata_read_infoblock(ide_device_info *device, bool isAtapi)
bus->controller->read_pio(bus->channel_cookie, (uint16 *)&device->infoblock,
sizeof(device->infoblock) / sizeof(uint16), false);
if (!wait_for_drqdown(device)) {
TRACE("ata_read_infoblock: wait_for_drqdown failed\n");
if (ata_wait_for_drqdown(bus) != B_OK) {
TRACE("ata_read_infoblock: ata_wait_for_drqdown failed\n");
goto error;
}
IDE_LOCK(bus);
bus->state = ide_state_idle;
cancel_timer(&bus->timer.te);
bus->state = ata_state_idle;
IDE_UNLOCK(bus);
TRACE("ata_read_infoblock: success\n");
@ -805,8 +1033,7 @@ ata_read_infoblock(ide_device_info *device, bool isAtapi)
error:
IDE_LOCK(bus);
bus->state = ide_state_idle;
cancel_timer(&bus->timer.te);
bus->state = ata_state_idle;
IDE_UNLOCK(bus);
return B_ERROR;
}
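ata_reset_bus() now also hands back the post-reset signature, packed as sector_count | lba_0_7 << 8 | lba_8_15 << 16 | lba_16_23 << 24. The sketch below shows how a caller such as scan_bus() could use that; the helper is hypothetical, and the 0x00000101 / 0xEB140101 constants are the usual ATA/ATAPI reset signatures rather than values taken from this change.

/* Hypothetical sketch, not part of the commit: classify devices from the
 * signature reported by ata_reset_bus(). */
static void
example_scan(ide_bus_info *bus)
{
	bool present[2];
	uint32 signature[2];
	int i;

	if (ata_reset_bus(bus, &present[0], &signature[0],
			&present[1], &signature[1]) != B_OK)
		return;

	for (i = 0; i < 2; i++) {
		if (!present[i])
			continue;
		if (signature[i] == 0xeb140101)
			dprintf("ATA: device %d reports the ATAPI signature\n", i);
		else if (signature[i] == 0x00000101)
			dprintf("ATA: device %d reports the ATA signature\n", i);
		else
			dprintf("ATA: device %d has unexpected signature 0x%08lx\n",
				i, signature[i]);
	}
}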

View File

@ -92,6 +92,7 @@ check_packet_error(ide_device_info *device, ide_qrequest *qrequest)
void
packet_dpc(ide_qrequest *qrequest)
{
#if 0
ide_device_info *device = qrequest->device;
ide_bus_info *bus = device->bus;
int status;
@ -259,6 +260,7 @@ err_cancel_timer:
cancel_irq_timeout(device->bus);
err:
finish_checksense(qrequest);
#endif
}
@ -288,6 +290,7 @@ create_packet_taskfile(ide_device_info *device, ide_qrequest *qrequest,
void
send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write)
{
#if 0
ide_bus_info *bus = device->bus;
bool packet_irq = device->atapi.packet_irq;
uint8 scsi_cmd = device->packet[0];
@ -431,6 +434,7 @@ err_setup:
abort_dma(device, qrequest);
finish_checksense(qrequest);
#endif
}

View File

@ -1,372 +0,0 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
Basic ATA/ATAPI protocol functions
*/
#include "ide_internal.h"
#include <scsi_cmds.h>
#include "ide_sim.h"
#include "ide_cmds.h"
// time in µs an IDE interrupt may get delayed
// as this is used for waiting in normal code, this applies to hardware delays only
// it's used for a hardware bug fix as well, see send_command
#define MAX_IRQ_DELAY 50
// maximum number of send tries before giving up
#define MAX_FAILED_SEND 1
/** busy-wait for data request going high */
bool
wait_for_drq(ide_device_info *device)
{
return ide_wait(device, ide_status_drq, 0, true, 10000000);
}
/** busy-wait for data request going low */
bool
wait_for_drqdown(ide_device_info *device)
{
return ide_wait(device, 0, ide_status_drq, true, 1000000);
}
/** busy-wait for device ready */
bool
wait_for_drdy(ide_device_info *device)
{
return ide_wait(device, ide_status_drdy, ide_status_bsy, false, 5000000);
}
status_t
reset_bus(ide_bus_info *bus, bool *devicePresent0, uint32 *sigDev0, bool *devicePresent1, uint32 *sigDev1)
{
ide_controller_interface *controller = bus->controller;
ide_channel_cookie channel = bus->channel_cookie;
ide_task_file tf;
status_t status;
dprintf("ATA: reset_bus %p\n", bus);
*devicePresent0 = ata_is_device_present(bus, 0);
*devicePresent1 = ata_is_device_present(bus, 1);
dprintf("ATA: reset_bus: ata_is_device_present device 0, present %d\n", *devicePresent0);
dprintf("ATA: reset_bus: ata_is_device_present device 1, present %d\n", *devicePresent1);
// disable interrupts and assert SRST for at least 5 usec
if (controller->write_device_control(channel, ide_devctrl_bit3 | ide_devctrl_nien | ide_devctrl_srst) != B_OK)
goto error;
spin(20);
// clear SRST and wait for at least 2 ms (we wait 150 ms like everyone else does)
if (controller->write_device_control(channel, ide_devctrl_bit3 | ide_devctrl_nien) != B_OK)
goto error;
snooze(150000);
if (*devicePresent0) {
ata_select_device(bus, 0);
dprintf("altstatus device 0: %x\n", controller->get_altstatus(channel));
// wait up to 31 seconds for busy to clear, abort when error is set
status = ata_wait(bus, 0, ide_status_bsy, false, 31000000);
if (status != B_OK) {
dprintf("ATA: reset_bus: timeout\n");
goto error;
}
if (controller->read_command_block_regs(channel, &tf, ide_mask_sector_count |
ide_mask_LBA_low | ide_mask_LBA_mid | ide_mask_LBA_high | ide_mask_error) != B_OK)
goto error;
if (tf.read.error != 0x01 && tf.read.error != 0x81)
dprintf("ATA: device 0 failed, error code is 0x%02x\n", tf.read.error);
if (tf.read.error >= 0x80)
dprintf("ATA: device 0 indicates that device 1 failed, error code is 0x%02x\n", tf.read.error);
*sigDev0 = tf.lba.sector_count;
*sigDev0 |= ((uint32)tf.lba.lba_0_7) << 8;
*sigDev0 |= ((uint32)tf.lba.lba_8_15) << 16;
*sigDev0 |= ((uint32)tf.lba.lba_16_23) << 24;
} else {
*sigDev0 = 0;
}
if (*devicePresent1) {
ata_select_device(bus, 1);
dprintf("altstatus device 1: %x\n", controller->get_altstatus(channel));
// wait up to 31 seconds for busy to clear, abort when error is set
status = ata_wait(bus, 0, ide_status_bsy, false, 31000000);
if (status != B_OK) {
dprintf("ATA: reset_bus: timeout\n");
goto error;
}
if (controller->read_command_block_regs(channel, &tf, ide_mask_sector_count |
ide_mask_LBA_low | ide_mask_LBA_mid | ide_mask_LBA_high | ide_mask_error) != B_OK)
goto error;
if (tf.read.error != 0x01)
dprintf("ATA: device 1 failed, error code is 0x%02x\n", tf.read.error);
*sigDev1 = tf.lba.sector_count;
*sigDev1 |= ((uint32)tf.lba.lba_0_7) << 8;
*sigDev1 |= ((uint32)tf.lba.lba_8_15) << 16;
*sigDev1 |= ((uint32)tf.lba.lba_16_23) << 24;
} else {
*sigDev1 = 0;
}
dprintf("ATA: reset_bus done\n");
return B_OK;
error:
dprintf("ATA: reset_bus failed\n");
return B_ERROR;
}
/** new_state must be either accessing, async_waiting or sync_waiting
* param_mask must not include command register
*/
bool
send_command(ide_device_info *device, ide_qrequest *qrequest,
bool need_drdy, uint32 timeout, ide_bus_state new_state)
{
ide_bus_info *bus = device->bus;
bigtime_t irq_disabled_at = 0; // make compiler happy
uint8 num_retries = 0;
bool irq_guard;
FAST_LOGN(bus->log, ev_ide_send_command, 15, device->is_device1, (uint32)qrequest,
device->tf.raw.r[0], device->tf.raw.r[1], device->tf.raw.r[2],
device->tf.raw.r[3], device->tf.raw.r[4], device->tf.raw.r[5],
device->tf.raw.r[6],
device->tf.raw.r[7], device->tf.raw.r[8], device->tf.raw.r[9],
device->tf.raw.r[10], device->tf.raw.r[11]);
retry:
irq_guard = bus->num_running_reqs > 1;
SHOW_FLOW(3, "qrequest=%p, request=%p", qrequest,
qrequest ? qrequest->request : NULL);
// if there are pending requests, IRQs must be disabled so that
// IRQ reasons don't get mixed up
// XXX can we avoid that with the IDE_LOCK trick? It would
// save some work and make the bug workaround unnecessary!
if (irq_guard) {
if (bus->controller->write_device_control(bus->channel_cookie,
ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
goto err;
irq_disabled_at = system_time();
}
// select device
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_device_head) != B_OK)
goto err;
bus->active_device = device;
if (!ide_wait(device, 0, ide_status_bsy | ide_status_drq, false, 50000)) {
uint8 status;
SHOW_FLOW0(1, "device is not ready");
status = bus->controller->get_altstatus(bus->channel_cookie);
if (status == 0xff) {
// there is no device (should happen during detection only)
SHOW_FLOW0(1, "there is no device");
// device detection recognizes this code as "all hope lost", so
// neither replace it nor use it anywhere else
device->subsys_status = SCSI_TID_INVALID;
return false;
}
/*
// reset device and retry
if (reset_device(device, qrequest) && ++num_retries <= MAX_FAILED_SEND) {
SHOW_FLOW0(1, "retrying");
goto retry;
}
*/
SHOW_FLOW0(1, "giving up");
// reset too often - abort request
device->subsys_status = SCSI_SEL_TIMEOUT;
return false;
}
if (need_drdy
&& (bus->controller->get_altstatus(bus->channel_cookie) & ide_status_drdy) == 0) {
SHOW_FLOW0(3, "drdy not set");
device->subsys_status = SCSI_SEQUENCE_FAIL;
return false;
}
// write parameters
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf,
device->tf_param_mask) != B_OK)
goto err;
if (irq_guard) {
// IRQ may be fired by service requests and by the process of disabling(!)
// them (I heard this is caused by edge triggered PCI IRQs)
// wait at least 50 µs to catch all pending IRQs
// (on my system, up to 30 µs elapsed)
// additionally, old drives (at least my IBM-DTTA-351010) lose
// sync if they are pushed too hard - on heavy overlapped write
// stress this drive tends to forget outstanding requests,
// waiting at least 50 µs seems(!) to solve this
while (system_time() - irq_disabled_at < MAX_IRQ_DELAY)
spin(1);
}
// if we are going to start waiting once the command is sent, we have to
// lock the bus before sending; this way, IRQs that are fired
// shortly before/after sending the command are delayed until the
// command is really sent (start_waiting unlocks the bus), and then
// the IRQ handler can safely check whether the IRQ really signals
// completion of the command by testing the device's busy signal
if (new_state != ide_state_accessing) {
IDE_LOCK(bus);
}
if (irq_guard) {
// now it's clear why IRQs get fired, so we can enable them again
if (bus->controller->write_device_control(bus->channel_cookie,
ide_devctrl_bit3) != B_OK)
goto err1;
}
// write command code - this will start the actual command
SHOW_FLOW(3, "Writing command 0x%02x", (int)device->tf.write.command);
if (bus->controller->write_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_command) != B_OK)
goto err1;
// start waiting now; also un-blocks IRQ handler (see above)
if (new_state != ide_state_accessing)
start_waiting(bus, timeout, new_state);
return true;
err1:
if (timeout > 0) {
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
}
err:
device->subsys_status = SCSI_HBA_ERR;
return false;
}
status_t
ata_wait(ide_bus_info *bus, uint8 mask, uint8 not_mask,
bool check_err, bigtime_t timeout)
{
bigtime_t startTime = system_time();
bigtime_t elapsedTime;
uint8 status;
spin(1); // device needs 400ns to set status
for (;;) {
status = bus->controller->get_altstatus(bus->channel_cookie);
if (check_err && (status & ide_status_err) != 0)
return B_ERROR;
if ((status & mask) == mask && (status & not_mask) == 0)
return B_OK;
elapsedTime = system_time() - startTime;
if (elapsedTime > timeout)
return B_TIMED_OUT;
if (elapsedTime < 5000)
spin(1);
else
snooze(5000);
}
}
/** busy-wait for device
* mask - bits of status register that must be set
* not_mask - bits of status register that must not be set
* check_err - abort if error bit is set
* timeout - waiting timeout
* return: true on success
*/
bool
ide_wait(ide_device_info *device, int mask, int not_mask,
bool check_err, bigtime_t timeout)
{
ide_bus_info *bus = device->bus;
bigtime_t start_time = system_time();
while (1) {
bigtime_t elapsed_time;
int status;
// do spin before test as the device needs 400 ns
// to update its status register
spin(1);
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & mask) == mask && (status & not_mask) == 0)
return true;
if (check_err && (status & ide_status_err) != 0) {
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
elapsed_time = system_time() - start_time;
if (elapsed_time > timeout) {
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
return false;
}
// if we've waited more than 5 ms, we start passive waiting
// to reduce system load
if (elapsed_time > 5000)
snooze(elapsed_time / 10);
}
}

View File

@ -123,13 +123,23 @@ create_device(ide_bus_info *bus, bool is_device1)
device->qreqActive = NULL;
device->qreqFree = (ide_qrequest *)malloc(sizeof(ide_qrequest));
memset(device->qreqFree, 0, sizeof(ide_qrequest));
device->qreqFree->running = false;
device->qreqFree->device = device;
device->qreqFree->request = NULL;
device->total_sectors = 0;
// disable interrupts
bus->controller->write_device_control(bus->channel_cookie, ide_devctrl_bit3 | ide_devctrl_nien);
// make sure LBA bit is set, and initialize device selection flag
device->tf.chs.head = 0;
device->tf.chs.mode = ide_mode_lba;
device->tf.chs.device = is_device1;
bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_device_head);
return device;
err:

View File

@ -60,6 +60,9 @@ configure_dma(ide_device_info *device)
device->DMA_enabled = device->DMA_supported = device->bus->can_DMA
&& get_device_dma_mode(device) != -1;
dprintf("XXX DISABLING DMA\n");
device->DMA_enabled = false;
return true;
}
@ -102,12 +105,14 @@ prepare_dma(ide_device_info *device, ide_qrequest *qrequest)
void
start_dma_wait(ide_device_info *device, ide_qrequest *qrequest)
{
#if 0
ide_bus_info *bus = device->bus;
bus->controller->start_dma(bus->channel_cookie);
start_waiting(bus, qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT, ide_state_async_waiting);
#endif
}

View File

@ -169,12 +169,12 @@ typedef struct ide_qrequest {
// state of ide bus
typedef enum {
ide_state_idle, // no one is using it, but overlapped
// commands may be pending
ide_state_accessing, // bus is in use
ide_state_async_waiting, // waiting for IRQ, to be reported via irq_dpc
ide_state_sync_waiting, // waiting for IRQ, to be reported via sync_wait_sem
} ide_bus_state;
ata_state_idle, // no one is using it
ata_state_busy, // got bus but no command issued yet
ata_state_pio, // bus is executing a PIO command
ata_state_dma // bus is executing a DMA command
} ata_bus_state;
struct ide_bus_info {
ide_qrequest *active_qrequest;
@ -187,13 +187,12 @@ struct ide_bus_info {
spinlock lock;
cpu_status prev_irq_state;
ide_bus_state state; // current state of bus
ata_bus_state state; // current state of bus
benaphore status_report_ben; // to lock when you report XPT about bus state
// i.e. during requeue, resubmit or finished
bool disconnected; // true, if controller is lost
int num_running_reqs; // total number of running requests
scsi_bus scsi_cookie; // cookie for scsi bus
@ -201,9 +200,6 @@ struct ide_bus_info {
scsi_dpc_cookie irq_dpc;
ide_synced_pc *synced_pc_list;
sem_id sync_wait_sem; // released when sync_wait finished
bool sync_wait_timeout; // true, if timeout occurred
ide_device_info *active_device;
ide_device_info *devices[2];
ide_device_info *first_device;
@ -302,10 +298,16 @@ device_released_bus(ide_device_info *device)
// ata.c
//void ata_select_device(ide_device_info *device);
void ata_select_device(ide_bus_info *bus, int device);
void ata_select(ide_device_info *device);
bool ata_is_device_present(ide_bus_info *bus, int device);
status_t ata_wait(ide_bus_info *bus, uint8 set, uint8 not_set, bool check_err, bigtime_t timeout);
status_t ata_wait_for_drq(ide_bus_info *bus);
status_t ata_wait_for_drqdown(ide_bus_info *bus);
status_t ata_wait_for_drdy(ide_bus_info *bus);
status_t ata_reset_bus(ide_bus_info *bus, bool *_devicePresent0, uint32 *_sigDev0, bool *_devicePresent1, uint32 *_sigDev1);
status_t ata_reset_device(ide_device_info *device, bool *_devicePresent);
status_t ata_send_command(ide_device_info *device, ide_qrequest *qrequest, bool need_drdy, uint32 timeout, ata_bus_state new_state);
bool check_rw_error(ide_device_info *device, ide_qrequest *qrequest);
bool check_output(ide_device_info *device, bool drdy_required, int error_mask, bool is_write);
@ -331,25 +333,7 @@ void atapi_exec_io(ide_device_info *device, ide_qrequest *qrequest);
// basic_prot.c
status_t ata_wait(ide_bus_info *bus, uint8 mask, uint8 not_mask, bool check_err, bigtime_t timeout);
bool ide_wait(ide_device_info *device, int mask, int not_mask, bool check_err,
bigtime_t timeout);
bool wait_for_drq(ide_device_info *device);
bool wait_for_drqdown(ide_device_info *device);
bool wait_for_drdy(ide_device_info *device);
// timeout in seconds
bool send_command(ide_device_info *device, ide_qrequest *qrequest,
bool need_drdy, uint32 timeout, ide_bus_state new_state);
//bool reset_device(ide_device_info *device, ide_qrequest *ignore);
//bool reset_bus(ide_device_info *device, ide_qrequest *ignore);
bool check_service_req(ide_device_info *device);
status_t reset_bus(ide_bus_info *bus, bool *devicePresent0, uint32 *sigDev0, bool *devicePresent1, uint32 *sigDev1);
// channel_mgr.c
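For reference, here is a hypothetical SET FEATURES call built on the new prototypes above; device_set_feature() itself is still #if 0'd out in this commit, so this only sketches how ata_send_command() and ata_wait_for_drdy() are meant to be paired, assuming the bus is already in ata_state_busy.

/* Hypothetical sketch, not part of the commit. */
static status_t
example_set_feature(ide_device_info *device, int feature)
{
	/* only the features register is written as a parameter */
	device->tf_param_mask = ide_mask_features;
	device->tf.write.features = feature;
	device->tf.write.command = IDE_CMD_SET_FEATURES;

	/* the timeout follows the old send_command() convention (seconds);
	 * the PIO path keeps interrupts disabled */
	if (ata_send_command(device, NULL, true, 1, ata_state_pio) != B_OK)
		return B_ERROR;

	/* wait for the device to report ready again before using the result */
	return ata_wait_for_drdy(device->bus);
}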

View File

@ -110,16 +110,14 @@ sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
ACQUIRE_BEN(&bus->status_report_ben);
IDE_LOCK(bus);
if (bus->state != ide_state_idle)
if (bus->state != ata_state_idle)
goto err_bus_busy;
// bail out if device can't accept further requests
if (device->qreqFree == NULL)
goto err_device_busy;
bus->state = ide_state_accessing;
++bus->num_running_reqs;
bus->state = ata_state_busy;
IDE_UNLOCK(bus);
RELEASE_BEN(&bus->status_report_ben);
@ -239,7 +237,7 @@ scan_bus(ide_bus_info *bus)
destroy_device(bus->devices[i]);
}
status = reset_bus(bus, &devicePresent[0], &deviceSignature[0], &devicePresent[1], &deviceSignature[1]);
status = ata_reset_bus(bus, &devicePresent[0], &deviceSignature[0], &devicePresent[1], &deviceSignature[1]);
for (i = 0; i < bus->max_devices; ++i) {
if (!devicePresent[i])
@ -303,6 +301,7 @@ sim_reset_bus(ide_bus_info *bus)
if (bus->disconnected)
return SCSI_NO_HBA;
return SCSI_REQ_INVALID;
}
@ -382,8 +381,6 @@ finish_request(ide_qrequest *qrequest, bool resubmit)
device->qreqFree = device->qreqActive;
device->qreqActive = NULL;
--bus->num_running_reqs; // XXX borked!!!
// paranoia
bus->active_qrequest = NULL;
@ -514,7 +511,6 @@ void
finish_all_requests(ide_device_info *device, ide_qrequest *ignore,
int subsys_status, bool resubmit)
{
int i;
if (device == NULL)
return;
@ -558,7 +554,6 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
memset(bus, 0, sizeof(*bus));
bus->node = node;
bus->lock = 0;
bus->num_running_reqs = 0;
bus->active_qrequest = NULL;
bus->disconnected = false;
@ -581,7 +576,7 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
init_synced_pc(&bus->disconnect_syncinfo, disconnect_worker);
bus->scsi_cookie = user_cookie;
bus->state = ide_state_idle;
bus->state = ata_state_idle;
bus->timer.bus = bus;
bus->synced_pc_list = NULL;
@ -589,11 +584,6 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
goto err1;
bus->active_device = NULL;
bus->sync_wait_sem = create_sem(0, "ide_sync_wait");
if (bus->sync_wait_sem < 0) {
status = bus->sync_wait_sem;
goto err2;
}
bus->devices[0] = bus->devices[1] = NULL;
@ -642,13 +632,6 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
*cookie = bus;
/*
// make sure LBA bit is set
bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_device_head);
tf.chs.mode = ide_mode_lba;
bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_device_head);
*/
// detect devices
scan_bus(bus);
return B_OK;
@ -656,9 +639,6 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
err5:
DELETE_BEN(&bus->status_report_ben);
err4:
err3:
delete_sem(bus->sync_wait_sem);
err2:
scsi->free_dpc(bus->irq_dpc);
err1:
uninit_synced_pc(&bus->disconnect_syncinfo);
@ -684,7 +664,6 @@ ide_sim_uninit_bus(ide_bus_info *bus)
pnp->put_device_node(parent);
DELETE_BEN(&bus->status_report_ben);
delete_sem(bus->sync_wait_sem);
scsi->free_dpc(bus->irq_dpc);
uninit_synced_pc(&bus->disconnect_syncinfo);
// fast_log->stop_log(bus->log);

View File

@ -1,75 +0,0 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open IDE bus manager
Command queuing functions
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include "ide_cmds.h"
#include <string.h>
#include <malloc.h>
// maximum number of errors until command queuing is disabled
#define MAX_CQ_FAILURES 3
/** tell device to abort all queued requests
* (tells XPT to resubmit these requests)
* return: true - abort successful
* false - abort failed (in this case, nothing can be done)
*/
bool
send_abort_queue(ide_device_info *device)
{
int status;
ide_bus_info *bus = device->bus;
SHOW_FLOW0( 3, "" );
device->tf.write.command = IDE_CMD_NOP;
// = discard outstanding commands
device->tf.write.features = IDE_CMD_NOP_NOP;
device->tf_param_mask = ide_mask_features;
if (!send_command(device, NULL, true, 0, ide_state_accessing))
goto err;
if (!wait_for_drdy(device))
goto err;
// device must answer "command rejected" and discard outstanding commands
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & ide_status_err) == 0)
goto err;
if (!bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_error)) {
// don't bother trying bus_reset as controller disappeared
device->subsys_status = SCSI_HBA_ERR;
return false;
}
if ((device->tf.read.error & ide_error_abrt) == 0)
goto err;
finish_all_requests(device, NULL, 0, true);
return true;
err:
// ouch! device didn't react - we have to reset it
//return reset_device(device, NULL);
return false;
}

View File

@ -205,6 +205,7 @@ err:
static bool
ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
{
#if 0
SHOW_FLOW0(3, "");
if (!device->infoblock.RMSN_supported
@ -229,6 +230,7 @@ ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
;
}
#endif
return true;
}
@ -237,6 +239,7 @@ ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
static bool
ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
{
#if 0
// we should also ask for FLUSH CACHE support, but everyone denies it
// (looks like they cheat to gain some performance advantage, but
// that's pretty useless: everyone does it...)
@ -254,6 +257,8 @@ ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
wait_for_sync(device->bus);
return check_output(device, true, ide_error_abrt, false);
#endif
return false;
}
@ -263,6 +268,7 @@ ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
static bool
ata_load_eject(ide_device_info *device, ide_qrequest *qrequest, bool load)
{
#if 0
if (load) {
// ATA doesn't support loading
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_NOT_SUPPORTED);
@ -278,6 +284,8 @@ ata_load_eject(ide_device_info *device, ide_qrequest *qrequest, bool load)
wait_for_sync(device->bus);
return check_output(device, true, ide_error_abrt | ide_error_nm, false);
#endif
return false;
}

View File

@ -30,6 +30,7 @@
void
ide_dpc(void *arg)
{
#if 0
ide_bus_info *bus = (ide_bus_info *)arg;
ide_qrequest *qrequest;
ide_device_info *device;
@ -88,6 +89,7 @@ ide_dpc(void *arg)
/*err:
xpt->cont_send( bus->xpt_cookie );*/
#endif
}
@ -96,17 +98,10 @@ ide_dpc(void *arg)
status_t
ide_irq_handler(ide_bus_info *bus, uint8 status)
{
return B_UNHANDLED_INTERRUPT;
/*
ide_device_info *device;
// we need to lock bus to have a solid bus state
// (side effect: we lock out the timeout handler and get
// delayed if the IRQ happens at the same time as a command is
// issued; in the latter case, we have no official way to determine
// whether the command was issued before or afterwards; if it was
// afterwards, the device must not be busy; if it was before,
// the device is either busy because of the sent command, or it's
// not busy as the command has already been finished, i.e. there
// was a second IRQ which we've overlooked as we didn't acknowledge
// the first IRQ)
IDE_LOCK(bus);
@ -180,14 +175,17 @@ ide_irq_handler(ide_bus_info *bus, uint8 status)
return B_UNHANDLED_INTERRUPT;
}
*/
}
/** cancel IRQ timeout
* it doesn't matter whether there really was a timeout running;
* on return, bus state is set to _accessing_
*/
/*
void
cancel_irq_timeout(ide_bus_info *bus)
{
@ -201,9 +199,8 @@ cancel_irq_timeout(ide_bus_info *bus)
}
/** start waiting for IRQ with the bus lock held
* new_state must be either sync_wait or async_wait
*/
// start waiting for IRQ with the bus lock held
// new_state must be either sync_wait or async_wait
void
start_waiting(ide_bus_info *bus, uint32 timeout, int new_state)
@ -226,7 +223,7 @@ start_waiting(ide_bus_info *bus, uint32 timeout, int new_state)
}
/** start waiting for IRQ with the bus lock not held */
// start waiting for IRQ with the bus lock not held
void
start_waiting_nolock(ide_bus_info *bus, uint32 timeout, int new_state)
@ -236,7 +233,7 @@ start_waiting_nolock(ide_bus_info *bus, uint32 timeout, int new_state)
}
/** wait for sync IRQ */
// wait for sync IRQ
void
wait_for_sync(ide_bus_info *bus)
@ -244,9 +241,9 @@ wait_for_sync(ide_bus_info *bus)
acquire_sem(bus->sync_wait_sem);
cancel_timer(&bus->timer.te);
}
*/
/** timeout dpc handler */
// timeout dpc handler
static void
ide_timeout_dpc(void *arg)
@ -280,19 +277,16 @@ ide_timeout_dpc(void *arg)
}
/** timeout handler, called by system timer */
// timeout handler, called by system timer
status_t
ide_timeout(timer *arg)
{
ide_bus_info *bus = ((ide_bus_timer_info *)arg)->bus;
FAST_LOG0(bus->log, ev_ide_timeout);
TRACE(("ide_timeout(): %p\n", bus));
dprintf("ide: ide_timeout() bus %p\n", bus);
/*
// we need to lock bus to have a solid bus state
// (side effect: we lock out the IRQ handler)
IDE_LOCK(bus);
@ -334,9 +328,12 @@ ide_timeout(timer *arg)
IDE_UNLOCK(bus);
return B_DO_NOT_RESCHEDULE;
}
*/
return B_DO_NOT_RESCHEDULE;
}
void
init_synced_pc(ide_synced_pc *pc, ide_synced_pc_func func)
{
@ -368,7 +365,7 @@ schedule_synced_pc(ide_bus_info *bus, ide_synced_pc *pc, void *arg)
// spc cannot be registered twice
TRACE(("already registered\n"));
return B_ERROR;
} else if( bus->state != ide_state_idle ) {
} else if( bus->state != ata_state_idle ) {
// bus isn't idle - spc must be added to pending list
TRACE(("adding to pending list\n"));
@ -386,7 +383,7 @@ schedule_synced_pc(ide_bus_info *bus, ide_synced_pc *pc, void *arg)
TRACE(("exec immediately\n"));
bus->state = ide_state_accessing;
bus->state = ata_state_busy;
IDE_UNLOCK(bus);
TRACE(("go\n"));
@ -454,7 +451,7 @@ access_finished(ide_bus_info *bus, ide_device_info *device)
// no one wants it, so execute pending synced_pc
if (bus->synced_pc_list == NULL) {
bus->state = ide_state_idle;
bus->state = ata_state_idle;
IDE_UNLOCK(bus);
return;
}