Renamed ide_qrequest *qrequest to ata_request *request.
Removed synced pcs.

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23331 a95241bf-73f2-0310-859d-f6bbb57e9c96
commit 5558f70663
parent 6e796d9fbe
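In short, the per-request bookkeeping struct ide_qrequest becomes ata_request, its free/active slots move from ide_device_info to ide_bus_info, and its scsi_ccb pointer is renamed from "request" to "ccb". A minimal sketch of the change, abridged from the ide_internal.h hunk below (field layout shortened, not the complete definition):

    // new per-request structure (was: ide_qrequest with "scsi_ccb *request")
    typedef struct ata_request {
        struct ide_device_info *device;
        scsi_ccb *ccb;              // basic request, formerly named "request"
        uint8 is_write : 1;
        uint8 uses_dma : 1;
        uint8 packet_irq : 1;
    } ata_request;

    // typical call-site change: qrequest->request->X becomes request->ccb->X
    uint32 timeout = request->ccb->timeout > 0
        ? request->ccb->timeout : IDE_STD_TIMEOUT;

The full definitions and every converted call site follow in the diff.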
@@ -126,7 +126,7 @@ ata_wait_for_drdy(ide_bus_info *bus)
status_t
ata_send_command(ide_device_info *device, ide_qrequest *qrequest,
ata_send_command(ide_device_info *device, ata_request *request,
bool need_drdy, uint32 timeout, ata_bus_state new_state)
{
ide_bus_info *bus = device->bus;

@@ -137,9 +137,9 @@ ata_send_command(ide_device_info *device, ide_qrequest *qrequest,
ASSERT(new_state == ata_state_pio); // XXX only pio for now
FLOW("ata_send_command: %d:%d, qrequest %p, request %p, tf %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
FLOW("ata_send_command: %d:%d, request %p, ccb %p, tf %02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x,%02x\n",
device->target_id, device->is_device1,
qrequest, qrequest ? qrequest->request : NULL,
request, request ? request->ccb : NULL,
device->tf.raw.r[0], device->tf.raw.r[1], device->tf.raw.r[2],
device->tf.raw.r[3], device->tf.raw.r[4], device->tf.raw.r[5],
device->tf.raw.r[6], device->tf.raw.r[7], device->tf.raw.r[8],

@@ -348,25 +348,25 @@ check_rw_status(ide_device_info *device, bool drqStatus)
*/
void
ata_dpc_PIO(ide_qrequest *qrequest)
ata_dpc_PIO(ata_request *request)
{
ide_device_info *device = qrequest->device;
uint32 timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
ide_device_info *device = request->device;
uint32 timeout = request->ccb->timeout > 0 ?
request->ccb->timeout : IDE_STD_TIMEOUT;
SHOW_FLOW0(3, "");
if (check_rw_error(device, qrequest)
|| !check_rw_status(device, qrequest->is_write ? device->left_blocks > 0 : true))
if (check_rw_error(device, request)
|| !check_rw_status(device, request->is_write ? device->left_blocks > 0 : true))
{
// failure reported by device
SHOW_FLOW0( 3, "command finished unsuccessfully" );
finish_checksense(qrequest);
finish_checksense(request);
return;
}
if (qrequest->is_write) {
if (request->is_write) {
if (device->left_blocks == 0) {
// this was the end-of-transmission IRQ
SHOW_FLOW0(3, "write access finished");

@@ -395,7 +395,7 @@ ata_dpc_PIO(ide_qrequest *qrequest)
// having a too short data buffer shouldn't happen here
// anyway - we are prepared
SHOW_FLOW0(3, "Writing one block");
if (write_PIO_block(qrequest, 512) == B_ERROR)
if (write_PIO_block(request, 512) == B_ERROR)
goto finish_cancel_timeout;
--device->left_blocks;

@@ -407,13 +407,13 @@ ata_dpc_PIO(ide_qrequest *qrequest)
// see write
SHOW_FLOW0( 3, "Reading one block" );
if (read_PIO_block(qrequest, 512) == B_ERROR)
if (read_PIO_block(request, 512) == B_ERROR)
goto finish_cancel_timeout;
--device->left_blocks;
if (device->left_blocks == 0) {
// at end of transmission, wait for data request going low
// at end of transmission, wait for data ccb going low
SHOW_FLOW0( 3, "Waiting for device to finish transmission" );
if (!wait_for_drqdown(device))

@@ -430,26 +430,26 @@ finish_cancel_timeout:
cancel_irq_timeout(device->bus);
finish:
finish_checksense(qrequest);
finish_checksense(request);
}
#endif
/** DPC called when IRQ was fired at end of DMA transmission */
void
ata_dpc_DMA(ide_qrequest *qrequest)
ata_dpc_DMA(ata_request *request)
{
ide_device_info *device = qrequest->device;
ide_device_info *device = request->device;
bool dma_success, dev_err;
dma_success = finish_dma(device);
dev_err = check_rw_error(device, qrequest);
dev_err = check_rw_error(device, request);
if (dma_success && !dev_err) {
// reset error count if DMA worked
device->DMA_failures = 0;
qrequest->request->data_resid = 0;
finish_checksense(qrequest);
request->ccb->data_resid = 0;
finish_checksense(request);
} else {
SHOW_ERROR0( 2, "Error in DMA transmission" );

@@ -461,20 +461,20 @@ ata_dpc_DMA(ide_qrequest *qrequest)
}
// reset queue in case queuing is active
finish_reset_queue(qrequest);
finish_reset_queue(request);
}
}
// list of LBA48 opcodes
static uint8 cmd_48[2][2] = {
static const uint8 cmd_48[2][2] = {
{ IDE_CMD_READ_SECTORS_EXT, IDE_CMD_WRITE_SECTORS_EXT },
{ IDE_CMD_READ_DMA_EXT, IDE_CMD_WRITE_DMA_EXT }
};
// list of normal LBA opcodes
static uint8 cmd_28[2][2] = {
static const uint8 cmd_28[2][2] = {
{ IDE_CMD_READ_SECTORS, IDE_CMD_WRITE_SECTORS },
{ IDE_CMD_READ_DMA, IDE_CMD_WRITE_DMA }
};

@@ -483,7 +483,7 @@ static uint8 cmd_28[2][2] = {
/** create IDE read/write command */
static bool
create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
create_rw_taskfile(ide_device_info *device, ata_request *request,
uint64 pos, size_t length, bool write)
{
SHOW_FLOW0( 3, "" );

@@ -514,7 +514,7 @@ create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
device->tf.lba48.lba_24_31 = (pos >> 24) & 0xff;
device->tf.lba48.lba_32_39 = (pos >> 32) & 0xff;
device->tf.lba48.lba_40_47 = (pos >> 40) & 0xff;
device->tf.lba48.command = cmd_48[qrequest->uses_dma][write];
device->tf.lba48.command = cmd_48[request->uses_dma][write];
return true;
} else {
// normal LBA

@@ -536,7 +536,7 @@ create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
device->tf.lba.lba_8_15 = (pos >> 8) & 0xff;
device->tf.lba.lba_16_23 = (pos >> 16) & 0xff;
device->tf.lba.lba_24_27 = (pos >> 24) & 0xf;
device->tf.lba.command = cmd_28[qrequest->uses_dma][write];
device->tf.lba.command = cmd_28[request->uses_dma][write];
return true;
}
} else {

@@ -576,7 +576,7 @@ create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
device->tf.chs.sector_number = (cylinder_offset % infoblock->current_sectors + 1) & 0xff;
device->tf.chs.head = cylinder_offset / infoblock->current_sectors;
device->tf.chs.command = cmd_28[qrequest->uses_dma][write];
device->tf.chs.command = cmd_28[request->uses_dma][write];
return true;
}
@@ -594,44 +594,44 @@ err:
*/
void
ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
ata_send_rw(ide_device_info *device, ata_request *request,
uint64 pos, size_t length, bool write)
{
ide_bus_info *bus = device->bus;
uint32 timeout;
// make a copy first as settings may get changed by user during execution
qrequest->is_write = write;
qrequest->uses_dma = device->DMA_enabled;
request->is_write = write;
request->uses_dma = device->DMA_enabled;
if (qrequest->uses_dma) {
if (!prepare_dma(device, qrequest)) {
if (request->uses_dma) {
if (!prepare_dma(device, request)) {
// fall back to PIO on error
qrequest->uses_dma = false;
request->uses_dma = false;
}
}
if (!qrequest->uses_dma) {
prep_PIO_transfer(device, qrequest);
if (!request->uses_dma) {
prep_PIO_transfer(device, request);
device->left_blocks = length;
}
// compose command
if (!create_rw_taskfile(device, qrequest, pos, length, write))
if (!create_rw_taskfile(device, request, pos, length, write))
goto err_setup;
// if no timeout is specified, use standard
timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
timeout = request->ccb->timeout > 0 ?
request->ccb->timeout : IDE_STD_TIMEOUT;
if (ata_send_command(device, qrequest, !device->is_atapi, timeout,
qrequest->uses_dma ? ata_state_dma : ata_state_pio) != B_OK)
if (ata_send_command(device, request, !device->is_atapi, timeout,
request->uses_dma ? ata_state_dma : ata_state_pio) != B_OK)
goto err_send;
if (qrequest->uses_dma) {
if (request->uses_dma) {
start_dma_wait_no_lock(device, qrequest);
start_dma_wait_no_lock(device, request);
} else {
// on PIO read, we start with waiting, on PIO write we can
// transmit data immediately; we let the service thread do

@@ -639,7 +639,7 @@ ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
// immediately (this optimisation really pays on SMP systems
// only)
SHOW_FLOW0(3, "Ready for PIO");
if (qrequest->is_write) {
if (request->is_write) {
SHOW_FLOW0(3, "Scheduling write DPC");
scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
}

@@ -649,19 +649,19 @@ ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
err_setup:
// error during setup
if (qrequest->uses_dma)
abort_dma(device, qrequest);
if (request->uses_dma)
abort_dma(device, request);
finish_checksense(qrequest);
finish_checksense(request);
return;
err_send:
// error during/after send;
// in this case, the device discards queued request automatically
if (qrequest->uses_dma)
abort_dma(device, qrequest);
// in this case, the device discards queued ccb automatically
if (request->uses_dma)
abort_dma(device, request);
finish_reset_queue(qrequest);
finish_reset_queue(request);
}

@@ -670,7 +670,7 @@ err_send:
*/
bool
check_rw_error(ide_device_info *device, ide_qrequest *qrequest)
check_rw_error(ide_device_info *device, ata_request *request)
{
#if 0
ide_bus_info *bus = device->bus;

@@ -694,7 +694,7 @@ check_rw_error(ide_device_info *device, ide_qrequest *qrequest)
return true;
}
if (qrequest->is_write) {
if (request->is_write) {
if ((error & ide_error_wp) != 0) {
set_sense(device, SCSIS_KEY_DATA_PROTECT, SCSIS_ASC_WRITE_PROTECTED);
return true;

@@ -995,7 +995,6 @@ status_t
ata_read_infoblock(ide_device_info *device, bool isAtapi)
{
ide_bus_info *bus = device->bus;
int status;
TRACE("ata_read_infoblock: bus %p, device %d, isAtapi %d\n", device->bus, device->is_device1, isAtapi);
@@ -31,7 +31,7 @@
return: true - device reported error
*/
static bool
check_packet_error(ide_device_info *device, ide_qrequest *qrequest)
check_packet_error(ide_device_info *device, ata_request *request)
{
ide_bus_info *bus = device->bus;
int status;

@@ -78,7 +78,7 @@ check_packet_error(ide_device_info *device, ide_qrequest *qrequest)
// tell SCSI layer that sense must be requested
// (we don't take care of auto-sense ourselve)
device->subsys_status = SCSI_REQ_CMP_ERR;
qrequest->request->device_status = SCSI_STATUS_CHECK_CONDITION;
request->ccb->device_status = SCSI_STATUS_CHECK_CONDITION;
// reset pending emulated sense - its overwritten by a real one
device->combined_sense = 0;
return true;

@@ -90,14 +90,14 @@ check_packet_error(ide_device_info *device, ide_qrequest *qrequest)
/*! IRQ handler of packet transfer (executed as DPC) */
void
packet_dpc(ide_qrequest *qrequest)
packet_dpc(ata_request *request)
{
#if 0
ide_device_info *device = qrequest->device;
ide_device_info *device = request->device;
ide_bus_info *bus = device->bus;
int status;
uint32 timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
uint32 timeout = request->ccb->timeout > 0 ?
request->ccb->timeout : IDE_STD_TIMEOUT;
SHOW_FLOW0(3, "");

@@ -106,9 +106,9 @@ packet_dpc(ide_qrequest *qrequest)
status = bus->controller->get_altstatus(bus->channel_cookie);
if (qrequest->packet_irq) {
if (request->packet_irq) {
// device requests packet
qrequest->packet_irq = false;
request->packet_irq = false;
if (!device->tf.packet_res.cmd_or_data
|| device->tf.packet_res.input_or_output

@@ -132,7 +132,7 @@ packet_dpc(ide_qrequest *qrequest)
return;
}
if (qrequest->uses_dma) {
if (request->uses_dma) {
// DMA transmission finished
bool dma_err, dev_err;

@@ -142,13 +142,13 @@ packet_dpc(ide_qrequest *qrequest)
SHOW_FLOW0(3, "DMA done");
dma_err = !finish_dma(device);
dev_err = check_packet_error(device, qrequest);
dev_err = check_packet_error(device, request);
// what to do if both the DMA controller and the device reports an error?
// let's assume that the DMA controller got problems because there was a
// device error, so we ignore the dma error and use the device error instead
if (dev_err) {
finish_checksense(qrequest);
finish_checksense(request);
return;
}

@@ -159,8 +159,8 @@ packet_dpc(ide_qrequest *qrequest)
device->DMA_failures = 0;
// this is a lie, but there is no way to find out
// how much has been transmitted
qrequest->request->data_resid = 0;
finish_checksense(qrequest);
request->ccb->data_resid = 0;
finish_checksense(request);
} else {
// DMA transmission went wrong
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);

@@ -171,7 +171,7 @@ packet_dpc(ide_qrequest *qrequest)
device->DMA_enabled = false;
}
finish_checksense(qrequest);
finish_checksense(request);
}
return;

@@ -191,7 +191,7 @@ packet_dpc(ide_qrequest *qrequest)
}
// check whether transmission direction matches
if ((device->tf.packet_res.input_or_output ^ qrequest->is_write) == 0) {
if ((device->tf.packet_res.input_or_output ^ request->is_write) == 0) {
SHOW_ERROR0(2, "data transmission in wrong way!?");
// TODO: hm, either the device is broken or the caller has specified

@@ -202,9 +202,9 @@ packet_dpc(ide_qrequest *qrequest)
// TODO: the device will abort next command with a reset condition
// perhaps we should hide that by reading sense?
SHOW_FLOW0(3, "Reset");
// reset_device(device, qrequest);
// reset_device(device, request);
finish_checksense(qrequest);
finish_checksense(request);
return;
}

@@ -224,9 +224,9 @@ packet_dpc(ide_qrequest *qrequest)
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
if (device->tf.packet_res.input_or_output)
err = read_PIO_block(qrequest, length);
err = read_PIO_block(request, length);
else
err = write_PIO_block(qrequest, length);
err = write_PIO_block(request, length);
// only report "real" errors;
// discarding data (ERR_TOO_BIG) can happen but is OK

@@ -241,16 +241,16 @@ packet_dpc(ide_qrequest *qrequest)
return;
} else {
// device has done job and doesn't want to transmit data anymore
// -> finish request
// -> finish ccb
SHOW_FLOW0(3, "no data");
check_packet_error(device, qrequest);
check_packet_error(device, request);
SHOW_FLOW(3, "finished: %d of %d left",
(int)qrequest->request->data_resid,
(int)qrequest->request->data_length);
(int)request->ccb->data_resid,
(int)request->ccb->data_length);
finish_checksense(qrequest);
finish_checksense(request);
return;
}

@@ -259,27 +259,27 @@ packet_dpc(ide_qrequest *qrequest)
err_cancel_timer:
cancel_irq_timeout(device->bus);
err:
finish_checksense(qrequest);
finish_checksense(request);
#endif
}
/*! Create taskfile for ATAPI packet */
static bool
create_packet_taskfile(ide_device_info *device, ide_qrequest *qrequest,
create_packet_taskfile(ide_device_info *device, ata_request *request,
bool write)
{
scsi_ccb *request = qrequest->request;
scsi_ccb *ccb = request->ccb;
SHOW_FLOW(3, "DMA enabled=%d, uses_dma=%d, scsi_cmd=%x",
device->DMA_enabled, qrequest->uses_dma, device->packet[0]);
device->DMA_enabled, request->uses_dma, device->packet[0]);
device->tf_param_mask = ide_mask_features | ide_mask_byte_count;
device->tf.packet.dma = qrequest->uses_dma;
device->tf.packet.dma = request->uses_dma;
device->tf.packet.ovl = 0;
device->tf.packet.byte_count_0_7 = request->data_length & 0xff;
device->tf.packet.byte_count_8_15 = request->data_length >> 8;
device->tf.packet.byte_count_0_7 = ccb->data_length & 0xff;
device->tf.packet.byte_count_8_15 = ccb->data_length >> 8;
device->tf.packet.command = IDE_CMD_PACKET;
return true;

@@ -288,14 +288,14 @@ create_packet_taskfile(ide_device_info *device, ide_qrequest *qrequest,
/*! Send ATAPI packet */
void
send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write)
send_packet(ide_device_info *device, ata_request *request, bool write)
{
#if 0
ide_bus_info *bus = device->bus;
bool packet_irq = device->atapi.packet_irq;
uint8 scsi_cmd = device->packet[0];
SHOW_FLOW( 3, "qrequest=%p, command=%x", qrequest, scsi_cmd );
SHOW_FLOW( 3, "request=%p, command=%x", request, scsi_cmd );
/*{
unsigned int i;

@@ -309,47 +309,47 @@ send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write)
device->packet[3], device->packet[4], device->packet[5],
device->packet[6], device->packet[7], device->packet[8],
device->packet[9], device->packet[10], device->packet[11],
qrequest->request->cdb_length);
request->ccb->cdb_length);
//snooze( 1000000 );
qrequest->is_write = write;
// if needed, mark first IRQ as being packet request IRQ
qrequest->packet_irq = packet_irq;
request->is_write = write;
// if needed, mark first IRQ as being packet ccb IRQ
request->packet_irq = packet_irq;
// only READ/WRITE commands can use DMA
// (the device may support it always, but IDE controllers don't
// report how much data is transmitted, and this information is
// crucial for the SCSI protocol)
// special offer: let READ_CD commands use DMA too
qrequest->uses_dma = device->DMA_enabled
request->uses_dma = device->DMA_enabled
&& (scsi_cmd == SCSI_OP_READ_6 || scsi_cmd == SCSI_OP_WRITE_6
|| scsi_cmd == SCSI_OP_READ_10 || scsi_cmd == SCSI_OP_WRITE_10
|| scsi_cmd == SCSI_OP_READ_12 || scsi_cmd == SCSI_OP_WRITE_12
|| scsi_cmd == SCSI_OP_READ_CD);
// try preparing DMA, if that fails, fall back to PIO
if (qrequest->uses_dma) {
if (request->uses_dma) {
SHOW_FLOW0(3, "0");
if (!prepare_dma( device, qrequest))
qrequest->uses_dma = false;
if (!prepare_dma( device, request))
request->uses_dma = false;
SHOW_FLOW(3, "0->%d", qrequest->uses_dma);
SHOW_FLOW(3, "0->%d", request->uses_dma);
}
SHOW_FLOW0(3, "1");
if (!qrequest->uses_dma)
prep_PIO_transfer(device, qrequest);
if (!request->uses_dma)
prep_PIO_transfer(device, request);
SHOW_FLOW0(3, "2");
if (!create_packet_taskfile(device, qrequest, write))
if (!create_packet_taskfile(device, request, write))
goto err_setup;
SHOW_FLOW0(3, "3");
if (!send_command(device, qrequest, false,
if (!send_command(device, request, false,
device->atapi.packet_irq_timeout,
device->atapi.packet_irq ? ide_state_async_waiting : ide_state_accessing))
goto err_setup;

@@ -407,15 +407,15 @@ send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write)
goto err_packet2;
}
if (qrequest->uses_dma) {
if (request->uses_dma) {
SHOW_FLOW0( 3, "ready for DMA" );
// S/G table must already be setup - we hold the bus lock, so
// we really have to hurry up
start_dma_wait(device, qrequest);
start_dma_wait(device, request);
} else {
uint32 timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
uint32 timeout = request->ccb->timeout > 0 ?
request->ccb->timeout : IDE_STD_TIMEOUT;
start_waiting(bus, timeout, ide_state_async_waiting);
}

@@ -430,38 +430,38 @@ err_packet:
device->subsys_status = SCSI_HBA_ERR;
err_setup:
if (qrequest->uses_dma)
abort_dma(device, qrequest);
if (request->uses_dma)
abort_dma(device, request);
finish_checksense(qrequest);
finish_checksense(request);
#endif
}
/*! Execute SCSI I/O for atapi devices */
void
atapi_exec_io(ide_device_info *device, ide_qrequest *qrequest)
atapi_exec_io(ide_device_info *device, ata_request *request)
{
scsi_ccb *request = qrequest->request;
scsi_ccb *ccb = request->ccb;
SHOW_FLOW(3, "command=%x", qrequest->request->cdb[0]);
SHOW_FLOW(3, "command=%x", request->ccb->cdb[0]);
// ATAPI command packets are 12 bytes long;
// if the command is shorter, remaining bytes must be padded with zeros
memset(device->packet, 0, sizeof(device->packet));
memcpy(device->packet, request->cdb, request->cdb_length);
memcpy(device->packet, ccb->cdb, ccb->cdb_length);
if (request->cdb[0] == SCSI_OP_REQUEST_SENSE && device->combined_sense) {
if (ccb->cdb[0] == SCSI_OP_REQUEST_SENSE && device->combined_sense) {
// we have a pending emulated sense - return it on REQUEST SENSE
ide_request_sense(device, qrequest);
finish_checksense(qrequest);
ide_request_sense(device, request);
finish_checksense(request);
} else {
// reset all error codes for new request
start_request(device, qrequest);
// reset all error codes for new ccb
start_request(device, request);
// now we have an IDE packet
send_packet(device, qrequest,
(request->flags & SCSI_DIR_MASK) == SCSI_DIR_OUT);
send_packet(device, request,
(ccb->flags & SCSI_DIR_MASK) == SCSI_DIR_OUT);
}
}
@@ -59,10 +59,6 @@ destroy_device(ide_device_info *device)
cleanup_device_links(device);
if (device->qreqActive)
dprintf("destroy_device: Warning request still active\n");
free(device->qreqFree);
free(device);
}

@@ -121,14 +117,6 @@ create_device(ide_bus_info *bus, bool is_device1)
device->combined_sense = 0;
device->qreqActive = NULL;
device->qreqFree = (ide_qrequest *)malloc(sizeof(ide_qrequest));
memset(device->qreqFree, 0, sizeof(ide_qrequest));
device->qreqFree->running = false;
device->qreqFree->device = device;
device->qreqFree->request = NULL;
device->total_sectors = 0;
// disable interrupts

@@ -141,10 +129,6 @@ create_device(ide_bus_info *bus, bool is_device1)
bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_device_head);
return device;
err:
destroy_device(device);
return NULL;
}
#if B_HOST_IS_LENDIAN
@@ -71,7 +71,7 @@ configure_dma(ide_device_info *device)
must be called _before_ start_dma_wait
*/
void
abort_dma(ide_device_info *device, ide_qrequest *qrequest)
abort_dma(ide_device_info *device, ata_request *request)
{
ide_bus_info *bus = device->bus;

@@ -86,14 +86,14 @@ abort_dma(ide_device_info *device, ide_qrequest *qrequest)
warning: doesn't set sense data on error
*/
bool
prepare_dma(ide_device_info *device, ide_qrequest *qrequest)
prepare_dma(ide_device_info *device, ata_request *request)
{
ide_bus_info *bus = device->bus;
scsi_ccb *request = qrequest->request;
scsi_ccb *ccb = request->ccb;
status_t res;
res = bus->controller->prepare_dma(bus->channel_cookie, request->sg_list,
request->sg_count, qrequest->is_write);
res = bus->controller->prepare_dma(bus->channel_cookie, ccb->sg_list,
ccb->sg_count, request->is_write);
if (res != B_OK)
return false;

@@ -103,27 +103,27 @@ prepare_dma(ide_device_info *device, ide_qrequest *qrequest)
/*! Start waiting for DMA to be finished */
void
start_dma_wait(ide_device_info *device, ide_qrequest *qrequest)
start_dma_wait(ide_device_info *device, ata_request *request)
{
#if 0
ide_bus_info *bus = device->bus;
bus->controller->start_dma(bus->channel_cookie);
start_waiting(bus, qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT, ide_state_async_waiting);
start_waiting(bus, request->ccb->timeout > 0 ?
request->ccb->timeout : IDE_STD_TIMEOUT, ide_state_async_waiting);
#endif
}
/*! Start waiting for DMA to be finished with bus lock not hold */
void
start_dma_wait_no_lock(ide_device_info *device, ide_qrequest *qrequest)
start_dma_wait_no_lock(ide_device_info *device, ata_request *request)
{
ide_bus_info *bus = device->bus;
IDE_LOCK(bus);
start_dma_wait(device, qrequest);
start_dma_wait(device, request);
}
@@ -18,10 +18,10 @@
/*! Emulate REQUEST SENSE */
void
ide_request_sense(ide_device_info *device, ide_qrequest *qrequest)
ide_request_sense(ide_device_info *device, ata_request *request)
{
scsi_ccb *request = qrequest->request;
scsi_cmd_request_sense *cmd = (scsi_cmd_request_sense *)request->cdb;
scsi_ccb *ccb = request->ccb;
scsi_cmd_request_sense *cmd = (scsi_cmd_request_sense *)ccb->cdb;
scsi_sense sense;
uint32 transferSize;

@@ -33,39 +33,39 @@ ide_request_sense(ide_device_info *device, ide_qrequest *qrequest)
else
memset(&sense, 0, sizeof(sense));
copy_sg_data(request, 0, cmd->allocation_length, &sense, sizeof(sense), false);
copy_sg_data(ccb, 0, cmd->allocation_length, &sense, sizeof(sense), false);
// reset sense information on read
device->combined_sense = 0;
transferSize = min(sizeof(sense), cmd->allocation_length);
transferSize = min(transferSize, request->data_length);
transferSize = min(transferSize, ccb->data_length);
request->data_resid = request->data_length - transferSize;
ccb->data_resid = ccb->data_length - transferSize;
// normally, all flags are set to "success", but for Request Sense
// this would have overwritten the sense we want to read
device->subsys_status = SCSI_REQ_CMP;
request->device_status = SCSI_STATUS_GOOD;
ccb->device_status = SCSI_STATUS_GOOD;
}
/*! Copy data between request data and buffer
request - request to copy data from/to
offset - offset of data in request
allocation_length- limit of request's data buffer according to CDB
/*! Copy data between ccb data and buffer
ccb - ccb to copy data from/to
offset - offset of data in ccb
allocation_length- limit of ccb's data buffer according to CDB
buffer - data to copy data from/to
size - number of bytes to copy
to_buffer - true: copy from request to buffer
false: copy from buffer to request
return: true, if data of request was large enough
to_buffer - true: copy from ccb to buffer
false: copy from buffer to ccb
return: true, if data of ccb was large enough
*/
bool
copy_sg_data(scsi_ccb *request, uint offset, uint allocationLength,
copy_sg_data(scsi_ccb *ccb, uint offset, uint allocationLength,
void *buffer, int size, bool toBuffer)
{
const physical_entry *sgList = request->sg_list;
int sgCount = request->sg_count;
const physical_entry *sgList = ccb->sg_list;
int sgCount = ccb->sg_count;
int requestSize;
SHOW_FLOW(3, "offset=%u, req_size_limit=%d, size=%d, sg_list=%p, sg_cnt=%d, %s buffer",

@@ -81,8 +81,8 @@ copy_sg_data(scsi_ccb *request, uint offset, uint allocationLength,
if (sgCount == 0)
return 0;
// remaining bytes we are allowed to copy from/to request
requestSize = min(allocationLength, request->data_length) - offset;
// remaining bytes we are allowed to copy from/to ccb
requestSize = min(allocationLength, ccb->data_length) - offset;
// copy one S/G entry at a time
for (; size > 0 && requestSize > 0 && sgCount > 0; ++sgList, --sgCount) {
@@ -37,20 +37,15 @@
#define IDE_CHANNEL_ID_GENERATOR "ide/channel_id"
// node item containing channel id (uint32)
#define IDE_CHANNEL_ID_ITEM "ide/channel_id"
// SIM interface
#define IDE_SIM_MODULE_NAME "bus_managers/ide/sim/v1"
extern device_manager_info *pnp;
typedef struct ide_bus_info ide_bus_info;
typedef void (*ide_synced_pc_func)(ide_bus_info *bus, void *arg);
typedef struct ide_synced_pc {
struct ide_synced_pc *next;
ide_synced_pc_func func;
void *arg;
bool registered;
} ide_synced_pc;
// structure for device time-outs
typedef struct ide_device_timer_info {

@@ -64,6 +59,18 @@ typedef struct ide_bus_timer_info {
struct ide_bus_info *bus;
} ide_bus_timer_info;
// ide request
typedef struct ata_request {
struct ide_device_info *device;
scsi_ccb *ccb; // basic request
uint8 is_write : 1; // true for write request
uint8 uses_dma : 1; // true if using dma
uint8 packet_irq : 1; // true if waiting for command packet irq
} ata_request;
typedef struct ide_device_info {
struct ide_bus_info *bus;

@@ -87,13 +94,10 @@ typedef struct ide_device_info {
// pending error codes
uint32 combined_sense; // emulated sense of device
struct ide_qrequest *qreqActive;
struct ide_qrequest *qreqFree;
struct ide_device_info *other_device; // other device on same bus
// entry for scsi's exec_io request
void (*exec_io)( struct ide_device_info *device, struct ide_qrequest *qrequest );
void (*exec_io)( struct ide_device_info *device, struct ata_request *request );
int target_id; // target id (currently, same as is_device1)

@@ -134,18 +138,6 @@ typedef enum {
ide_request_autosense = 2
} ide_request_state;*/
// ide request
typedef struct ide_qrequest {
struct ide_qrequest *next;
ide_device_info *device;
scsi_ccb *request; // basic request
uint8 is_write : 1; // true for write request
uint8 running : 1; // true if "on bus"
uint8 uses_dma : 1; // true if using dma
uint8 packet_irq : 1; // true if waiting for command packet irq
} ide_qrequest;
// state of ide bus
typedef enum {

@@ -157,7 +149,6 @@ typedef enum {
struct ide_bus_info {
ide_qrequest *active_qrequest;
// controller
ide_controller_interface *controller;

@@ -169,6 +160,10 @@ struct ide_bus_info {
ata_bus_state state; // current state of bus
struct ata_request * qreqActive;
struct ata_request * qreqFree;
benaphore status_report_ben; // to lock when you report XPT about bus state
// i.e. during requeue, resubmit or finished

@@ -178,14 +173,11 @@ struct ide_bus_info {
ide_bus_timer_info timer; // timeout
scsi_dpc_cookie irq_dpc;
ide_synced_pc *synced_pc_list;
ide_device_info *active_device;
ide_device_info *devices[2];
ide_device_info *first_device;
ide_synced_pc disconnect_syncinfo; // used to handle lost controller
uchar path_id;
device_node_handle node; // our pnp node

@@ -213,67 +205,6 @@ struct ide_bus_info {
restore_interrupts( prev_irq_state ); \
}
// SIM interface
#define IDE_SIM_MODULE_NAME "bus_managers/ide/sim/v1"
enum {
ev_ide_send_command = 1,
ev_ide_device_start_service,
ev_ide_device_start_service2,
ev_ide_dpc_service,
ev_ide_dpc_continue,
ev_ide_irq_handle,
ev_ide_cancel_irq_timeout,
ev_ide_start_waiting,
ev_ide_timeout_dpc,
ev_ide_timeout,
ev_ide_reset_bus,
ev_ide_reset_device,
ev_ide_scsi_io,
ev_ide_scsi_io_exec,
ev_ide_scsi_io_invalid_device,
ev_ide_scsi_io_bus_busy,
ev_ide_scsi_io_device_busy,
ev_ide_scsi_io_disconnected,
ev_ide_finish_request,
ev_ide_finish_norelease,
ev_ide_scan_device_int,
ev_ide_scan_device_int_cant_send,
ev_ide_scan_device_int_keeps_busy,
ev_ide_scan_device_int_found
};
// get selected device
static inline
ide_device_info *get_current_device(ide_bus_info *bus)
{
ide_task_file tf;
bus->controller->read_command_block_regs(bus->channel_cookie, &tf,
ide_mask_device_head);
return bus->devices[tf.lba.device];
}
// check if device has released the bus
// return: true, if bus was released
static inline int
device_released_bus(ide_device_info *device)
{
ide_bus_info *bus = device->bus;
bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_sector_count);
return device->tf.queued.release;
}
// ata.c

@@ -286,18 +217,18 @@ status_t ata_wait_for_drqdown(ide_bus_info *bus);
status_t ata_wait_for_drdy(ide_bus_info *bus);
status_t ata_reset_bus(ide_bus_info *bus, bool *_devicePresent0, uint32 *_sigDev0, bool *_devicePresent1, uint32 *_sigDev1);
status_t ata_reset_device(ide_device_info *device, bool *_devicePresent);
status_t ata_send_command(ide_device_info *device, ide_qrequest *qrequest, bool need_drdy, uint32 timeout, ata_bus_state new_state);
status_t ata_send_command(ide_device_info *device, ata_request *request, bool need_drdy, uint32 timeout, ata_bus_state new_state);
bool check_rw_error(ide_device_info *device, ide_qrequest *qrequest);
bool check_rw_error(ide_device_info *device, ata_request *request);
bool check_output(ide_device_info *device, bool drdy_required, int error_mask, bool is_write);
void ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
void ata_send_rw(ide_device_info *device, ata_request *request,
uint64 pos, size_t length, bool write);
void ata_dpc_DMA(ide_qrequest *qrequest);
void ata_dpc_PIO(ide_qrequest *qrequest);
void ata_dpc_DMA(ata_request *request);
void ata_dpc_PIO(ata_request *request);
void ata_exec_io(ide_device_info *device, ide_qrequest *qrequest);
void ata_exec_io(ide_device_info *device, ata_request *request);
status_t ata_read_infoblock(ide_device_info *device, bool isAtapi);

@@ -305,9 +236,9 @@ status_t configure_ata_device(ide_device_info *device);
// atapi.c
status_t configure_atapi_device(ide_device_info *device);
void send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write);
void packet_dpc(ide_qrequest *qrequest);
void atapi_exec_io(ide_device_info *device, ide_qrequest *qrequest);
void send_packet(ide_device_info *device, ata_request *request, bool write);
void packet_dpc(ata_request *request);
void atapi_exec_io(ide_device_info *device, ata_request *request);
// basic_prot.c

@@ -329,12 +260,12 @@ status_t configure_device(ide_device_info *device, bool isAtapi);
// dma.c
bool prepare_dma(ide_device_info *device, ide_qrequest *qrequest);
void start_dma(ide_device_info *device, ide_qrequest *qrequest);
void start_dma_wait(ide_device_info *device, ide_qrequest *qrequest);
void start_dma_wait_no_lock(ide_device_info *device, ide_qrequest *qrequest);
bool prepare_dma(ide_device_info *device, ata_request *request);
void start_dma(ide_device_info *device, ata_request *request);
void start_dma_wait(ide_device_info *device, ata_request *request);
void start_dma_wait_no_lock(ide_device_info *device, ata_request *request);
bool finish_dma(ide_device_info *device);
void abort_dma(ide_device_info *device, ide_qrequest *qrequest);
void abort_dma(ide_device_info *device, ata_request *request);
bool configure_dma(ide_device_info *device);

@@ -343,14 +274,14 @@ bool configure_dma(ide_device_info *device);
bool copy_sg_data(scsi_ccb *request, uint offset, uint req_size_limit,
void *buffer, int size, bool to_buffer);
void ide_request_sense(ide_device_info *device, ide_qrequest *qrequest);
void ide_request_sense(ide_device_info *device, ata_request *request);
// pio.c
void prep_PIO_transfer(ide_device_info *device, ide_qrequest *qrequest);
status_t read_PIO_block(ide_qrequest *qrequest, int length);
status_t write_PIO_block(ide_qrequest *qrequest, int length);
void prep_PIO_transfer(ide_device_info *device, ata_request *request);
status_t read_PIO_block(ata_request *request, int length);
status_t write_PIO_block(ata_request *request, int length);

@@ -362,9 +293,6 @@ void start_waiting_nolock(ide_bus_info *bus, uint32 timeout, int new_state);
void wait_for_sync(ide_bus_info *bus);
void cancel_irq_timeout(ide_bus_info *bus);
status_t schedule_synced_pc(ide_bus_info *bus, ide_synced_pc *pc, void *arg);
void init_synced_pc(ide_synced_pc *pc, ide_synced_pc_func func);
void uninit_synced_pc(ide_synced_pc *pc);
void ide_dpc(void *arg);
void access_finished(ide_bus_info *bus, ide_device_info *device);
@@ -39,18 +39,17 @@
scsi_for_sim_interface *scsi;
static void disconnect_worker(ide_bus_info *bus, void *arg);
static void set_check_condition(ide_qrequest *qrequest);
static void set_check_condition(ata_request *request);
static void
sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
sim_scsi_io(ide_bus_info *bus, scsi_ccb *ccb)
{
ide_device_info *device;
ide_qrequest *qrequest;
ata_request *request;
//ide_request_priv *priv;
FLOW("sim_scsi_iobus %p, %d:%d\n", bus, request->target_id, request->target_lun);
FLOW("sim_scsi_iobus %p, %d:%d\n", bus, ccb->target_id, ccb->target_lun);
if (bus->disconnected)
goto err_disconnected;

@@ -59,14 +58,14 @@ sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
// I've read that there are ATAPI devices with more then one LUN,
// but it seems that most (all?) devices ignore LUN, so we have
// to restrict to LUN 0 to avoid mirror devices
if (request->target_id >= 2)
if (ccb->target_id >= 2)
goto err_inv_device;
device = bus->devices[request->target_id];
device = bus->devices[ccb->target_id];
if (device == NULL)
goto err_inv_device;
if (request->target_lun > device->last_lun)
if (ccb->target_lun > device->last_lun)
goto err_inv_device;
// grab the bus

@@ -77,7 +76,7 @@ sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
goto err_bus_busy;
// bail out if device can't accept further requests
if (device->qreqFree == NULL)
if (bus->qreqFree == NULL)
goto err_device_busy;
bus->state = ata_state_busy;

@@ -86,35 +85,32 @@ sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
RELEASE_BEN(&bus->status_report_ben);
// as we own the bus, noone can bother us
qrequest = device->qreqFree;
device->qreqFree = NULL;
device->qreqActive = qrequest;
request = bus->qreqFree;
bus->qreqFree = NULL;
bus->qreqActive = request;
qrequest->request = request;
qrequest->running = true;
qrequest->uses_dma = false;
request->device = device;
request->ccb = ccb;
request->uses_dma = false;
bus->active_qrequest = qrequest; // XXX whats this!?!?!
FLOW("calling exec_io: %p, %d:%d\n", bus, ccb->target_id, ccb->target_lun);
FLOW("calling exec_io: %p, %d:%d\n", bus, request->target_id, request->target_lun);
device->exec_io(device, qrequest);
device->exec_io(device, request);
return;
err_inv_device:
FLOW("Invalid device %d:%d\n", request->target_id, request->target_lun);
FLOW("Invalid device %d:%d\n", ccb->target_id, ccb->target_lun);
request->subsys_status = SCSI_SEL_TIMEOUT;
scsi->finished(request, 1);
ccb->subsys_status = SCSI_SEL_TIMEOUT;
scsi->finished(ccb, 1);
return;
err_bus_busy:
FLOW("Bus busy\n");
IDE_UNLOCK(bus);
scsi->requeue(request, true);
scsi->requeue(ccb, true);
RELEASE_BEN(&bus->status_report_ben);
return;

@@ -122,14 +118,14 @@ err_device_busy:
FLOW("Device busy\n");
IDE_UNLOCK(bus);
scsi->requeue(request, false);
scsi->requeue(ccb, false);
RELEASE_BEN(&bus->status_report_ben);
return;
err_disconnected:
TRACE("No controller anymore\n");
request->subsys_status = SCSI_NO_HBA;
scsi->finished(request, 1);
ccb->subsys_status = SCSI_NO_HBA;
scsi->finished(ccb, 1);
return;
}

@@ -310,25 +306,25 @@ create_sense(ide_device_info *device, scsi_sense *sense)
/** finish command, updating sense of device and request, and release bus */
void
finish_checksense(ide_qrequest *qrequest)
finish_checksense(ata_request *request)
{
SHOW_FLOW(3, "%p, subsys_status=%d, sense=%x",
qrequest->request,
qrequest->request->subsys_status,
(int)qrequest->device->new_combined_sense);
request->ccb,
request->ccb->subsys_status,
(int)request->device->new_combined_sense);
qrequest->request->subsys_status = qrequest->device->subsys_status;
request->ccb->subsys_status = request->device->subsys_status;
if (qrequest->request->subsys_status == SCSI_REQ_CMP) {
if (request->ccb->subsys_status == SCSI_REQ_CMP) {
// device or emulation code completed command
qrequest->device->combined_sense = qrequest->device->new_combined_sense;
request->device->combined_sense = request->device->new_combined_sense;
// if emulation code detected error, set CHECK CONDITION
if (qrequest->device->combined_sense)
set_check_condition(qrequest);
if (request->device->combined_sense)
set_check_condition(request);
}
finish_request(qrequest, false);
finish_request(request, false);
}
@@ -337,25 +333,21 @@ finish_checksense(ide_qrequest *qrequest)
*/
void
finish_request(ide_qrequest *qrequest, bool resubmit)
finish_request(ata_request *request, bool resubmit)
{
ide_device_info *device = qrequest->device;
ide_device_info *device = request->device;
ide_bus_info *bus = device->bus;
scsi_ccb *request;
scsi_ccb *ccb;
SHOW_FLOW0(3, "");
// save request first, as qrequest can be reused as soon as
// save request first, as request can be reused as soon as
// access_finished is called!
request = qrequest->request;
ccb = request->ccb;
qrequest->running = false;
device->qreqFree = device->qreqActive;
device->qreqActive = NULL;
// paranoia
bus->active_qrequest = NULL;
bus->qreqFree = bus->qreqActive;
bus->qreqActive = NULL;
// release bus, handling service requests;
// TBD:

@@ -369,9 +361,9 @@ finish_request(ide_qrequest *qrequest, bool resubmit)
ACQUIRE_BEN(&bus->status_report_ben);
if (resubmit)
scsi->resubmit(request);
scsi->resubmit(ccb);
else
scsi->finished(request, 1);
scsi->finished(ccb, 1);
RELEASE_BEN(&bus->status_report_ben);
}

@@ -383,18 +375,18 @@ finish_request(ide_qrequest *qrequest, bool resubmit)
*/
static void
set_check_condition(ide_qrequest *qrequest)
set_check_condition(ata_request *request)
{
scsi_ccb *request = qrequest->request;
ide_device_info *device = qrequest->device;
scsi_ccb *ccb = request->ccb;
ide_device_info *device = request->device;
SHOW_FLOW0(3, "");
request->subsys_status = SCSI_REQ_CMP_ERR;
request->device_status = SCSI_STATUS_CHECK_CONDITION;
ccb->subsys_status = SCSI_REQ_CMP_ERR;
ccb->device_status = SCSI_STATUS_CHECK_CONDITION;
// copy sense only if caller requested it
if ((request->flags & SCSI_DIS_AUTOSENSE) == 0) {
if ((ccb->flags & SCSI_DIS_AUTOSENSE) == 0) {
scsi_sense sense;
int sense_len;

@@ -405,9 +397,9 @@ set_check_condition(ide_qrequest *qrequest)
sense_len = min(SCSI_MAX_SENSE_SIZE, sizeof(sense));
memcpy(request->sense, &sense, sense_len);
request->sense_resid = SCSI_MAX_SENSE_SIZE - sense_len;
request->subsys_status |= SCSI_AUTOSNS_VALID;
memcpy(ccb->sense, &sense, sense_len);
ccb->sense_resid = SCSI_MAX_SENSE_SIZE - sense_len;
ccb->subsys_status |= SCSI_AUTOSNS_VALID;
// device sense gets reset once it's read
device->combined_sense = 0;

@@ -416,10 +408,10 @@ set_check_condition(ide_qrequest *qrequest)
void
finish_retry(ide_qrequest *qrequest)
finish_retry(ata_request *request)
{
qrequest->device->combined_sense = 0;
finish_request(qrequest, true);
request->device->combined_sense = 0;
finish_request(request, true);
}

@@ -428,17 +420,17 @@ finish_retry(ide_qrequest *qrequest)
*/
void
finish_reset_queue(ide_qrequest *qrequest)
finish_reset_queue(ata_request *request)
{
ide_bus_info *bus = qrequest->device->bus;
ide_bus_info *bus = request->device->bus;
// don't remove block_bus!!!
// during finish_checksense, the bus is released, so
// the SCSI bus manager could send us further commands
scsi->block_bus(bus->scsi_cookie);
finish_checksense(qrequest);
// send_abort_queue(qrequest->device); // XXX fix this
finish_checksense(request);
// send_abort_queue(request->device); // XXX fix this
scsi->unblock_bus(bus->scsi_cookie);
}
@@ -447,40 +439,33 @@ finish_reset_queue(ide_qrequest *qrequest)
/** finish request, but don't release bus
* if resubmit is true, the request will be resubmitted
*/
/*
static void
finish_norelease(ide_qrequest *qrequest, bool resubmit)
finish_norelease(ata_request *request, bool resubmit)
{
ide_device_info *device = qrequest->device;
ide_device_info *device = request->device;
ide_bus_info *bus = device->bus;
qrequest->running = false;
device->qreqFree = device->qreqActive;
device->qreqActive = 0;
if (bus->active_qrequest == qrequest)
bus->active_qrequest = NULL;
bus->qreqFree = bus->qreqActive;
bus->qreqActive = 0;
ACQUIRE_BEN(&bus->status_report_ben);
if (resubmit)
scsi->resubmit(qrequest->request);
scsi->resubmit(request->ccb);
else
scsi->finished(qrequest->request, 1);
scsi->finished(request->ccb, 1);
RELEASE_BEN(&bus->status_report_ben);
}
*/
/** finish all queued requests but <ignore> of the device;
* set resubmit, if requests are to be resubmitted by xpt
*/
void
finish_all_requests(ide_device_info *device, ide_qrequest *ignore,
finish_all_requests(ide_device_info *device, ata_request *ignore,
int subsys_status, bool resubmit)
{

@@ -496,11 +481,11 @@ finish_all_requests(ide_device_info *device, ide_qrequest *ignore,
// XXX fix this
/*
for (i = 0; i < device->queue_depth; ++i) {
ide_qrequest *qrequest = &device->qreq_array[i];
ata_request *request = &device->qreq_array[i];
if (qrequest->running && qrequest != ignore) {
qrequest->request->subsys_status = subsys_status;
finish_norelease(qrequest, resubmit);
if (request->running && request != ignore) {
request->ccb->subsys_status = subsys_status;
finish_norelease(request, resubmit);
}
}
*/

@@ -526,7 +511,6 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
memset(bus, 0, sizeof(*bus));
bus->node = node;
bus->lock = 0;
bus->active_qrequest = NULL;
bus->disconnected = false;
{

@@ -537,13 +521,9 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
sprintf(bus->name, "ide_bus %d", (int)channel_id);
}
init_synced_pc(&bus->disconnect_syncinfo, disconnect_worker);
bus->scsi_cookie = user_cookie;
bus->state = ata_state_idle;
bus->timer.bus = bus;
bus->synced_pc_list = NULL;
if ((status = scsi->alloc_dpc(&bus->irq_dpc)) < B_OK)
goto err1;

@@ -568,6 +548,12 @@ ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
bus->first_device = NULL;
bus->qreqActive = NULL;
bus->qreqFree = (ata_request *)malloc(sizeof(ata_request));
memset(bus->qreqFree, 0, sizeof(ata_request));
// read restrictions of controller
if (pnp->get_attr_uint8(node, IDE_CONTROLLER_MAX_DEVICES_ITEM,

@@ -606,8 +592,6 @@ err5:
err4:
scsi->free_dpc(bus->irq_dpc);
err1:
uninit_synced_pc(&bus->disconnect_syncinfo);
err:
free(bus);
return status;

@@ -627,7 +611,10 @@ ide_sim_uninit_bus(ide_bus_info *bus)
DELETE_BEN(&bus->status_report_ben);
scsi->free_dpc(bus->irq_dpc);
uninit_synced_pc(&bus->disconnect_syncinfo);
if (bus->qreqActive)
dprintf("ide_sim_uninit_bus: Warning request still active\n");
free(bus->qreqFree);
free(bus);

@@ -635,22 +622,6 @@ ide_sim_uninit_bus(ide_bus_info *bus)
}
// abort all running requests with SCSI_NO_HBA; finally, unblock bus
static void
disconnect_worker(ide_bus_info *bus, void *arg)
{
int i;
for (i = 0; i < bus->max_devices; ++i) {
if (bus->devices[i])
// is this the proper error code?
finish_all_requests(bus->devices[i], NULL, SCSI_NO_HBA, false);
}
scsi->unblock_bus(bus->scsi_cookie);
}
static void
ide_sim_bus_removed(device_node_handle node, ide_bus_info *bus)
{

@@ -664,9 +635,9 @@ ide_sim_bus_removed(device_node_handle node, ide_bus_info *bus)
scsi->block_bus(bus->scsi_cookie);
// make sure, we refuse all new commands
bus->disconnected = true;
// abort all running commands with SCSI_NO_HBA
// (the scheduled function also unblocks the bus when finished)
schedule_synced_pc(bus, &bus->disconnect_syncinfo, NULL);
// XXX
}
@@ -19,7 +19,7 @@
extern scsi_for_sim_interface *scsi;
extern scsi_sim_interface ide_sim_module;
// set sense of current request
// set sense of current ccb
static inline void
set_sense(ide_device_info *device, int sense_key, int sense_asc)
{

@@ -54,21 +54,21 @@ decode_sense_asc_ascq(uint32 combined_sense)
return combined_sense & 0xffff;
}
void finish_request(ide_qrequest *qrequest, bool resubmit);
void finish_reset_queue(ide_qrequest *qrequest);
void finish_retry(ide_qrequest *qrequest);
void finish_all_requests(ide_device_info *device, ide_qrequest *ignore,
void finish_request(ata_request *request, bool resubmit);
void finish_reset_queue(ata_request *request);
void finish_retry(ata_request *request);
void finish_all_requests(ide_device_info *device, ata_request *ignore,
int subsys_status, bool resubmit);
void finish_checksense(ide_qrequest *qrequest);
void finish_checksense(ata_request *request);
// start request by resetting sense
// start ccb by resetting sense
static inline void
start_request(ide_device_info *device, ide_qrequest *qrequest)
start_request(ide_device_info *device, ata_request *request)
{
device->new_combined_sense = 0;
device->subsys_status = SCSI_REQ_CMP;
qrequest->request->device_status = SCSI_STATUS_GOOD;
request->ccb->device_status = SCSI_STATUS_GOOD;
}
@ -50,15 +50,15 @@

/*! Prepare PIO transfer */
void
prep_PIO_transfer(ide_device_info *device, ide_qrequest *qrequest)
prep_PIO_transfer(ide_device_info *device, ata_request *request)
{
SHOW_FLOW0(4, "");

device->left_sg_elem = qrequest->request->sg_count;
device->cur_sg_elem = qrequest->request->sg_list;
device->left_sg_elem = request->ccb->sg_count;
device->cur_sg_elem = request->ccb->sg_list;
device->cur_sg_ofs = 0;
device->has_odd_byte = false;
qrequest->request->data_resid = qrequest->request->data_length;
request->ccb->data_resid = request->ccb->data_length;
}
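
For reference, prep_PIO_transfer above only seeds the cursor fields (cur_sg_elem, cur_sg_ofs, left_sg_elem); the later PIO helpers walk the scatter/gather list through them. A minimal stand-alone sketch of that walk, with made-up names and no device I/O, just to show the bookkeeping:

#include <stddef.h>
#include <stdio.h>

/* stand-in for one S/G element (the physical address is left out) */
struct sg_entry {
	const char *data;
	size_t size;
};

/* consume "want" bytes spread over the S/G list, advancing the cursor */
static size_t sg_consume(const struct sg_entry **cur, size_t *left,
	size_t *offset, size_t want)
{
	size_t done = 0;

	while (done < want && *left > 0) {
		size_t chunk = (*cur)->size - *offset;
		if (chunk > want - done)
			chunk = want - done;

		/* a real driver would copy to/from the device here */
		done += chunk;
		*offset += chunk;

		if (*offset == (*cur)->size) {	/* element exhausted, go to next */
			(*cur)++;
			(*left)--;
			*offset = 0;
		}
	}
	return done;
}

int main(void)
{
	struct sg_entry list[] = { { "abc", 3 }, { "defgh", 5 }, { "ij", 2 } };
	const struct sg_entry *cur = list;
	size_t left = 3, offset = 0;

	printf("consumed %zu of 7 requested bytes\n",
		sg_consume(&cur, &left, &offset, 7));
	printf("cursor now at element %td, offset %zu\n", cur - list, offset);
	return 0;
}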


@ -280,22 +280,22 @@ read_discard_PIO(ide_device_info *device, int length)
B_ERROR - something serious went wrong, sense data was set
*/
status_t
write_PIO_block(ide_qrequest *qrequest, int length)
write_PIO_block(ata_request *request, int length)
{
ide_device_info *device = qrequest->device;
ide_device_info *device = request->device;
int transferred;
status_t err;

transferred = 0;
err = transfer_PIO_block(device, length, true, &transferred);

qrequest->request->data_resid -= transferred;
request->ccb->data_resid -= transferred;

if (err != ERR_TOO_BIG)
return err;

// there may be a pending odd byte - transmit that now
if (qrequest->device->has_odd_byte) {
if (request->device->has_odd_byte) {
uint8 buffer[2];

buffer[0] = device->odd_byte;
@ -303,7 +303,7 @@ write_PIO_block(ide_qrequest *qrequest, int length)

device->has_odd_byte = false;

qrequest->request->data_resid -= 1;
request->ccb->data_resid -= 1;
transferred += 2;

device->bus->controller->write_pio(device->bus->channel_cookie, (uint16 *)buffer, 1, false);
@ -318,7 +318,7 @@ write_PIO_block(ide_qrequest *qrequest, int length)
// Sadly, this behaviour is OK for ATAPI packets, but there is no
// way to tell the device that we don't have any data left;
// only solution is to send zero bytes, though it's BAD BAD BAD
write_discard_PIO(qrequest->device, length - transferred);
write_discard_PIO(request->device, length - transferred);
return ERR_TOO_BIG;
}

@ -327,23 +327,23 @@ write_PIO_block(ide_qrequest *qrequest, int length)
return: see write_PIO_block
*/
status_t
read_PIO_block(ide_qrequest *qrequest, int length)
read_PIO_block(ata_request *request, int length)
{
ide_device_info *device = qrequest->device;
ide_device_info *device = request->device;
int transferred;
status_t err;

transferred = 0;
err = transfer_PIO_block(qrequest->device, length, false, &transferred);
err = transfer_PIO_block(request->device, length, false, &transferred);

qrequest->request->data_resid -= transferred;
request->ccb->data_resid -= transferred;

// if length was odd, there's an extra byte waiting in device->odd_byte
if (device->has_odd_byte) {
// discard byte
device->has_odd_byte = false;
// adjust res_id as the extra byte didn't reach the buffer
++qrequest->request->data_resid;
++request->ccb->data_resid;
}

if (err != ERR_TOO_BIG)
@ -358,6 +358,6 @@ read_PIO_block(ide_qrequest *qrequest, int length)
return err;

SHOW_FLOW(3, "discarding after %d bytes", transferred);
read_discard_PIO(qrequest->device, length - transferred);
read_discard_PIO(request->device, length - transferred);
return ERR_TOO_BIG;
}
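
The odd-byte handling in write_PIO_block/read_PIO_block is easy to misread: PIO moves 16-bit words, so a transfer with an odd byte count has to park the last byte and account for it in data_resid separately. A minimal stand-alone sketch of the idea (hypothetical names, not the driver's transfer_PIO_block):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* toy state mirroring device->has_odd_byte / device->odd_byte */
struct pio_state {
	int has_odd_byte;
	uint8_t odd_byte;
};

/* "send" length bytes from buf as 16-bit words; a trailing odd byte
   is kept back in state until the caller flushes it */
static size_t pio_write_bytes(struct pio_state *state, const uint8_t *buf,
	size_t length, uint16_t *wire, size_t *wireWords)
{
	size_t words = length / 2;
	memcpy(wire + *wireWords, buf, words * 2);
	*wireWords += words;

	if (length & 1) {
		state->has_odd_byte = 1;
		state->odd_byte = buf[length - 1];
	}
	return words * 2;           /* bytes that really went out */
}

static void pio_flush_odd_byte(struct pio_state *state, uint16_t *wire,
	size_t *wireWords)
{
	if (!state->has_odd_byte)
		return;
	uint8_t pair[2] = { state->odd_byte, 0 };   /* pad with zero */
	memcpy(wire + *wireWords, pair, 2);
	(*wireWords)++;
	state->has_odd_byte = 0;
}

int main(void)
{
	struct pio_state state = { 0, 0 };
	uint16_t wire[8];
	size_t wireWords = 0;
	const uint8_t data[5] = { 1, 2, 3, 4, 5 };

	size_t sent = pio_write_bytes(&state, data, sizeof(data), wire, &wireWords);
	pio_flush_odd_byte(&state, wire, &wireWords);

	printf("sent %zu bytes directly, %zu words on the wire\n", sent, wireWords);
	return 0;
}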

@ -22,10 +22,10 @@
/** emulate MODE SENSE 10 command */

static void
ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
ata_mode_sense_10(ide_device_info *device, ata_request *request)
{
scsi_ccb *request = qrequest->request;
scsi_cmd_mode_sense_10 *cmd = (scsi_cmd_mode_sense_10 *)request->cdb;
scsi_ccb *ccb = request->ccb;
scsi_cmd_mode_sense_10 *cmd = (scsi_cmd_mode_sense_10 *)ccb->cdb;
scsi_mode_param_header_10 param_header;
scsi_modepage_control control;
scsi_mode_param_block_desc block_desc;
@ -53,17 +53,17 @@ ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
return;
}

//param_header = (scsi_mode_param_header_10 *)request->data;
//param_header = (scsi_mode_param_header_10 *)ccb->data;
param_header.mode_data_length = B_HOST_TO_BENDIAN_INT16(totalLength - 1);
param_header.medium_type = 0; // XXX standard is a bit vague here
param_header.dev_spec_parameter = *(uint8 *)&devspec;
param_header.block_desc_length
= B_HOST_TO_BENDIAN_INT16(sizeof(scsi_mode_param_block_desc));

copy_sg_data(request, 0, allocationLength, &param_header,
copy_sg_data(ccb, 0, allocationLength, &param_header,
sizeof(param_header), false);

/*block_desc = (scsi_mode_param_block_desc *)(request->data
/*block_desc = (scsi_mode_param_block_desc *)(ccb->data
+ sizeof(*param_header));*/
memset(&block_desc, 0, sizeof(block_desc));
// density is reserved (0), descriptor apply to entire medium (num_blocks=0)
@ -72,10 +72,10 @@ ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
block_desc.med_blocklen = 512 >> 8;
block_desc.low_blocklen = 512 & 0xff;

copy_sg_data(request, sizeof(param_header), allocationLength,
copy_sg_data(ccb, sizeof(param_header), allocationLength,
&block_desc, sizeof(block_desc), false);

/*contr = (scsi_modepage_contr *)(request->data
/*contr = (scsi_modepage_contr *)(ccb->data
+ sizeof(*param_header)
+ ((uint16)param_header->high_block_desc_len << 8)
+ param_header->low_block_desc_len);*/
@ -88,22 +88,22 @@ ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
// lost commands automagically
control.QAM = SCSI_QAM_UNRESTRICTED;

copy_sg_data(request, sizeof(param_header)
copy_sg_data(ccb, sizeof(param_header)
+ B_BENDIAN_TO_HOST_INT16(param_header.block_desc_length),
allocationLength, &control, sizeof(control), false);

// the number of bytes that were transferred to buffer is
// restricted by allocation length and by request data buffer size
// restricted by allocation length and by ccb data buffer size
totalLength = min(totalLength, allocationLength);
totalLength = min(totalLength, request->data_length);
totalLength = min(totalLength, ccb->data_length);

request->data_resid = request->data_length - totalLength;
ccb->data_resid = ccb->data_length - totalLength;
}


/*! Emulate modifying control page */
static bool
ata_mode_select_control_page(ide_device_info *device, ide_qrequest *qrequest,
ata_mode_select_control_page(ide_device_info *device, ata_request *request,
scsi_modepage_control *page)
{
if (page->header.page_length != sizeof(*page) - sizeof(page->header)) {
@ -119,10 +119,10 @@ ata_mode_select_control_page(ide_device_info *device, ide_qrequest *qrequest,

/*! Emulate MODE SELECT 10 command */
static void
ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
ata_mode_select_10(ide_device_info *device, ata_request *request)
{
scsi_ccb *request = qrequest->request;
scsi_cmd_mode_select_10 *cmd = (scsi_cmd_mode_select_10 *)request->cdb;
scsi_ccb *ccb = request->ccb;
scsi_cmd_mode_select_10 *cmd = (scsi_cmd_mode_select_10 *)ccb->cdb;
scsi_mode_param_header_10 param_header;
scsi_modepage_header page_header;
uint32 totalLength;
@ -134,12 +134,12 @@ ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
return;
}

totalLength = min(request->data_length,
totalLength = min(ccb->data_length,
B_BENDIAN_TO_HOST_INT16(cmd->param_list_length));

// first, retrieve page header to get size of different chunks
//param_header = (scsi_mode_param_header_10 *)request->data;
if (!copy_sg_data(request, 0, totalLength, &param_header, sizeof(param_header), true))
//param_header = (scsi_mode_param_header_10 *)ccb->data;
if (!copy_sg_data(ccb, 0, totalLength, &param_header, sizeof(param_header), true))
goto err;

totalLength = min(totalLength,
@ -155,7 +155,7 @@ ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
uint32 pageLength;

// get header to know how long page is
if (!copy_sg_data(request, modepageOffset, totalLength,
if (!copy_sg_data(ccb, modepageOffset, totalLength,
&page_header, sizeof(page_header), true))
goto err;

@ -167,7 +167,7 @@ ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
if (pageLength > sizeof(modepage_buffer))
goto err;

if (!copy_sg_data(request, modepageOffset, totalLength,
if (!copy_sg_data(ccb, modepageOffset, totalLength,
&modepage_buffer, min(pageLength, sizeof(modepage_buffer)), true))
goto err;

@ -175,7 +175,7 @@ ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
// currently, we only support the control mode page
switch (page_header.page_code) {
case SCSI_MODEPAGE_CONTROL:
if (!ata_mode_select_control_page(device, qrequest,
if (!ata_mode_select_control_page(device, request,
(scsi_modepage_control *)modepage_buffer))
return;
break;
@ -192,7 +192,7 @@ ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
if (modepageOffset != totalLength)
goto err;

request->data_resid = request->data_length - totalLength;
ccb->data_resid = ccb->data_length - totalLength;
return;

// if we arrive here, data length was incorrect
@ -203,7 +203,7 @@ err:

/*! Emulate TEST UNIT READY */
static bool
ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
ata_test_unit_ready(ide_device_info *device, ata_request *request)
{
#if 0
SHOW_FLOW0(3, "");
@ -216,16 +216,16 @@ ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
device->tf_param_mask = 0;
device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;

if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
if (!send_command(device, request, true, 15, ide_state_sync_waiting))
return false;

// bits ide_error_mcr | ide_error_mc | ide_error_wp are also valid
// but not requested by TUR; ide_error_wp can safely be ignored, but
// we don't want to loose media change (request) reports
// we don't want to loose media change (ccb) reports
if (!check_output(device, true,
ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
false)) {
// SCSI spec is unclear here: we shouldn't report "media change (request)"
// SCSI spec is unclear here: we shouldn't report "media change (ccb)"
// but what to do if there is one? anyway - we report them
;
}
@ -237,7 +237,7 @@ ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)

/*! Flush internal device cache */
static bool
ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
ata_flush_cache(ide_device_info *device, ata_request *request)
{
#if 0
// we should also ask for FLUSH CACHE support, but everyone denies it
@ -251,7 +251,7 @@ ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
: IDE_CMD_FLUSH_CACHE;

// spec says that this may take more then 30s, how much more?
if (!send_command(device, qrequest, true, 60, ide_state_sync_waiting))
if (!send_command(device, request, true, 60, ide_state_sync_waiting))
return false;

wait_for_sync(device->bus);
@ -266,7 +266,7 @@ ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
load = true - load medium
*/
static bool
ata_load_eject(ide_device_info *device, ide_qrequest *qrequest, bool load)
ata_load_eject(ide_device_info *device, ata_request *request, bool load)
{
#if 0
if (load) {
@ -278,7 +278,7 @@ ata_load_eject(ide_device_info *device, ide_qrequest *qrequest, bool load)
device->tf_param_mask = 0;
device->tf.lba.command = IDE_CMD_MEDIA_EJECT;

if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
if (!send_command(device, request, true, 15, ide_state_sync_waiting))
return false;

wait_for_sync(device->bus);
@ -300,11 +300,11 @@ ata_prevent_allow(ide_device_info *device, bool prevent)

/*! Emulate INQUIRY command */
static void
ata_inquiry(ide_device_info *device, ide_qrequest *qrequest)
ata_inquiry(ide_device_info *device, ata_request *request)
{
scsi_ccb *request = qrequest->request;
scsi_ccb *ccb = request->ccb;
scsi_res_inquiry data;
scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)request->cdb;
scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)ccb->cdb;
uint32 allocation_length = cmd->allocation_length;
uint32 transfer_size;

@ -349,22 +349,22 @@ ata_inquiry(ide_device_info *device, ide_qrequest *qrequest)
sizeof(data.product_ident));
memcpy(data.product_rev, " ", sizeof(data.product_rev));

copy_sg_data(request, 0, allocation_length, &data, sizeof(data), false);
copy_sg_data(ccb, 0, allocation_length, &data, sizeof(data), false);

transfer_size = min(sizeof(data), allocation_length);
transfer_size = min(transfer_size, request->data_length);
transfer_size = min(transfer_size, ccb->data_length);

request->data_resid = request->data_length - transfer_size;
ccb->data_resid = ccb->data_length - transfer_size;
}


/*! Emulate READ CAPACITY command */
static void
read_capacity(ide_device_info *device, ide_qrequest *qrequest)
read_capacity(ide_device_info *device, ata_request *request)
{
scsi_ccb *request = qrequest->request;
scsi_ccb *ccb = request->ccb;
scsi_res_read_capacity data;
scsi_cmd_read_capacity *cmd = (scsi_cmd_read_capacity *)request->cdb;
scsi_cmd_read_capacity *cmd = (scsi_cmd_read_capacity *)ccb->cdb;
uint32 lastBlock;

if (cmd->pmi || cmd->lba) {
@ -378,58 +378,58 @@ read_capacity(ide_device_info *device, ide_qrequest *qrequest)
lastBlock = device->total_sectors - 1;
data.lba = B_HOST_TO_BENDIAN_INT32(lastBlock);

copy_sg_data(request, 0, request->data_length, &data, sizeof(data), false);
request->data_resid = max(request->data_length - sizeof(data), 0);
copy_sg_data(ccb, 0, ccb->data_length, &data, sizeof(data), false);
ccb->data_resid = max(ccb->data_length - sizeof(data), 0);
}
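
The data filled in here follows the standard 8-byte READ CAPACITY(10) payload: last LBA followed by block size, both 32-bit big-endian, which is why the last block rather than the total sector count is stored. A small stand-alone sketch of that layout (plain byte array instead of Haiku's scsi_res_read_capacity):

#include <stdint.h>
#include <stdio.h>

/* byte layout of a READ CAPACITY (10) response: last LBA, then block size,
   both 32-bit big-endian */
static void fill_read_capacity(uint8_t out[8], uint32_t lastBlock,
	uint32_t blockSize)
{
	for (int i = 0; i < 4; i++) {
		out[i]     = (uint8_t)(lastBlock >> (24 - 8 * i));
		out[4 + i] = (uint8_t)(blockSize >> (24 - 8 * i));
	}
}

int main(void)
{
	uint8_t response[8];

	/* e.g. a 1 GiB disk with 512-byte blocks: 2097152 sectors, last LBA 2097151 */
	fill_read_capacity(response, 2097151, 512);

	for (int i = 0; i < 8; i++)
		printf("%02x ", response[i]);
	printf("\n");	/* 00 1f ff ff 00 00 02 00 */
	return 0;
}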


/*! Execute SCSI command */
void
ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
ata_exec_io(ide_device_info *device, ata_request *request)
{
scsi_ccb *request = qrequest->request;
scsi_ccb *ccb = request->ccb;

SHOW_FLOW(3, "command=%x", request->cdb[0]);
SHOW_FLOW(3, "command=%x", ccb->cdb[0]);

// ATA devices have one LUN only
if (request->target_lun != 0) {
request->subsys_status = SCSI_SEL_TIMEOUT;
finish_request(qrequest, false);
if (ccb->target_lun != 0) {
ccb->subsys_status = SCSI_SEL_TIMEOUT;
finish_request(request, false);
return;
}

// starting a request means deleting sense, so don't do it if
// starting a ccb means deleting sense, so don't do it if
// the command wants to read it
if (request->cdb[0] != SCSI_OP_REQUEST_SENSE)
start_request(device, qrequest);
if (ccb->cdb[0] != SCSI_OP_REQUEST_SENSE)
start_request(device, request);

switch (request->cdb[0]) {
switch (ccb->cdb[0]) {
case SCSI_OP_TEST_UNIT_READY:
ata_test_unit_ready(device, qrequest);
ata_test_unit_ready(device, request);
break;

case SCSI_OP_REQUEST_SENSE:
ide_request_sense(device, qrequest);
ide_request_sense(device, request);
return;

case SCSI_OP_FORMAT: /* FORMAT UNIT */
// we could forward request to disk, but modern disks cannot
// be formatted anyway, so we just refuse request
// we could forward ccb to disk, but modern disks cannot
// be formatted anyway, so we just refuse ccb
// (exceptions are removable media devices, but to my knowledge
// they don't have to be formatted as well)
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;

case SCSI_OP_INQUIRY:
ata_inquiry(device, qrequest);
ata_inquiry(device, request);
break;

case SCSI_OP_MODE_SELECT_10:
ata_mode_select_10(device, qrequest);
ata_mode_select_10(device, request);
break;

case SCSI_OP_MODE_SENSE_10:
ata_mode_sense_10(device, qrequest);
ata_mode_sense_10(device, request);
break;

case SCSI_OP_MODE_SELECT_6:
@ -446,7 +446,7 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
break;

case SCSI_OP_START_STOP: {
scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)request->cdb;
scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)ccb->cdb;

// with no LoEj bit set, we should only allow/deny further access
// we ignore that (unsupported for ATA)
@ -455,23 +455,23 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)

if (!cmd->start)
// we must always flush cache if start = 0
ata_flush_cache(device, qrequest);
ata_flush_cache(device, request);

if (cmd->load_eject)
ata_load_eject(device, qrequest, cmd->start);
ata_load_eject(device, request, cmd->start);

break;
}

case SCSI_OP_PREVENT_ALLOW: {
scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)request->cdb;
scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)ccb->cdb;

ata_prevent_allow(device, cmd->prevent);
break;
}

case SCSI_OP_READ_CAPACITY:
read_capacity(device, qrequest);
read_capacity(device, request);
break;

case SCSI_OP_VERIFY:
@ -482,7 +482,7 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)

case SCSI_OP_SYNCHRONIZE_CACHE:
// we ignore range and immediate bit, we always immediately flush everything
ata_flush_cache(device, qrequest);
ata_flush_cache(device, request);
break;

// sadly, there are two possible read/write operation codes;
@ -490,7 +490,7 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
case SCSI_OP_READ_6:
case SCSI_OP_WRITE_6:
{
scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;
scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)ccb->cdb;
uint32 pos;
size_t length;

@ -500,14 +500,14 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)

SHOW_FLOW(3, "READ6/WRITE6 pos=%lx, length=%lx", pos, length);

ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_6);
ata_send_rw(device, request, pos, length, cmd->opcode == SCSI_OP_WRITE_6);
return;
}

case SCSI_OP_READ_10:
case SCSI_OP_WRITE_10:
{
scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;
scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)ccb->cdb;
uint32 pos;
size_t length;

@ -515,10 +515,10 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
length = B_BENDIAN_TO_HOST_INT16(cmd->length);

if (length != 0) {
ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_10);
ata_send_rw(device, request, pos, length, cmd->opcode == SCSI_OP_WRITE_10);
} else {
// we cannot transfer zero blocks (apart from LBA48)
finish_request(qrequest, false);
finish_request(request, false);
}
return;
}
@ -527,5 +527,5 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
}

finish_checksense(qrequest);
finish_checksense(request);
}
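
Both read/write branches above end up in ata_send_rw; they differ only in how the CDB encodes position and length. A stand-alone sketch of the standard decodings (READ/WRITE(6): 21-bit LBA and a length byte where 0 means 256 blocks; READ/WRITE(10): 32-bit LBA and a 16-bit length), independent of the scsi_cmd_rw_* structs used by the driver:

#include <stdint.h>
#include <stdio.h>

/* READ(6)/WRITE(6): 21-bit LBA in bytes 1..3, 8-bit length in byte 4
   (0 means 256 blocks) */
static void decode_rw6(const uint8_t cdb[6], uint32_t *lba, uint32_t *blocks)
{
	*lba = ((uint32_t)(cdb[1] & 0x1f) << 16) | ((uint32_t)cdb[2] << 8) | cdb[3];
	*blocks = cdb[4] != 0 ? cdb[4] : 256;
}

/* READ(10)/WRITE(10): 32-bit LBA in bytes 2..5, 16-bit length in bytes 7..8
   (0 means "transfer nothing") */
static void decode_rw10(const uint8_t cdb[10], uint32_t *lba, uint32_t *blocks)
{
	*lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16)
		| ((uint32_t)cdb[4] << 8) | cdb[5];
	*blocks = ((uint32_t)cdb[7] << 8) | cdb[8];
}

int main(void)
{
	const uint8_t rw6[6] = { 0x08, 0x01, 0x23, 0x45, 0x00, 0x00 };
	const uint8_t rw10[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0x00, 0x40, 0 };
	uint32_t lba, blocks;

	decode_rw6(rw6, &lba, &blocks);
	printf("READ(6):  lba=%u blocks=%u\n", lba, blocks);   /* 74565, 256 */

	decode_rw10(rw10, &lba, &blocks);
	printf("READ(10): lba=%u blocks=%u\n", lba, blocks);   /* 1193046, 64 */
	return 0;
}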

@ -32,7 +32,7 @@ ide_dpc(void *arg)
{
#if 0
ide_bus_info *bus = (ide_bus_info *)arg;
ide_qrequest *qrequest;
ata_request *request;
ide_device_info *device;

TRACE(("\n"));
@ -48,17 +48,17 @@ ide_dpc(void *arg)
// cancel timeout
cancel_timer(&bus->timer.te);

qrequest = bus->active_qrequest;
device = qrequest->device;
request = bus->active_qrequest;
device = request->device;

// not perfect but simple: we simply know who is waiting why
if (device->is_atapi)
packet_dpc(qrequest);
packet_dpc(request);
else {
if (qrequest->uses_dma)
ata_dpc_DMA(qrequest);
if (request->uses_dma)
ata_dpc_DMA(request);
else
ata_dpc_PIO(qrequest);
ata_dpc_PIO(request);
}
} else {
// no request active, so this must be a service request or
@ -139,7 +139,9 @@ ide_irq_handler(ide_bus_info *bus, uint8 status)
if (bus->num_running_reqs == 0) {
IDE_UNLOCK(bus);
return B_UNHANDLED_INTERRUPT;
}
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_command) != B_OK)
goto err_clearint;

bus->state = ide_state_accessing;

@ -154,7 +156,9 @@ ide_irq_handler(ide_bus_info *bus, uint8 status)
bus->state = ide_state_accessing;
bus->sync_wait_timeout = false;

IDE_UNLOCK(bus);
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_command) != B_OK)
goto err_clearint;

release_sem_etc(bus->sync_wait_sem, 1, B_DO_NOT_RESCHEDULE);
return B_INVOKE_SCHEDULER;
@ -199,7 +203,9 @@ cancel_irq_timeout(ide_bus_info *bus)

void
start_waiting(ide_bus_info *bus, uint32 timeout, int new_state)
{
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_command) != B_OK)
goto err_clearint;
int res;

TRACE(("timeout = %u\n", (uint)timeout));
@ -241,21 +247,21 @@ wait_for_sync(ide_bus_info *bus)
static void
ide_timeout_dpc(void *arg)
{
/*
ide_bus_info *bus = (ide_bus_info *)arg;
ide_qrequest *qrequest;
ata_request *request;
ide_device_info *device;

qrequest = bus->active_qrequest;
device = qrequest->device;
device = request->device;

dprintf("ide: ide_timeout_dpc() bus %p, device %p\n", bus, device);

// this also resets overlapped commands
// reset_device(device, qrequest);
// reset_device(device, request);

device->subsys_status = SCSI_CMD_TIMEOUT;

if (qrequest->uses_dma) {
if (request->uses_dma) {
if (++device->DMA_failures >= MAX_DMA_FAILURES) {
dprintf("Disabling DMA because of too many errors\n");

@ -264,7 +270,8 @@ ide_timeout_dpc(void *arg)
}

// let upper layer do the retry
finish_checksense(qrequest);
finish_checksense(request);
*/
}


@ -325,99 +332,6 @@ ide_timeout(timer *arg)


void
init_synced_pc(ide_synced_pc *pc, ide_synced_pc_func func)
{
pc->func = func;
pc->registered = false;
}


void
uninit_synced_pc(ide_synced_pc *pc)
{
if (pc->registered)
panic("Tried to clean up pending synced PC\n");
}


/** schedule a synced pc
* a synced pc gets executed as soon as the bus becomes idle
*/

status_t
schedule_synced_pc(ide_bus_info *bus, ide_synced_pc *pc, void *arg)
{
//TRACE(());

IDE_LOCK(bus);

if (pc->registered) {
// spc cannot be registered twice
TRACE(("already registered\n"));
return B_ERROR;
} else if( bus->state != ata_state_idle ) {
// bus isn't idle - spc must be added to pending list
TRACE(("adding to pending list\n"));

pc->next = bus->synced_pc_list;
bus->synced_pc_list = pc;
pc->arg = arg;
pc->registered = true;

IDE_UNLOCK(bus);
return B_OK;
}

// we have luck - bus is idle, so grab it before
// releasing the lock

TRACE(("exec immediately\n"));

bus->state = ata_state_busy;
IDE_UNLOCK(bus);

TRACE(("go\n"));
pc->func(bus, arg);

TRACE(("finished\n"));
access_finished(bus, bus->first_device);

// meanwhile, we may have rejected SCSI commands;
// usually, the XPT resends them once a command
// has finished, but in this case XPT doesn't know
// about our "private" command, so we have to tell about
// idle bus manually
TRACE(("tell SCSI bus manager about idle bus\n"));
scsi->cont_send_bus(bus->scsi_cookie);
return B_OK;
}
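
The schedule-or-queue pattern above (run the callback immediately if the bus is idle, otherwise park it on synced_pc_list until access_finished() drains the list) can be modelled in a few lines. This is a simplified single-threaded illustration with invented names and no locking, not the bus manager's real code:

#include <stdbool.h>
#include <stdio.h>

typedef void (*pc_func)(void *arg);

struct synced_pc {
	struct synced_pc *next;
	pc_func func;
	void *arg;
	bool registered;
};

static bool bus_busy;                 /* stands in for bus->state */
static struct synced_pc *pending;     /* stands in for bus->synced_pc_list */

static void access_finished(void);

/* run now if idle, otherwise queue until the bus becomes idle */
static void schedule_pc(struct synced_pc *pc, pc_func func, void *arg)
{
	if (pc->registered)
		return;                       /* cannot be registered twice */

	if (bus_busy) {
		pc->func = func;
		pc->arg = arg;
		pc->next = pending;
		pending = pc;
		pc->registered = true;
		return;
	}

	bus_busy = true;
	func(arg);
	access_finished();
}

/* drain the pending list; a callback may schedule new ones, hence the loop */
static void access_finished(void)
{
	while (pending != NULL) {
		struct synced_pc *list = pending;
		pending = NULL;

		for (struct synced_pc *pc = list; pc != NULL; pc = pc->next) {
			pc->registered = false;
			pc->func(pc->arg);
		}
	}
	bus_busy = false;
}

static void say(void *arg)
{
	printf("%s\n", (const char *)arg);
}

int main(void)
{
	struct synced_pc a = { 0 }, b = { 0 };

	bus_busy = true;                  /* pretend a command is running */
	schedule_pc(&a, say, "queued until idle");
	schedule_pc(&b, say, "queued as well");

	access_finished();                /* command done: callbacks run now */
	schedule_pc(&a, say, "bus idle, runs immediately");
	return 0;
}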


/** execute list of synced pcs */

static void
exec_synced_pcs(ide_bus_info *bus, ide_synced_pc *pc_list)
{
ide_synced_pc *pc;

// noone removes items from pc_list, so we don't need lock
// to access entries
for (pc = pc_list; pc; pc = pc->next) {
pc->func(bus, pc->arg);
}

// need lock now as items can be added to pc_list again as soon
// as <registered> is reset
IDE_LOCK(bus);

for (pc = pc_list; pc; pc = pc->next) {
pc->registered = false;
}

IDE_UNLOCK(bus);
}


/** finish bus access;
* check if any device wants to service pending commands + execute synced_pc
@ -428,33 +342,5 @@ access_finished(ide_bus_info *bus, ide_device_info *device)
{
TRACE(("bus = %p, device = %p\n", bus, device));

while (true) {
ide_synced_pc *synced_pc_list;

IDE_LOCK(bus);

// normally, there is always an device; only exception is a
// bus without devices, not sure whether this can really happen though
if (device) {
// if (try_service(device))
// return;
}

// noone wants it, so execute pending synced_pc
if (bus->synced_pc_list == NULL) {
bus->state = ata_state_idle;
IDE_UNLOCK(bus);
return;
}

synced_pc_list = bus->synced_pc_list;
bus->synced_pc_list = NULL;

IDE_UNLOCK(bus);

exec_synced_pcs(bus, synced_pc_list);

// executed synced_pc may have generated other sync_pc,
// thus the loop
}
// this would be the correct place to called synced pc
}