Fixed all warnings in scsi2ata.c.
Style cleanup.

git-svn-id: file:///srv/svn/repos/haiku/trunk/current@10297 a95241bf-73f2-0310-859d-f6bbb57e9c96
parent 4e9c28deed
commit d99c404f77
@@ -1,7 +1,7 @@
 /*
-** Copyright 2002/03, Thomas Kurschel. All rights reserved.
-** Distributed under the terms of the OpenBeOS License.
+* Copyright 2002/03, Thomas Kurschel. All rights reserved.
+* Distributed under the terms of the MIT License.
 */

 /*
 Part of Open IDE bus manager
@@ -25,44 +25,54 @@
 // maximum number send tries before giving up
 #define MAX_FAILED_SEND 1

-// busy-wait for data request going high
-bool wait_for_drq( ide_device_info *device )
+/** busy-wait for data request going high */
+
+bool
+wait_for_drq(ide_device_info *device)
 {
-return ide_wait( device, ide_status_drq, 0, true, 10000000 );
+return ide_wait(device, ide_status_drq, 0, true, 10000000);
 }


-// busy-wait for data request going low
-bool wait_for_drqdown( ide_device_info *device )
+/** busy-wait for data request going low */
+bool
+wait_for_drqdown(ide_device_info *device)
 {
-return ide_wait( device, 0, ide_status_drq, true, 1000000 );
+return ide_wait(device, 0, ide_status_drq, true, 1000000);
 }


-// busy-wait for device ready
-bool wait_for_drdy( ide_device_info *device )
+/** busy-wait for device ready */
+bool
+wait_for_drdy(ide_device_info *device)
 {
-return ide_wait( device, ide_status_drdy, ide_status_bsy, false, 5000000 );
+return ide_wait(device, ide_status_drdy, ide_status_bsy, false, 5000000);
 }


-// reset entire IDE bus
-// all active request apart from <ignore> are resubmitted
-bool reset_bus( ide_device_info *device, ide_qrequest *ignore )
+/** reset entire IDE bus
+* all active request apart from <ignore> are resubmitted
+*/
+
+bool
+reset_bus(ide_device_info *device, ide_qrequest *ignore)
 {
 ide_bus_info *bus = device->bus;
 ide_controller_interface *controller = bus->controller;
 ide_channel_cookie channel = bus->channel;

-FAST_LOG0( bus->log, ev_ide_reset_bus );
+FAST_LOG0(bus->log, ev_ide_reset_bus);

-if( device->reconnect_timer_installed ) {
-cancel_timer( &device->reconnect_timer.te );
+if (device->reconnect_timer_installed) {
+cancel_timer(&device->reconnect_timer.te);
 device->reconnect_timer_installed = false;
 }

-if( device->other_device->reconnect_timer_installed ) {
-cancel_timer( &device->other_device->reconnect_timer.te );
+if (device->other_device->reconnect_timer_installed) {
+cancel_timer(&device->other_device->reconnect_timer.te);
 device->other_device->reconnect_timer_installed = false;
 }

@@ -70,74 +80,75 @@ bool reset_bus( ide_device_info *device, ide_qrequest *ignore )
 // also, deactivate IRQ
 // (as usual, we will get an IRQ on disabling, but as we leave them
 // disabled for 2 ms, this false report is ignored)
-if( controller->write_device_control( channel,
-ide_devctrl_nien | ide_devctrl_srst | ide_devctrl_bit3 ) != B_OK )
+if (controller->write_device_control(channel,
+ide_devctrl_nien | ide_devctrl_srst | ide_devctrl_bit3) != B_OK)
 goto err0;

-spin( 5 );

-if( controller->write_device_control( channel,
-ide_devctrl_nien | ide_devctrl_bit3 ) != B_OK )
+spin(5);
+if (controller->write_device_control(channel, ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
 goto err0;

 // let devices wake up
-snooze( 2000 );
+snooze(2000);

 // ouch, we have to wait up to 31 seconds!
-if( !ide_wait( device, 0, ide_status_bsy, true, 31000000 )) {
+if (!ide_wait(device, 0, ide_status_bsy, true, 31000000)) {

 // as we don't know which of the devices is broken
 // we leave them both alive
-if( controller->write_device_control( channel, ide_devctrl_bit3 ) != B_OK )
+if (controller->write_device_control(channel, ide_devctrl_bit3) != B_OK)
 goto err0;

-set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
+set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
 goto err1;
 }

-if( controller->write_device_control( channel, ide_devctrl_bit3 ) != B_OK )
+if (controller->write_device_control(channel, ide_devctrl_bit3) != B_OK)
 goto err0;

-finish_all_requests( bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true );
-finish_all_requests( bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true );
+finish_all_requests(bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true);
+finish_all_requests(bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true);

 return true;

 err0:
-set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
+set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);

 err1:
-finish_all_requests( bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true );
-finish_all_requests( bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true );
+finish_all_requests(bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true);
+finish_all_requests(bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true);

 //xpt->call_async( bus->xpt_cookie, -1, -1, AC_BUS_RESET, NULL, 0 );
 return false;
 }


-// execute packet device reset.
-// resets entire bus on fail or if device is not atapi;
-// all requests but <ignore> are resubmitted
-bool reset_device( ide_device_info *device, ide_qrequest *ignore )
+/** execute packet device reset.
+* resets entire bus on fail or if device is not atapi;
+* all requests but <ignore> are resubmitted
+*/
+
+bool
+reset_device(ide_device_info *device, ide_qrequest *ignore)
 {
 ide_bus_info *bus = device->bus;
 status_t res;
 uint8 orig_command;

-FAST_LOG1( bus->log, ev_ide_reset_device, device->is_device1 );
-SHOW_FLOW0( 3, "" );

-if( !device->is_atapi )
+FAST_LOG1(bus->log, ev_ide_reset_device, device->is_device1);
+SHOW_FLOW0(3, "");
+
+if (!device->is_atapi)
 goto err;

-if( device->reconnect_timer_installed ) {
-cancel_timer( &device->reconnect_timer.te );
+if (device->reconnect_timer_installed) {
+cancel_timer(&device->reconnect_timer.te);
 device->reconnect_timer_installed = false;
 }

 // select device
-if( bus->controller->write_command_block_regs( bus->channel, &device->tf,
-ide_mask_device_head ) != B_OK )
+if (bus->controller->write_command_block_regs(bus->channel, &device->tf,
+ide_mask_device_head) != B_OK)
 goto err;

 // safe original command to let caller restart it
@@ -146,305 +157,311 @@ bool reset_device( ide_device_info *device, ide_qrequest *ignore )
 // send device reset, independ of current device state
 // (that's the point of a reset)
 device->tf.write.command = IDE_CMD_DEVICE_RESET;
-res = bus->controller->write_command_block_regs( bus->channel,
-&device->tf, ide_mask_command );
+res = bus->controller->write_command_block_regs(bus->channel,
+&device->tf, ide_mask_command);
 device->tf.write.command = orig_command;

-if( res != B_OK )
+if (res != B_OK)
 goto err;

 // don't know how long to wait, but 31 seconds, like soft reset,
 // should be enough
-if( !ide_wait( device, 0, ide_status_bsy, true, 31000000 ))
+if (!ide_wait(device, 0, ide_status_bsy, true, 31000000))
 goto err;

 // alright, resubmit all requests
-finish_all_requests( device, ignore, SCSI_SCSI_BUS_RESET, true );
+finish_all_requests(device, ignore, SCSI_SCSI_BUS_RESET, true);

-SHOW_FLOW0( 3, "done" );
+SHOW_FLOW0(3, "done");
 return true;

 err:
 // do the hard way
-return reset_bus( device, ignore );
+return reset_bus(device, ignore);
 }

-// new_state must be either accessing, async_waiting or sync_waiting
-// param_mask must not include command register
-bool send_command( ide_device_info *device, ide_qrequest *qrequest,
-bool need_drdy, uint32 timeout, ide_bus_state new_state )
+/** new_state must be either accessing, async_waiting or sync_waiting
+* param_mask must not include command register
+*/
+bool
+send_command(ide_device_info *device, ide_qrequest *qrequest,
+bool need_drdy, uint32 timeout, ide_bus_state new_state)
 {
 ide_bus_info *bus = device->bus;
 bigtime_t irq_disabled_at = 0; // make compiler happy
 uint8 num_retries = 0;
 bool irq_guard;

-FAST_LOGN( bus->log, ev_ide_send_command, 15, device->is_device1, (uint32)qrequest,
+FAST_LOGN(bus->log, ev_ide_send_command, 15, device->is_device1, (uint32)qrequest,
 device->tf.raw.r[0], device->tf.raw.r[1], device->tf.raw.r[2],
 device->tf.raw.r[3], device->tf.raw.r[4], device->tf.raw.r[5],
 device->tf.raw.r[6],
 device->tf.raw.r[7], device->tf.raw.r[8], device->tf.raw.r[9],
-device->tf.raw.r[10], device->tf.raw.r[11] );
+device->tf.raw.r[10], device->tf.raw.r[11]);

 retry:
 irq_guard = bus->num_running_reqs > 1;

-SHOW_FLOW( 3, "qrequest=%p, request=%p", qrequest,
-qrequest ? qrequest->request : NULL );
+SHOW_FLOW(3, "qrequest=%p, request=%p", qrequest,
+qrequest ? qrequest->request : NULL);

 // if there are pending requests, IRQs must be disabled to
 // not mix up IRQ reasons
 // XXX can we avoid that with the IDE_LOCK trick? It would
 // save some work and the bug workaround!
-if( irq_guard ) {
-if( bus->controller->write_device_control( bus->channel,
-ide_devctrl_nien | ide_devctrl_bit3 ) != B_OK )
+if (irq_guard) {
+if (bus->controller->write_device_control(bus->channel,
+ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
 goto err;

 irq_disabled_at = system_time();
 }

 // select device
-if( bus->controller->write_command_block_regs( bus->channel, &device->tf,
-ide_mask_device_head ) != B_OK )
+if (bus->controller->write_command_block_regs(bus->channel, &device->tf,
+ide_mask_device_head) != B_OK)
 goto err;

 bus->active_device = device;

-if( !ide_wait( device, 0, ide_status_bsy | ide_status_drq, false, 50000 )) {
+if (!ide_wait(device, 0, ide_status_bsy | ide_status_drq, false, 50000)) {
 uint8 status;

-SHOW_FLOW0( 1, "device is not ready" );
+SHOW_FLOW0(1, "device is not ready");

-status = bus->controller->get_altstatus( bus->channel );
-if( status == 0xff ) {
+status = bus->controller->get_altstatus(bus->channel);
+if (status == 0xff) {
 // there is no device (should happen during detection only)
-SHOW_FLOW0( 1, "there is no device" );
+SHOW_FLOW0(1, "there is no device");

 // device detection recognizes this code as "all hope lost", so
 // neither replace it nor use it anywhere else
 device->subsys_status = SCSI_TID_INVALID;
 return false;
 }

 // reset device and retry
-if( reset_device( device, qrequest ) && ++num_retries <= MAX_FAILED_SEND ) {
-SHOW_FLOW0( 1, "retrying" );
+if (reset_device(device, qrequest) && ++num_retries <= MAX_FAILED_SEND) {
+SHOW_FLOW0(1, "retrying");
 goto retry;
 }

-SHOW_FLOW0( 1, "giving up" );
+SHOW_FLOW0(1, "giving up");

 // reset to often - abort request
 device->subsys_status = SCSI_SEL_TIMEOUT;
 return false;
 }

-if( need_drdy &&
-(bus->controller->get_altstatus( bus->channel ) & ide_status_drdy) == 0 )
-{
-SHOW_FLOW0( 3, "drdy not set" );
+if (need_drdy
+&& (bus->controller->get_altstatus(bus->channel) & ide_status_drdy) == 0) {
+SHOW_FLOW0(3, "drdy not set");
 device->subsys_status = SCSI_SEQUENCE_FAIL;
 return false;
 }

 // write parameters
-if( bus->controller->write_command_block_regs( bus->channel, &device->tf,
-device->tf_param_mask ) != B_OK )
+if (bus->controller->write_command_block_regs(bus->channel, &device->tf,
+device->tf_param_mask) != B_OK)
 goto err;

-if( irq_guard ) {
+if (irq_guard) {
 // IRQ may be fired by service requests and by the process of disabling(!)
 // them (I heard this is caused by edge triggered PCI IRQs)

 // wait at least 50 µs to catch all pending irq's
 // (at my system, up to 30 µs elapsed)

 // additionally, old drives (at least my IBM-DTTA-351010) loose
 // sync if they are pushed too hard - on heavy overlapped write
 // stress this drive tends to forget outstanding requests,
 // waiting at least 50 µs seems(!) to solve this
-while( system_time() - irq_disabled_at < MAX_IRQ_DELAY )
-spin( 1 );
+while (system_time() - irq_disabled_at < MAX_IRQ_DELAY)
+spin(1);

 }

 // if we will start waiting once the command is sent, we have to
 // lock the bus before sending; this way, IRQs that are fired
 // shortly before/after sending of command are delayed until the
 // command is really sent (start_waiting unlocks the bus) and then
 // the IRQ handler can check savely whether the IRQ really signals
 // finishing of command or not by testing the busy-signal of the device
-if( new_state != ide_state_accessing ) {
-IDE_LOCK( bus );
+if (new_state != ide_state_accessing) {
+IDE_LOCK(bus);
 }

-if( irq_guard ) {
+if (irq_guard) {
 // now it's clear why IRQs gets fired, so we can enable them again
-if( bus->controller->write_device_control( bus->channel,
-ide_devctrl_bit3 ) != B_OK )
+if (bus->controller->write_device_control(bus->channel,
+ide_devctrl_bit3) != B_OK)
 goto err1;
 }

 // write command code - this will start the actual command
-SHOW_FLOW( 3, "Writing command 0x%02x", (int)device->tf.write.command );
-if( bus->controller->write_command_block_regs( bus->channel,
-&device->tf, ide_mask_command ) != B_OK )
+SHOW_FLOW(3, "Writing command 0x%02x", (int)device->tf.write.command);
+if (bus->controller->write_command_block_regs(bus->channel,
+&device->tf, ide_mask_command) != B_OK)
 goto err1;

 // start waiting now; also un-blocks IRQ handler (see above)
-if( new_state != ide_state_accessing ) {
-start_waiting( bus, timeout, new_state );
-}
+if (new_state != ide_state_accessing)
+start_waiting(bus, timeout, new_state);

 return true;

 err1:
-if( timeout > 0 ) {
+if (timeout > 0) {
 bus->state = ide_state_accessing;
-IDE_UNLOCK( bus );
+IDE_UNLOCK(bus);
 }

 err:
 device->subsys_status = SCSI_HBA_ERR;
 return false;
 }


-// busy-wait for device
-// mask - bits of status register that must be set
-// not_mask - bits of status register that must not be set
-// check_err - abort if error bit is set
-// timeout - waiting timeout
-// return: true on success
-bool ide_wait( ide_device_info *device, int mask, int not_mask,
-bool check_err, bigtime_t timeout )
+/** busy-wait for device
+* mask - bits of status register that must be set
+* not_mask - bits of status register that must not be set
+* check_err - abort if error bit is set
+* timeout - waiting timeout
+* return: true on success
+*/
+bool
+ide_wait(ide_device_info *device, int mask, int not_mask,
+bool check_err, bigtime_t timeout)
 {
 ide_bus_info *bus = device->bus;
 bigtime_t start_time = system_time();

-while( 1 ) {
+while (1) {
 bigtime_t elapsed_time;
 int status;

 // do spin before test as the device needs 400 ns
 // to update its status register
-spin( 1 );
+spin(1);

-status = bus->controller->get_altstatus( bus->channel );
+status = bus->controller->get_altstatus(bus->channel);

-if( (status & mask) == mask &&
-(status & not_mask) == 0 )
+if ((status & mask) == mask && (status & not_mask) == 0)
 return true;

-if( check_err && (status & ide_status_err) != 0 ) {
-set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
+if (check_err && (status & ide_status_err) != 0) {
+set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
 return false;
 }

 elapsed_time = system_time() - start_time;

-if( elapsed_time > timeout ) {
-set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
+if (elapsed_time > timeout) {
+set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
 return false;
 }

 // if we've waited more then 5ms, we start passive waiting
 // to reduce system load
-if( elapsed_time > 5000 )
-snooze( elapsed_time / 10 );
+if (elapsed_time > 5000)
+snooze(elapsed_time / 10);
 }
 }


-// tell device to continue queued command
-// on return, no waiting is active!
-// tag - will contain tag of command to be continued
-// return: true - request continued
-// false - something went wrong; sense set
-bool device_start_service( ide_device_info *device, int *tag )
+/** tell device to continue queued command
+* on return, no waiting is active!
+* tag - will contain tag of command to be continued
+* return: true - request continued
+* false - something went wrong; sense set
+*/
+
+bool
+device_start_service(ide_device_info *device, int *tag)
 {
 ide_bus_info *bus = device->bus;

-FAST_LOG1( bus->log, ev_ide_device_start_service, device->is_device1 );
+FAST_LOG1(bus->log, ev_ide_device_start_service, device->is_device1);

 device->tf.write.command = IDE_CMD_SERVICE;
 device->tf.queued.mode = ide_mode_lba;

-if( bus->active_device != device ) {
+if (bus->active_device != device) {
 // don't apply any precautions in terms of IRQ
 // -> the bus is in accessing state, so IRQs are ignored anyway
-if( bus->controller->write_command_block_regs( bus->channel,
-&device->tf, ide_mask_device_head ) != B_OK )
+if (bus->controller->write_command_block_regs(bus->channel,
+&device->tf, ide_mask_device_head) != B_OK)
 // on error, pretend that this device asks for service
 // -> the disappeared controller will be recognized soon ;)
 return true;

 bus->active_device = device;

 // give one clock (400 ns) to take notice
-spin( 1 );
+spin(1);
 }

 // here we go...
-if( bus->controller->write_command_block_regs( bus->channel, &device->tf,
-ide_mask_command ) != B_OK )
+if (bus->controller->write_command_block_regs(bus->channel, &device->tf,
+ide_mask_command) != B_OK)
 goto err;

 // we need to wait for the device as we want to read the tag
-if( !ide_wait( device, ide_status_drdy, ide_status_bsy, false, 1000000 ))
+if (!ide_wait(device, ide_status_drdy, ide_status_bsy, false, 1000000))
 return false;

 // read tag
-if( bus->controller->read_command_block_regs( bus->channel, &device->tf,
-ide_mask_sector_count ) != B_OK )
+if (bus->controller->read_command_block_regs(bus->channel, &device->tf,
+ide_mask_sector_count) != B_OK)
 goto err;

-if( device->tf.queued.release ) {
+if (device->tf.queued.release) {
 // bus release is the wrong answer to a service request
-set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
+set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
 return false;
 }

-*tag = device->tf.queued.tag;

-FAST_LOG2( bus->log, ev_ide_device_start_service2, device->is_device1, *tag );

+*tag = device->tf.queued.tag;

+FAST_LOG2(bus->log, ev_ide_device_start_service2, device->is_device1, *tag);
 return true;

 err:
-set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
+set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
 return false;
 }


-// check device whether it wants to continue queued request
-bool check_service_req( ide_device_info *device )
+/** check device whether it wants to continue queued request */
+bool
+check_service_req(ide_device_info *device)
 {
 ide_bus_info *bus = device->bus;
 int status;

 // fast bailout if there is no request pending
-if( device->num_running_reqs == 0 )
+if (device->num_running_reqs == 0)
 return false;

-if( bus->active_device != device ) {
+if (bus->active_device != device) {
 // don't apply any precautions in terms of IRQ
 // -> the bus is in accessing state, so IRQs are ignored anyway
-if( bus->controller->write_command_block_regs( bus->channel,
-&device->tf, ide_mask_device_head ) != B_OK )
+if (bus->controller->write_command_block_regs(bus->channel,
+&device->tf, ide_mask_device_head) != B_OK)
 // on error, pretend that this device asks for service
 // -> the disappeared controller will be recognized soon ;)
 return true;

 bus->active_device = device;

 // give one clock (400 ns) to take notice
-spin( 1 );
+spin(1);
 }

-status = bus->controller->get_altstatus( bus->channel );
+status = bus->controller->get_altstatus(bus->channel);

 return (status & ide_status_service) != 0;
 }

@@ -1,7 +1,7 @@
 /*
-** Copyright 2002/03, Thomas Kurschel. All rights reserved.
-** Distributed under the terms of the OpenBeOS License.
+* Copyright 2002/03, Thomas Kurschel. All rights reserved.
+* Distributed under the terms of the MIT License.
 */

 /*
 Part of Open IDE bus manager
@@ -29,7 +29,7 @@ ide_channel_added(pnp_node_handle parent)
 char *str = NULL, *controller_name = NULL;
 uint32 channel_id;

-SHOW_FLOW0( 2, "" );
+SHOW_FLOW0(2, "");

 if (pnp->get_attr_string(parent, PNP_DRIVER_TYPE, &str, false) != B_OK
 || strcmp(str, IDE_BUS_TYPE_NAME) != 0)
@@ -1,7 +1,7 @@
 /*
-** Copyright 2002/03, Thomas Kurschel. All rights reserved.
-** Distributed under the terms of the Haiku License.
+* Copyright 2002/03, Thomas Kurschel. All rights reserved.
+* Distributed under the terms of the MIT License.
 */

 /*
 Part of Open IDE bus manager
@@ -66,28 +66,26 @@ static fast_log_event_type ide_events[] =
 static void disconnect_worker( ide_bus_info *bus, void *arg );
 static void set_check_condition( ide_qrequest *qrequest );

-// check whether this request can be within device
-static inline bool is_queuable( ide_device_info *device, scsi_ccb *request )
+/** check whether this request can be within device */
+
+static inline bool
+is_queuable(ide_device_info *device, scsi_ccb *request)
 {
 int opcode = request->cdb[0];

 // XXX disable queuing
-if( !device->CQ_enabled )
+if (!device->CQ_enabled)
 return false;

 // make sure the caller allows queuing
-if( (request->flags & SCSI_ORDERED_QTAG) != 0 )
+if ((request->flags & SCSI_ORDERED_QTAG) != 0)
 return false;

 // for atapi, all commands could be queued, but all
 // atapi devices I know don't support queuing anyway
-if( opcode == SCSI_OP_READ_6 ||
-opcode == SCSI_OP_WRITE_6 ||
-opcode == SCSI_OP_READ_10 ||
-opcode == SCSI_OP_WRITE_10 )
-return true;
-else
-return false;
+return opcode == SCSI_OP_READ_6 || opcode == SCSI_OP_WRITE_6
+|| opcode == SCSI_OP_READ_10 || opcode == SCSI_OP_WRITE_10;
 }


@@ -199,7 +197,7 @@ sim_path_inquiry(ide_bus_info *bus, scsi_path_inquiry *info)
 {
 char *controller_name;

-SHOW_FLOW0( 4, "" );
+SHOW_FLOW0(4, "");

 if (bus->disconnected)
 return SCSI_NO_HBA;
@@ -256,7 +254,7 @@ sim_scan_bus(ide_bus_info *bus)
 {
 int i;

-SHOW_FLOW0( 4, "" );
+SHOW_FLOW0(4, "");

 if (bus->disconnected)
 return SCSI_NO_HBA;
@@ -334,10 +332,10 @@ create_sense(ide_device_info *device, scsi_sense *sense)
 void
 finish_checksense(ide_qrequest *qrequest)
 {
-SHOW_FLOW( 3, "%p, subsys_status=%d, sense=%x",
+SHOW_FLOW(3, "%p, subsys_status=%d, sense=%x",
 qrequest->request,
 qrequest->request->subsys_status,
-(int)qrequest->device->new_combined_sense );
+(int)qrequest->device->new_combined_sense);

 qrequest->request->subsys_status = qrequest->device->subsys_status;

@@ -416,7 +414,7 @@ set_check_condition(ide_qrequest *qrequest)
 scsi_ccb *request = qrequest->request;
 ide_device_info *device = qrequest->device;

-SHOW_FLOW0( 3, "" );
+SHOW_FLOW0(3, "");

 request->subsys_status = SCSI_REQ_CMP_ERR;
 request->device_status = SCSI_STATUS_CHECK_CONDITION;
@@ -426,7 +424,7 @@ set_check_condition(ide_qrequest *qrequest)
 scsi_sense sense;
 int sense_len;

-SHOW_FLOW0( 3, "autosense" );
+SHOW_FLOW0(3, "autosense");

 // we cannot copy sense directly as sense buffer may be too small
 create_sense(device, &sense);
@@ -1,7 +1,7 @@
 /*
-** Copyright 2002-04, Thomas Kurschel. All rights reserved.
-** Distributed under the terms of the OpenBeOS License.
+* Copyright 2002-04, Thomas Kurschel. All rights reserved.
+* Distributed under the terms of the MIT License.
 */

 /*
 Part of Open IDE bus manager
@@ -46,11 +46,14 @@
 // internal error code if scatter gather table is too short
 #define ERR_TOO_BIG (B_ERRORS_END + 1)

-// prepare PIO transfer
-void prep_PIO_transfer( ide_device_info *device, ide_qrequest *qrequest )
+/** prepare PIO transfer */
+
+void
+prep_PIO_transfer(ide_device_info *device, ide_qrequest *qrequest)
 {
-SHOW_FLOW0( 4, "" );
+SHOW_FLOW0(4, "");

 device->left_sg_elem = qrequest->request->sg_cnt;
 device->cur_sg_elem = qrequest->request->sg_list;
 device->cur_sg_ofs = 0;
@@ -58,152 +61,155 @@ void prep_PIO_transfer( ide_device_info *device, ide_qrequest *qrequest )
 qrequest->request->data_resid = qrequest->request->data_len;
 }

-// transfer virtually continuous data
-static inline status_t transfer_PIO_virtcont( ide_device_info *device, char *virt_addr, int length,
-bool write, int *transferred )
+/** transfer virtually continuous data */
+static inline status_t
+transfer_PIO_virtcont(ide_device_info *device, char *virt_addr, int length,
+bool write, int *transferred)
 {
 ide_bus_info *bus = device->bus;
 ide_controller_interface *controller = bus->controller;
 ide_channel_cookie cookie = bus->channel;

-if( write ) {
+if (write) {
 // if there is a byte left from last chunk, transmit it together
 // with the first byte of the current chunk (IDE requires 16 bits
 // to be transmitted at once)
-if( device->has_odd_byte ) {
+if (device->has_odd_byte) {
 uint8 buffer[2];

 buffer[0] = device->odd_byte;
 buffer[1] = *virt_addr++;

-controller->write_pio( cookie, (uint16 *)buffer, 1, false );
+controller->write_pio(cookie, (uint16 *)buffer, 1, false);

 --length;
 *transferred += 2;
 }

-controller->write_pio( cookie, (uint16 *)virt_addr, length / 2, false );
+controller->write_pio(cookie, (uint16 *)virt_addr, length / 2, false);

 // take care if chunk size was odd, which means that 1 byte remains
 virt_addr += length & ~1;
 *transferred += length & ~1;

 device->has_odd_byte = (length & 1) != 0;

-if( device->has_odd_byte )
+if (device->has_odd_byte)
 device->odd_byte = *virt_addr;

 } else {
 // if we read one byte too much last time, push it into current chunk
-if( device->has_odd_byte ) {
+if (device->has_odd_byte) {
 *virt_addr++ = device->odd_byte;
 --length;
 }

-SHOW_FLOW( 4, "Reading PIO to %p, %d bytes", virt_addr, length );
+SHOW_FLOW(4, "Reading PIO to %p, %d bytes", virt_addr, length);

-controller->read_pio( cookie, (uint16 *)virt_addr, length / 2, false );
+controller->read_pio(cookie, (uint16 *)virt_addr, length / 2, false);

 // take care of odd chunk size;
 // in this case we read 1 byte to few!
 virt_addr += length & ~1;
 *transferred += length & ~1;

 device->has_odd_byte = (length & 1) != 0;

-if( device->has_odd_byte ) {
+if (device->has_odd_byte) {
 uint8 buffer[2];

 // now read the missing byte; as we have to read 2 bytes at once,
 // we'll read one byte too much
-controller->read_pio( cookie, (uint16 *)buffer, 1, false );
+controller->read_pio(cookie, (uint16 *)buffer, 1, false);

 *virt_addr = buffer[0];
 device->odd_byte = buffer[1];

 *transferred += 2;
 }
 }

 return B_OK;
 }


-// transmit physically continuous data
+/** transmit physically continuous data */

 static inline status_t
 transfer_PIO_physcont(ide_device_info *device, addr_t phys_addr,
 int length, bool write, int *transferred)
 {
 // we must split up chunk into B_PAGE_SIZE blocks as we can map only
 // one page into address space at once
-while( length > 0 ) {
+while (length > 0) {
 void *virt_addr;
 int page_left, cur_len;
 status_t err;

-SHOW_FLOW( 4, "Transmitting to/from physical address %x, %d bytes left", (int)phys_addr,
-length );
+SHOW_FLOW(4, "Transmitting to/from physical address %x, %d bytes left", (int)phys_addr,
+length);

 if (map_mainmemory(phys_addr, &virt_addr) != B_OK) {
 // ouch: this should never ever happen
-set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
+set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
 return B_ERROR;
 }

 // if chunks starts in the middle of a page, we have even less then
 // a page left
 page_left = B_PAGE_SIZE - phys_addr % B_PAGE_SIZE;

-SHOW_FLOW( 4, "page_left=%d", page_left );

-cur_len = min( page_left, length );

-SHOW_FLOW( 4, "cur_len=%d", cur_len );

-err = transfer_PIO_virtcont( device, (char *)virt_addr,
-cur_len, write, transferred );

-if( err != B_OK ) {
-unmap_mainmemory( virt_addr );
+SHOW_FLOW(4, "page_left=%d", page_left);
+cur_len = min(page_left, length);

+SHOW_FLOW(4, "cur_len=%d", cur_len);

+err = transfer_PIO_virtcont(device, (char *)virt_addr,
+cur_len, write, transferred);
+if (err != B_OK) {
+unmap_mainmemory(virt_addr);
 return err;
 }

 length -= cur_len;
 phys_addr += cur_len;

-unmap_mainmemory( virt_addr );
+unmap_mainmemory(virt_addr);
 }

 return B_OK;
 }


-// transfer PIO block from/to buffer
-static inline int transfer_PIO_block( ide_device_info *device, int length, bool write,
-int *transferred )
+/** transfer PIO block from/to buffer */
+static inline int
+transfer_PIO_block(ide_device_info *device, int length, bool write, int *transferred)
 {
 // data is usually split up into multiple scatter/gather blocks
-while( length > 0 ) {
+while (length > 0) {
 int left_bytes, cur_len;
 status_t err;

-if( device->left_sg_elem == 0 )
+if (device->left_sg_elem == 0)
 // ups - buffer too small (for ATAPI data, this is OK)
 return ERR_TOO_BIG;

 // we might have transmitted part of a scatter/entry already!
 left_bytes = device->cur_sg_elem->size - device->cur_sg_ofs;

-cur_len = min( left_bytes, length );
+cur_len = min(left_bytes, length);

-err = transfer_PIO_physcont( device,
+err = transfer_PIO_physcont(device,
 (addr_t)device->cur_sg_elem->address + device->cur_sg_ofs,
-cur_len, write, transferred );
+cur_len, write, transferred);

-if( err != B_OK )
+if (err != B_OK)
 return err;

-if( left_bytes <= length ) {
+if (left_bytes <= length) {
 // end of one scatter/gather block reached
 device->cur_sg_ofs = 0;
 ++device->cur_sg_elem;
@@ -212,139 +218,150 @@ static inline int transfer_PIO_block( ide_device_info *device, int length, bool
 // still in the same block
 device->cur_sg_ofs += cur_len;
 }

 length -= cur_len;
 }

 return B_OK;
 }

-// write zero data (required for ATAPI if we ran out of data)
-static void write_discard_PIO( ide_device_info *device, int length )
+/** write zero data (required for ATAPI if we ran out of data) */
+
+static void
+write_discard_PIO(ide_device_info *device, int length)
 {
 ide_bus_info *bus = device->bus;
 uint8 buffer[32];

-memset( buffer, 0, sizeof( buffer ));
+memset(buffer, 0, sizeof(buffer));

 // we transmit 32 zero-bytes at once
 // (not very efficient but easy to implement - you get what you deserve
 // when you don't provide enough buffer)
-while( length > 0 ) {
+while (length > 0) {
 int cur_len;

 // if device asks for odd number of bytes, append an extra byte to
 // make length even (this is the "length + 1" term)
-cur_len = min( length + 1, (int)(sizeof( buffer ))) / 2;
+cur_len = min(length + 1, (int)(sizeof(buffer))) / 2;

-bus->controller->write_pio( bus->channel, (uint16 *)buffer, cur_len, false );
+bus->controller->write_pio(bus->channel, (uint16 *)buffer, cur_len, false);

 length -= cur_len * 2;
 }
 }


-// read PIO data and discard it (required for ATAPI if buffer was too small)
-static void read_discard_PIO( ide_device_info *device, int length )
+/** read PIO data and discard it (required for ATAPI if buffer was too small) */
+static void
+read_discard_PIO(ide_device_info *device, int length)
 {
 ide_bus_info *bus = device->bus;
 uint8 buffer[32];

 // discard 32 bytes at once (see write_discard_PIO)
-while( length > 0 ) {
+while (length > 0) {
 int cur_len;

 // read extra byte if length is odd (that's the "length + 1")
-cur_len = min( length + 1, (int)sizeof( buffer )) / 2;
+cur_len = min(length + 1, (int)sizeof(buffer)) / 2;

-bus->controller->read_pio( bus->channel, (uint16 *)buffer, cur_len, false );
+bus->controller->read_pio(bus->channel, (uint16 *)buffer, cur_len, false);

 length -= cur_len * 2;
 }
 }


-// write PIO data
-// return: there are 3 possible results
-// NO_ERROR - everything's nice and groovy
-// ERR_TOO_BIG - data buffer was too short, remaining data got discarded
-// B_ERROR - something serious went wrong, sense data was set
-status_t write_PIO_block( ide_qrequest *qrequest, int length )
+/** write PIO data
+* return: there are 3 possible results
+* NO_ERROR - everything's nice and groovy
+* ERR_TOO_BIG - data buffer was too short, remaining data got discarded
+* B_ERROR - something serious went wrong, sense data was set
+*/
+
+status_t
+write_PIO_block(ide_qrequest *qrequest, int length)
 {
 ide_device_info *device = qrequest->device;
 int transferred;
 status_t err;

 transferred = 0;
-err = transfer_PIO_block( device, length, true, &transferred );
+err = transfer_PIO_block(device, length, true, &transferred);

 qrequest->request->data_resid -= transferred;

-if( err != ERR_TOO_BIG )
+if (err != ERR_TOO_BIG)
 return err;

 // there may be a pending odd byte - transmit that now
-if( qrequest->device->has_odd_byte ) {
+if (qrequest->device->has_odd_byte) {
 uint8 buffer[2];

 buffer[0] = device->odd_byte;
 buffer[1] = 0;

 device->has_odd_byte = false;

 qrequest->request->data_resid -= 1;
 transferred += 2;

-device->bus->controller->write_pio( device->bus->channel, (uint16 *)buffer, 1, false );
+device->bus->controller->write_pio(device->bus->channel, (uint16 *)buffer, 1, false);
 }

 // "transferred" may actually be larger then length because the last odd-byte
 // is sent together with an extra zero-byte
-if( transferred >= length )
+if (transferred >= length)
 return err;

 // Ouch! the device asks for data but we haven't got any left.
 // Sadly, this behaviour is OK for ATAPI packets, but there is no
 // way to tell the device that we don't have any data left;
 // only solution is to send zero bytes, though it's BAD BAD BAD
-write_discard_PIO( qrequest->device, length - transferred );
+write_discard_PIO(qrequest->device, length - transferred);
 return ERR_TOO_BIG;
 }


-// read PIO data
-// return: see write_PIO_block
-status_t read_PIO_block( ide_qrequest *qrequest, int length )
+/** read PIO data
+* return: see write_PIO_block
+*/
+
+status_t
+read_PIO_block(ide_qrequest *qrequest, int length)
 {
 ide_device_info *device = qrequest->device;
 int transferred;
 status_t err;

 transferred = 0;
-err = transfer_PIO_block( qrequest->device, length, false, &transferred );
+err = transfer_PIO_block(qrequest->device, length, false, &transferred);

 qrequest->request->data_resid -= transferred;

 // if length was odd, there's an extra byte waiting in device->odd_byte
-if( device->has_odd_byte ) {
+if (device->has_odd_byte) {
 // discard byte
 device->has_odd_byte = false;
 // adjust res_id as the extra byte didn't reach the buffer
 ++qrequest->request->data_resid;
 }

-if( err != ERR_TOO_BIG )
+if (err != ERR_TOO_BIG)
 return err;

 // the device returns more data then the buffer can store;
 // for ATAPI this is OK - we just discard remaining bytes (there
 // is no way to tell ATAPI about that, but we "only" waste time)

 // perhaps discarding the extra odd-byte was sufficient
-if( transferred >= length )
+if (transferred >= length)
 return err;

-SHOW_FLOW( 3, "discarding after %d bytes", transferred );
-read_discard_PIO( qrequest->device, length - transferred );
+SHOW_FLOW(3, "discarding after %d bytes", transferred);
+read_discard_PIO(qrequest->device, length - transferred);
 return ERR_TOO_BIG;
 }
@@ -1,7 +1,7 @@
 /*
-** Copyright 2002/03, Thomas Kurschel. All rights reserved.
-** Distributed under the terms of the OpenBeOS License.
+* Copyright 2002/03, Thomas Kurschel. All rights reserved.
+* Distributed under the terms of the MIT License.
 */

 /*
 Part of Open IDE bus manager
@@ -27,31 +27,27 @@ ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
 scsi_mode_param_header_10 param_header;
 scsi_modepage_contr contr;
 scsi_mode_param_block_desc block_desc;
-size_t total_length =
-sizeof(scsi_mode_param_header_10) +
-sizeof(scsi_mode_param_block_desc) +
-sizeof(scsi_modepage_contr);
+size_t total_length = sizeof(scsi_mode_param_header_10)
++ sizeof(scsi_mode_param_block_desc)
++ sizeof(scsi_modepage_contr);
 scsi_mode_param_dev_spec_da devspec = {
 res0_0 : 0,
 DPOFUA : 0,
 res0_6 : 0,
 WP : 0
 };
-int allocation_length;
+uint32 allocation_length;

-SHOW_ERROR0( 0, "Hi!" );
+SHOW_FLOW0(1, "Hi!");

 allocation_length = ((int16)cmd->high_allocation_length << 8)
 | cmd->low_allocation_length;

 // we answer control page requests and "all pages" requests
 // (as the latter are the same as the first)
-if( (cmd->page_code != SCSI_MODEPAGE_CONTROL &&
-cmd->page_code != SCSI_MODEPAGE_ALL) ||
-(cmd->PC != SCSI_MODE_SENSE_PC_CURRENT &&
-cmd->PC != SCSI_MODE_SENSE_PC_SAVED))
-{
-set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD );
+if ((cmd->page_code != SCSI_MODEPAGE_CONTROL && cmd->page_code != SCSI_MODEPAGE_ALL)
+|| (cmd->PC != SCSI_MODE_SENSE_PC_CURRENT && cmd->PC != SCSI_MODE_SENSE_PC_SAVED)) {
+set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
 return;
 }

@ -60,293 +56,300 @@ ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
|
|||||||
param_header.high_mode_data_len = 0;
|
param_header.high_mode_data_len = 0;
|
||||||
param_header.medium_type = 0; // XXX standard is a bit vague here
|
param_header.medium_type = 0; // XXX standard is a bit vague here
|
||||||
param_header.dev_spec_parameter = *(uint8 *)&devspec;
|
param_header.dev_spec_parameter = *(uint8 *)&devspec;
|
||||||
param_header.low_block_desc_len = sizeof( scsi_mode_param_block_desc );
|
param_header.low_block_desc_len = sizeof(scsi_mode_param_block_desc);
|
||||||
param_header.high_block_desc_len = 0;
|
param_header.high_block_desc_len = 0;
|
||||||
|
|
||||||
copy_sg_data( request, 0, allocation_length,
|
copy_sg_data(request, 0, allocation_length, ¶m_header, sizeof(param_header), false);
|
||||||
¶m_header, sizeof( param_header ), false );
|
|
||||||
|
|
||||||
/*block_desc = (scsi_mode_param_block_desc *)(request->data
|
/*block_desc = (scsi_mode_param_block_desc *)(request->data
|
||||||
+ sizeof( *param_header ));*/
|
+ sizeof(*param_header));*/
|
||||||
memset( &block_desc, 0, sizeof( block_desc ));
|
memset(&block_desc, 0, sizeof(block_desc));
|
||||||
// density is reserved (0), the descriptor applies to the entire medium (num_blocks=0);
|
// density is reserved (0), the descriptor applies to the entire medium (num_blocks=0);
|
||||||
// only the block length remains to be set
|
// only the block length remains to be set
|
||||||
block_desc.high_blocklen = 0;
|
block_desc.high_blocklen = 0;
|
||||||
block_desc.med_blocklen = 512 >> 8;
|
block_desc.med_blocklen = 512 >> 8;
|
||||||
block_desc.low_blocklen = 512 & 0xff;
|
block_desc.low_blocklen = 512 & 0xff;
|
||||||
|
|
||||||
copy_sg_data( request, sizeof( param_header ), allocation_length,
|
copy_sg_data(request, sizeof(param_header), allocation_length,
|
||||||
&block_desc, sizeof( block_desc ), false );
|
&block_desc, sizeof(block_desc), false);
|
||||||
|
|
||||||
/*contr = (scsi_modepage_contr *)(request->data
|
/*contr = (scsi_modepage_contr *)(request->data
|
||||||
+ sizeof( *param_header )
|
+ sizeof(*param_header)
|
||||||
+ ((uint16)param_header->high_block_desc_len << 8)
|
+ ((uint16)param_header->high_block_desc_len << 8)
|
||||||
+ param_header->low_block_desc_len);*/
|
+ param_header->low_block_desc_len);*/
|
||||||
|
|
||||||
memset( &contr, 0, sizeof( contr ));
|
memset(&contr, 0, sizeof(contr));
|
||||||
contr.RLEC = false;
|
contr.RLEC = false;
|
||||||
contr.DQue = !device->CQ_enabled;
|
contr.DQue = !device->CQ_enabled;
|
||||||
contr.QErr = false; // when a command fails we requeue all
|
contr.QErr = false;
|
||||||
// lost commands automagically
|
// when a command fails we requeue all
|
||||||
|
// lost commands automagically
|
||||||
contr.QAM = SCSI_QAM_UNRESTRICTED;
|
contr.QAM = SCSI_QAM_UNRESTRICTED;
|
||||||
|
|
||||||
copy_sg_data( request,
|
copy_sg_data(request, sizeof(param_header)
|
||||||
sizeof( param_header )
|
|
||||||
+ ((uint16)param_header.high_block_desc_len << 8)
|
+ ((uint16)param_header.high_block_desc_len << 8)
|
||||||
+ param_header.low_block_desc_len,
|
+ param_header.low_block_desc_len,
|
||||||
allocation_length,
|
allocation_length, &contr, sizeof(contr), false);
|
||||||
&contr, sizeof( contr ), false );
|
|
||||||
|
|
||||||
// the number of bytes that were transferred to buffer is
|
// the number of bytes that were transferred to buffer is
|
||||||
// restricted by allocation length and by request data buffer size
|
// restricted by allocation length and by request data buffer size
|
||||||
total_length = min( total_length, allocation_length );
|
total_length = min(total_length, allocation_length);
|
||||||
total_length = min( total_length, request->data_len );
|
total_length = min(total_length, request->data_len);
|
||||||
|
|
||||||
request->data_resid = request->data_len - total_length;
|
request->data_resid = request->data_len - total_length;
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
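Note: the MODE SENSE(10) reply built above is a fixed sequence - an 8-byte mode parameter header, one 8-byte block descriptor, then the control mode page - and the 512-byte block length is split across the descriptor's three length bytes. A standalone sketch of that arithmetic (the sizes are the usual SCSI ones, not read from the driver's structs):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum {
	kModeParamHeader10Size = 8,	/* scsi_mode_param_header_10 */
	kModeBlockDescSize = 8,		/* scsi_mode_param_block_desc */
	kModePageControlSize = 12	/* control mode page incl. its header */
};

int
main(void)
{
	uint32_t blockSize = 512;
	uint8_t high = (blockSize >> 16) & 0xff;	/* high_blocklen */
	uint8_t med = (blockSize >> 8) & 0xff;		/* med_blocklen */
	uint8_t low = blockSize & 0xff;			/* low_blocklen */
	size_t total = kModeParamHeader10Size + kModeBlockDescSize
		+ kModePageControlSize;

	printf("blocklen bytes: %02x %02x %02x, total reply: %u bytes\n",
		high, med, low, (unsigned)total);
	return 0;
}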
|
|
||||||
|
|
||||||
// emulate modifying control page
|
/** emulate modifying control page */
|
||||||
static bool ata_mode_select_contrpage( ide_device_info *device, ide_qrequest *qrequest,
|
|
||||||
scsi_modepage_contr *page )
|
static bool
|
||||||
|
ata_mode_select_control_page(ide_device_info *device, ide_qrequest *qrequest,
|
||||||
|
scsi_modepage_contr *page)
|
||||||
{
|
{
|
||||||
if( page->header.page_length != sizeof( *page ) - sizeof( page->header )) {
|
if (page->header.page_length != sizeof(*page) - sizeof(page->header)) {
|
||||||
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR );
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
// we only support enabling/disabling command queuing
|
// we only support enabling/disabling command queuing
|
||||||
enable_CQ( device, !page->DQue );
|
enable_CQ(device, !page->DQue);
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// emulate MODE SELECT 10 command
|
/** emulate MODE SELECT 10 command */
|
||||||
static void ata_mode_select_10( ide_device_info *device, ide_qrequest *qrequest )
|
|
||||||
|
static void
|
||||||
|
ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
|
||||||
{
|
{
|
||||||
scsi_ccb *request = qrequest->request;
|
scsi_ccb *request = qrequest->request;
|
||||||
scsi_cmd_mode_select_10 *cmd = (scsi_cmd_mode_select_10 *)request->cdb;
|
scsi_cmd_mode_select_10 *cmd = (scsi_cmd_mode_select_10 *)request->cdb;
|
||||||
scsi_mode_param_header_10 param_header;
|
scsi_mode_param_header_10 param_header;
|
||||||
scsi_modepage_header page_header;
|
scsi_modepage_header page_header;
|
||||||
int total_length;
|
uint32 total_length;
|
||||||
uint modepage_offset;
|
uint32 modepage_offset;
|
||||||
char modepage_buffer[64]; // !!! enlarge this to support longer mode pages
|
char modepage_buffer[64]; // !!! enlarge this to support longer mode pages
|
||||||
|
|
||||||
if( cmd->SP || cmd->PF != 1 ) {
|
if (cmd->SP || cmd->PF != 1) {
|
||||||
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD );
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
total_length = min( request->data_len,
|
total_length = min(request->data_len,
|
||||||
((uint16)cmd->high_param_list_length << 8) | cmd->low_param_list_length);
|
((uint16)cmd->high_param_list_length << 8) | (uint32)cmd->low_param_list_length);
|
||||||
|
|
||||||
// first, retrieve page header to get size of different chunks
|
// first, retrieve page header to get size of different chunks
|
||||||
//param_header = (scsi_mode_param_header_10 *)request->data;
|
//param_header = (scsi_mode_param_header_10 *)request->data;
|
||||||
if( !copy_sg_data( request, 0, total_length,
|
if (!copy_sg_data(request, 0, total_length, ¶m_header, sizeof(param_header), true))
|
||||||
¶m_header, sizeof( param_header ), true ))
|
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
total_length = min( total_length,
|
total_length = min(total_length, (((uint16)param_header.high_mode_data_len << 8)
|
||||||
(((uint16)param_header.high_mode_data_len << 8) | param_header.low_mode_data_len) + 1 );
|
| param_header.low_mode_data_len) + 1UL);
|
||||||
|
|
||||||
// this is the start of the first mode page;
|
// this is the start of the first mode page;
|
||||||
// we ignore the block descriptor silently
|
// we ignore the block descriptor silently
|
||||||
modepage_offset =
|
modepage_offset = sizeof(param_header) + (((uint16)param_header.high_block_desc_len << 8)
|
||||||
sizeof( param_header ) +
|
| param_header.low_block_desc_len);
|
||||||
(((uint16)param_header.high_block_desc_len << 8) | param_header.low_block_desc_len);
|
|
||||||
|
|
||||||
// go through list of pages
|
// go through list of pages
|
||||||
while( modepage_offset < total_length ) {
|
while (modepage_offset < total_length) {
|
||||||
int page_len;
|
uint32 page_len;
|
||||||
|
|
||||||
// get header to know how long page is
|
// get header to know how long page is
|
||||||
if( !copy_sg_data( request, modepage_offset, total_length,
|
if (!copy_sg_data(request, modepage_offset, total_length,
|
||||||
&page_header, sizeof( page_header ), true ))
|
&page_header, sizeof(page_header), true))
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
// get size of one page and copy it to buffer
|
// get size of one page and copy it to buffer
|
||||||
page_len = page_header.page_length + sizeof( scsi_modepage_header );
|
page_len = page_header.page_length + sizeof(scsi_modepage_header);
|
||||||
|
|
||||||
// the buffer has a maximum size - this is not really standard compliant but
|
// the buffer has a maximum size - this is not really standard compliant but
|
||||||
// sufficient for our needs
|
// sufficient for our needs
|
||||||
if( page_len > sizeof( modepage_buffer ))
|
if (page_len > sizeof(modepage_buffer))
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
if( !copy_sg_data( request, modepage_offset, total_length,
|
if (!copy_sg_data(request, modepage_offset, total_length,
|
||||||
&modepage_buffer, min( page_len, sizeof( modepage_buffer )), true ))
|
&modepage_buffer, min(page_len, sizeof(modepage_buffer)), true))
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
// modify page;
|
// modify page;
|
||||||
// currently, we only support the control mode page
|
// currently, we only support the control mode page
|
||||||
switch( page_header.page_code ) {
|
switch (page_header.page_code) {
|
||||||
case SCSI_MODEPAGE_CONTROL:
|
case SCSI_MODEPAGE_CONTROL:
|
||||||
if( !ata_mode_select_contrpage( device, qrequest,
|
if (!ata_mode_select_control_page(device, qrequest,
|
||||||
(scsi_modepage_contr *)modepage_buffer ))
|
(scsi_modepage_contr *)modepage_buffer))
|
||||||
|
return;
|
||||||
|
break;
|
||||||
|
|
||||||
|
default:
|
||||||
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_PARAM_LIST_FIELD);
|
||||||
return;
|
return;
|
||||||
break;
|
|
||||||
|
|
||||||
default:
|
|
||||||
set_sense( device,
|
|
||||||
SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_PARAM_LIST_FIELD );
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
modepage_offset += page_len;
|
modepage_offset += page_len;
|
||||||
}
|
}
|
||||||
|
|
||||||
if( modepage_offset != total_length )
|
if (modepage_offset != total_length)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
request->data_resid = request->data_len - total_length;
|
request->data_resid = request->data_len - total_length;
|
||||||
return;
|
return;
|
||||||
|
|
||||||
// if we arrive here, data length was incorrect
|
// if we arrive here, data length was incorrect
|
||||||
err:
|
err:
|
||||||
set_sense( device,
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
|
||||||
SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR );
|
|
||||||
}
|
}
|
||||||
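Note: ata_mode_select_10() above walks the parameter list one mode page at a time, using each page header's length byte to find the next page and insisting that the pages exactly fill the list. A standalone sketch of that walk over a plain byte buffer (handle_page() is a hypothetical callback standing in for the per-page handling):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Each mode page starts with a two byte header: the page code, then the
 * number of bytes that follow the header. */
bool
walk_mode_pages(const uint8_t *list, size_t listLength,
	bool (*handle_page)(uint8_t code, const uint8_t *page, size_t length))
{
	size_t offset = 0;

	while (offset + 2 <= listLength) {
		uint8_t code = list[offset] & 0x3f;
		size_t pageLength = (size_t)list[offset + 1] + 2;

		if (offset + pageLength > listLength)
			return false;		/* page runs past the list */
		if (!handle_page(code, list + offset, pageLength))
			return false;
		offset += pageLength;
	}

	/* like the driver, treat leftover bytes as a length error */
	return offset == listLength;
}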
|
|
||||||
|
|
||||||
// emulate TEST UNIT READY
|
/** emulate TEST UNIT READY */
|
||||||
static bool ata_tur( ide_device_info *device, ide_qrequest *qrequest )
|
|
||||||
|
static bool
|
||||||
|
ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
|
||||||
{
|
{
|
||||||
SHOW_FLOW0( 3, "" );
|
SHOW_FLOW0(3, "");
|
||||||
|
|
||||||
if( !device->infoblock.RMSN_supported ||
|
if (!device->infoblock.RMSN_supported
|
||||||
device->infoblock._127_RMSN_support != 1 )
|
|| device->infoblock._127_RMSN_support != 1)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
// ask device about status
|
// ask device about status
|
||||||
device->tf_param_mask = 0;
|
device->tf_param_mask = 0;
|
||||||
device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;
|
device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;
|
||||||
|
|
||||||
if( !send_command( device, qrequest, true, 15, ide_state_sync_waiting ))
|
if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
// bits ide_error_mcr | ide_error_mc | ide_error_wp are also valid
|
// bits ide_error_mcr | ide_error_mc | ide_error_wp are also valid
|
||||||
// but not requested by TUR; ide_error_wp can safely be ignored, but
|
// but not requested by TUR; ide_error_wp can safely be ignored, but
|
||||||
// we don't want to lose media change (request) reports
|
// we don't want to lose media change (request) reports
|
||||||
if( !check_output( device, true,
|
if (!check_output(device, true,
|
||||||
ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
|
ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
|
||||||
false ))
|
false)) {
|
||||||
{
|
|
||||||
// SCSI spec is unclear here: we shouldn't report "media change (request)"
|
// SCSI spec is unclear here: we shouldn't report "media change (request)"
|
||||||
// but what to do if there is one? anyway - we report them
|
// but what to do if there is one? anyway - we report them
|
||||||
;
|
;
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
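Note: the emulated TEST UNIT READY above issues IDE_CMD_GET_MEDIA_STATUS and then filters the error register, because that command reports more conditions (media change request, write protect) than TUR is supposed to surface. A standalone sketch of the filtering idea only; the bit values below are placeholders, not the real ATA error register layout:

#include <stdbool.h>
#include <stdint.h>

/* Placeholder masks, for illustration only. */
#define ERR_NO_MEDIA		0x01
#define ERR_ABORTED		0x02
#define ERR_MEDIA_CHANGE_REQ	0x04
#define ERR_MEDIA_CHANGED	0x08
#define ERR_WRITE_PROTECT	0x10

/* Report "ready" only if none of the conditions TUR cares about is set;
 * anything else (e.g. write protect) is ignored on purpose. */
bool
tur_result_sketch(uint8_t errorRegister)
{
	uint8_t relevant = ERR_NO_MEDIA | ERR_ABORTED
		| ERR_MEDIA_CHANGE_REQ | ERR_MEDIA_CHANGED;

	return (errorRegister & relevant) == 0;
}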
|
|
||||||
|
|
||||||
// flush internal device cache
|
/** flush internal device cache */
|
||||||
static bool ata_flush_cache( ide_device_info *device, ide_qrequest *qrequest )
|
|
||||||
|
static bool
|
||||||
|
ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
|
||||||
{
|
{
|
||||||
// we should also ask for FLUSH CACHE support, but everyone denies it
|
// we should also ask for FLUSH CACHE support, but everyone denies it
|
||||||
// (looks like they cheat to gain some performance advantage, but
|
// (looks like they cheat to gain some performance advantage, but
|
||||||
// that's pretty useless: everyone does it...)
|
// that's pretty useless: everyone does it...)
|
||||||
if( !device->infoblock.write_cache_supported )
|
if (!device->infoblock.write_cache_supported)
|
||||||
return true;
|
return true;
|
||||||
|
|
||||||
device->tf_param_mask = 0;
|
device->tf_param_mask = 0;
|
||||||
device->tf.lba.command = device->use_48bits ? IDE_CMD_FLUSH_CACHE_EXT
|
device->tf.lba.command = device->use_48bits ? IDE_CMD_FLUSH_CACHE_EXT
|
||||||
: IDE_CMD_FLUSH_CACHE;
|
: IDE_CMD_FLUSH_CACHE;
|
||||||
|
|
||||||
// spec says that this may take more than 30s, how much more?
|
|
||||||
if( !send_command( device, qrequest, true, 60, ide_state_sync_waiting ))
|
|
||||||
return false;
|
|
||||||
|
|
||||||
wait_for_sync( device->bus );
|
|
||||||
|
|
||||||
return check_output( device, true, ide_error_abrt, false );
|
// spec says that this may take more than 30s, how much more?
|
||||||
|
if (!send_command(device, qrequest, true, 60, ide_state_sync_waiting))
|
||||||
|
return false;
|
||||||
|
|
||||||
|
wait_for_sync(device->bus);
|
||||||
|
|
||||||
|
return check_output(device, true, ide_error_abrt, false);
|
||||||
}
|
}
|
||||||
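Note: ata_flush_cache() above picks between the plain and the 48-bit flavour of the flush command based on device->use_48bits. A minimal standalone sketch of that selection (0xE7 and 0xEA are the customary ATA opcodes for FLUSH CACHE and FLUSH CACHE EXT; shown here for illustration only):

#include <stdbool.h>
#include <stdint.h>

#define CMD_FLUSH_CACHE		0xe7	/* FLUSH CACHE */
#define CMD_FLUSH_CACHE_EXT	0xea	/* FLUSH CACHE EXT (48-bit feature set) */

uint8_t
select_flush_command(bool use48bits)
{
	return use48bits ? CMD_FLUSH_CACHE_EXT : CMD_FLUSH_CACHE;
}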
|
|
||||||
|
|
||||||
// load or eject medium
|
/** load or eject medium
|
||||||
// load = true - load medium
|
* load = true - load medium
|
||||||
static bool ata_load_eject( ide_device_info *device, ide_qrequest *qrequest, bool load )
|
*/
|
||||||
|
|
||||||
|
static bool
|
||||||
|
ata_load_eject(ide_device_info *device, ide_qrequest *qrequest, bool load)
|
||||||
{
|
{
|
||||||
if( load ) {
|
if (load) {
|
||||||
// ATA doesn't support loading
|
// ATA doesn't support loading
|
||||||
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_NOT_SUPPORTED );
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_NOT_SUPPORTED);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
device->tf_param_mask = 0;
|
device->tf_param_mask = 0;
|
||||||
device->tf.lba.command = IDE_CMD_MEDIA_EJECT;
|
device->tf.lba.command = IDE_CMD_MEDIA_EJECT;
|
||||||
|
|
||||||
if( !send_command( device, qrequest, true, 15, ide_state_sync_waiting ))
|
if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
wait_for_sync( device->bus );
|
wait_for_sync(device->bus);
|
||||||
|
|
||||||
return check_output( device, true, ide_error_abrt | ide_error_nm, false );
|
return check_output(device, true, ide_error_abrt | ide_error_nm, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// emulate PREVENT ALLOW command
|
/** emulate PREVENT ALLOW command */
|
||||||
static bool ata_prevent_allow( ide_device_info *device, bool prevent )
|
|
||||||
|
static bool
|
||||||
|
ata_prevent_allow(ide_device_info *device, bool prevent)
|
||||||
{
|
{
|
||||||
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_ILL_FUNCTION );
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_ILL_FUNCTION);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// emulate INQUIRY command
|
/** emulate INQUIRY command */
|
||||||
static void ata_inquiry( ide_device_info *device, ide_qrequest *qrequest )
|
|
||||||
|
static void
|
||||||
|
ata_inquiry(ide_device_info *device, ide_qrequest *qrequest)
|
||||||
{
|
{
|
||||||
scsi_ccb *request = qrequest->request;
|
scsi_ccb *request = qrequest->request;
|
||||||
scsi_res_inquiry data;
|
scsi_res_inquiry data;
|
||||||
scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)request->cdb;
|
scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)request->cdb;
|
||||||
uint allocation_length = cmd->allocation_length;
|
uint32 allocation_length = cmd->allocation_length;
|
||||||
int transfer_size;
|
uint32 transfer_size;
|
||||||
|
|
||||||
if (cmd->EVPD || cmd->page_code) {
|
if (cmd->EVPD || cmd->page_code) {
|
||||||
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
memset( &data, 0, sizeof( data ));
|
memset(&data, 0, sizeof(data));
|
||||||
|
|
||||||
data.device_type = scsi_dev_direct_access;
|
data.device_type = scsi_dev_direct_access;
|
||||||
data.device_qualifier = scsi_periph_qual_connected;
|
data.device_qualifier = scsi_periph_qual_connected;
|
||||||
|
|
||||||
data.device_type_modifier = 0;
|
data.device_type_modifier = 0;
|
||||||
data.RMB = false;
|
data.RMB = false;
|
||||||
|
|
||||||
data.ANSI_version = 2;
|
data.ANSI_version = 2;
|
||||||
data.ECMA_version = 0;
|
data.ECMA_version = 0;
|
||||||
data.ISO_version = 0;
|
data.ISO_version = 0;
|
||||||
|
|
||||||
data.response_data_format = 2;
|
data.response_data_format = 2;
|
||||||
data.TrmIOP = false; // to be changed if we support TERM I/O
|
data.TrmIOP = false; // to be changed if we support TERM I/O
|
||||||
|
|
||||||
data.additional_length = sizeof( scsi_res_inquiry ) - 4;
|
data.additional_length = sizeof(scsi_res_inquiry) - 4;
|
||||||
|
|
||||||
data.SftRe = false;
|
data.SftRe = false;
|
||||||
data.CmdQue = device->queue_depth > 1;
|
data.CmdQue = device->queue_depth > 1;
|
||||||
data.Linked = false;
|
data.Linked = false;
|
||||||
|
|
||||||
// these values are free-style
|
// these values are free-style
|
||||||
data.Sync = false;
|
data.Sync = false;
|
||||||
data.WBus16 = true;
|
data.WBus16 = true;
|
||||||
data.WBus32 = false;
|
data.WBus32 = false;
|
||||||
|
|
||||||
data.RelAdr = false;
|
data.RelAdr = false;
|
||||||
|
|
||||||
// the following fields are *much* too small, sigh...
|
// the following fields are *much* too small, sigh...
|
||||||
memcpy( data.vendor_ident, device->infoblock.model_number,
|
memcpy(data.vendor_ident, device->infoblock.model_number,
|
||||||
sizeof( data.vendor_ident ));
|
sizeof(data.vendor_ident));
|
||||||
memcpy( data.product_ident, device->infoblock.model_number + 8,
|
memcpy(data.product_ident, device->infoblock.model_number + 8,
|
||||||
sizeof( data.product_ident ));
|
sizeof(data.product_ident));
|
||||||
memcpy( data.product_rev, " ", sizeof( data.product_rev ));
|
memcpy(data.product_rev, " ", sizeof(data.product_rev));
|
||||||
|
|
||||||
copy_sg_data( request, 0, allocation_length, &data, sizeof( data ), false );
|
copy_sg_data(request, 0, allocation_length, &data, sizeof(data), false);
|
||||||
|
|
||||||
transfer_size = min( sizeof( data ), allocation_length );
|
transfer_size = min(sizeof(data), allocation_length);
|
||||||
transfer_size = min( transfer_size, request->data_len );
|
transfer_size = min(transfer_size, request->data_len);
|
||||||
|
|
||||||
request->data_resid = request->data_len - transfer_size;
|
request->data_resid = request->data_len - transfer_size;
|
||||||
}
|
}
|
||||||
|
|
||||||
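Note: the INQUIRY emulation above has no separate vendor and product strings to draw from, so it slices the 40-character ATA model number into the 8-byte vendor and 16-byte product fields of the INQUIRY data (hence the "much too small" remark). A standalone sketch of that slicing (the model string is made up; the field sizes follow the SCSI INQUIRY layout):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* the ATA IDENTIFY DEVICE model number field is 40 bytes, space padded */
	const char model[40 + 1] = "Maxtor 6Y080P0";
	char vendor[8 + 1] = {0};
	char product[16 + 1] = {0};

	memcpy(vendor, model, 8);	/* first 8 chars -> vendor_ident */
	memcpy(product, model + 8, 16);	/* next 16 chars -> product_ident */

	printf("vendor='%s' product='%s'\n", vendor, product);
	return 0;
}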
@ -392,141 +395,144 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
|
|||||||
{
|
{
|
||||||
scsi_ccb *request = qrequest->request;
|
scsi_ccb *request = qrequest->request;
|
||||||
|
|
||||||
SHOW_FLOW( 3, "command=%x", request->cdb[0] );
|
SHOW_FLOW(3, "command=%x", request->cdb[0]);
|
||||||
|
|
||||||
// ATA devices have one LUN only
|
// ATA devices have one LUN only
|
||||||
if( request->target_lun != 0 ) {
|
if (request->target_lun != 0) {
|
||||||
request->subsys_status = SCSI_SEL_TIMEOUT;
|
request->subsys_status = SCSI_SEL_TIMEOUT;
|
||||||
finish_request( qrequest, false );
|
finish_request(qrequest, false);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// starting a request means deleting sense, so don't do it if
|
// starting a request means deleting sense, so don't do it if
|
||||||
// the command wants to read it
|
// the command wants to read it
|
||||||
if( request->cdb[0] != SCSI_OP_REQUEST_SENSE )
|
if (request->cdb[0] != SCSI_OP_REQUEST_SENSE)
|
||||||
start_request( device, qrequest );
|
start_request(device, qrequest);
|
||||||
|
|
||||||
switch( request->cdb[0] ) {
|
|
||||||
case SCSI_OP_TUR:
|
|
||||||
ata_tur( device, qrequest );
|
|
||||||
break;
|
|
||||||
|
|
||||||
case SCSI_OP_REQUEST_SENSE:
|
switch (request->cdb[0]) {
|
||||||
ide_request_sense( device, qrequest );
|
case SCSI_OP_TUR:
|
||||||
return;
|
ata_test_unit_ready(device, qrequest);
|
||||||
|
break;
|
||||||
case SCSI_OP_FORMAT: /* FORMAT UNIT */
|
|
||||||
// we could forward request to disk, but modern disks cannot
|
|
||||||
// be formatted anyway, so we just refuse request
|
|
||||||
// (exceptions are removable media devices, but to my knowledge
|
|
||||||
// they don't have to be formatted as well)
|
|
||||||
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
|
|
||||||
break;
|
|
||||||
|
|
||||||
case SCSI_OP_INQUIRY:
|
case SCSI_OP_REQUEST_SENSE:
|
||||||
ata_inquiry( device, qrequest );
|
ide_request_sense(device, qrequest);
|
||||||
break;
|
return;
|
||||||
|
|
||||||
case SCSI_OP_MODE_SELECT_10:
|
case SCSI_OP_FORMAT: /* FORMAT UNIT */
|
||||||
ata_mode_select_10( device, qrequest );
|
// we could forward request to disk, but modern disks cannot
|
||||||
break;
|
// be formatted anyway, so we just refuse request
|
||||||
|
// (exceptions are removable media devices, but to my knowledge
|
||||||
case SCSI_OP_MODE_SENSE_10:
|
// they don't have to be formatted as well)
|
||||||
ata_mode_sense_10( device, qrequest );
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
|
||||||
break;
|
break;
|
||||||
|
|
||||||
case SCSI_OP_MODE_SELECT_6:
|
case SCSI_OP_INQUIRY:
|
||||||
case SCSI_OP_MODE_SENSE_6:
|
ata_inquiry(device, qrequest);
|
||||||
// we've told SCSI bus manager to emulate these commands
|
break;
|
||||||
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
|
|
||||||
break;
|
case SCSI_OP_MODE_SELECT_10:
|
||||||
|
ata_mode_select_10(device, qrequest);
|
||||||
case SCSI_OP_RESERVE:
|
break;
|
||||||
case SCSI_OP_RELEASE:
|
|
||||||
// though mandatory, this doesn't make much sense in a
|
case SCSI_OP_MODE_SENSE_10:
|
||||||
// single initiator environment; so what
|
ata_mode_sense_10(device, qrequest);
|
||||||
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
|
break;
|
||||||
break;
|
|
||||||
|
case SCSI_OP_MODE_SELECT_6:
|
||||||
case SCSI_OP_START_STOP: {
|
case SCSI_OP_MODE_SENSE_6:
|
||||||
scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)request->cdb;
|
// we've told SCSI bus manager to emulate these commands
|
||||||
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
|
||||||
// with no LoEj bit set, we should only allow/deny further access
|
break;
|
||||||
// we ignore that (unsupported for ATA)
|
|
||||||
// with LoEj bit set, we should additionally either load or eject the medium
|
case SCSI_OP_RESERVE:
|
||||||
// (start = 0 - eject; start = 1 - load)
|
case SCSI_OP_RELEASE:
|
||||||
|
// though mandatory, this doesn't make much sense in a
|
||||||
if( !cmd->start )
|
// single initiator environment; so what
|
||||||
// we must always flush cache if start = 0
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
|
||||||
ata_flush_cache( device, qrequest );
|
break;
|
||||||
|
|
||||||
if( cmd->LoEj )
|
case SCSI_OP_START_STOP: {
|
||||||
ata_load_eject( device, qrequest, cmd->start );
|
scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)request->cdb;
|
||||||
|
|
||||||
break; }
|
// with no LoEj bit set, we should only allow/deny further access
|
||||||
|
// we ignore that (unsupported for ATA)
|
||||||
case SCSI_OP_PREVENT_ALLOW: {
|
// with LoEj bit set, we should additionally either load or eject the medium
|
||||||
scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)request->cdb;
|
// (start = 0 - eject; start = 1 - load)
|
||||||
|
|
||||||
ata_prevent_allow( device, cmd->prevent );
|
if (!cmd->start)
|
||||||
break; }
|
// we must always flush cache if start = 0
|
||||||
|
ata_flush_cache(device, qrequest);
|
||||||
case SCSI_OP_READ_CAPACITY:
|
|
||||||
read_capacity( device, qrequest );
|
if (cmd->LoEj)
|
||||||
break;
|
ata_load_eject(device, qrequest, cmd->start);
|
||||||
|
|
||||||
case SCSI_OP_VERIFY:
|
break;
|
||||||
// does anyone use this function?
|
|
||||||
// effectively, it does a read-and-compare, which IDE doesn't support
|
|
||||||
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
|
|
||||||
break;
|
|
||||||
|
|
||||||
case SCSI_OP_SYNCHRONIZE_CACHE:
|
|
||||||
// we ignore range and immediate bit, we always immediately flush everything
|
|
||||||
ata_flush_cache( device, qrequest );
|
|
||||||
break;
|
|
||||||
|
|
||||||
// sadly, there are two possible read/write operation codes;
|
|
||||||
// at least, the third one, read/write(12), is not valid for DAS
|
|
||||||
case SCSI_OP_READ_6:
|
|
||||||
case SCSI_OP_WRITE_6: {
|
|
||||||
scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;
|
|
||||||
uint32 pos;
|
|
||||||
size_t length;
|
|
||||||
|
|
||||||
pos = ((uint32)cmd->high_LBA << 16) | ((uint32)cmd->mid_LBA << 8)
|
|
||||||
| (uint32)cmd->low_LBA;
|
|
||||||
|
|
||||||
length = cmd->length != 0 ? cmd->length : 256;
|
|
||||||
|
|
||||||
SHOW_FLOW( 3, "READ6/WRITE6 pos=%ux, length=%ux", (uint)pos, (uint)length );
|
|
||||||
|
|
||||||
ata_send_rw( device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_6 );
|
|
||||||
return; }
|
|
||||||
|
|
||||||
case SCSI_OP_READ_10:
|
|
||||||
case SCSI_OP_WRITE_10: {
|
|
||||||
scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;
|
|
||||||
uint32 pos;
|
|
||||||
size_t length;
|
|
||||||
|
|
||||||
pos = ((uint32)cmd->top_LBA << 24) | ((uint32)cmd->high_LBA << 16)
|
|
||||||
| ((uint32)cmd->mid_LBA << 8) | (uint32)cmd->low_LBA;
|
|
||||||
|
|
||||||
length = ((uint32)cmd->high_length << 8) | cmd->low_length;
|
|
||||||
|
|
||||||
if( length != 0 ) {
|
|
||||||
ata_send_rw( device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_10 );
|
|
||||||
} else {
|
|
||||||
// we cannot transfer zero blocks (apart from LBA48)
|
|
||||||
finish_request( qrequest, false );
|
|
||||||
}
|
}
|
||||||
return; }
|
|
||||||
|
case SCSI_OP_PREVENT_ALLOW: {
|
||||||
default:
|
scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)request->cdb;
|
||||||
set_sense( device,
|
|
||||||
SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
|
ata_prevent_allow(device, cmd->prevent);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case SCSI_OP_READ_CAPACITY:
|
||||||
|
read_capacity(device, qrequest);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case SCSI_OP_VERIFY:
|
||||||
|
// does anyone use this function?
|
||||||
|
// effectively, it does a read-and-compare, which IDE doesn't support
|
||||||
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
|
||||||
|
break;
|
||||||
|
|
||||||
|
case SCSI_OP_SYNCHRONIZE_CACHE:
|
||||||
|
// we ignore range and immediate bit, we always immediately flush everything
|
||||||
|
ata_flush_cache(device, qrequest);
|
||||||
|
break;
|
||||||
|
|
||||||
|
// sadly, there are two possible read/write operation codes;
|
||||||
|
// at least, the third one, read/write(12), is not valid for DAS
|
||||||
|
case SCSI_OP_READ_6:
|
||||||
|
case SCSI_OP_WRITE_6: {
|
||||||
|
scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;
|
||||||
|
uint32 pos;
|
||||||
|
size_t length;
|
||||||
|
|
||||||
|
pos = ((uint32)cmd->high_LBA << 16) | ((uint32)cmd->mid_LBA << 8)
|
||||||
|
| (uint32)cmd->low_LBA;
|
||||||
|
|
||||||
|
length = cmd->length != 0 ? cmd->length : 256;
|
||||||
|
|
||||||
|
SHOW_FLOW(3, "READ6/WRITE6 pos=%lx, length=%lx", pos, length);
|
||||||
|
|
||||||
|
ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_6);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
case SCSI_OP_READ_10:
|
||||||
|
case SCSI_OP_WRITE_10: {
|
||||||
|
scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;
|
||||||
|
uint32 pos;
|
||||||
|
size_t length;
|
||||||
|
|
||||||
|
pos = ((uint32)cmd->top_LBA << 24) | ((uint32)cmd->high_LBA << 16)
|
||||||
|
| ((uint32)cmd->mid_LBA << 8) | (uint32)cmd->low_LBA;
|
||||||
|
|
||||||
|
length = ((uint32)cmd->high_length << 8) | cmd->low_length;
|
||||||
|
|
||||||
|
if (length != 0) {
|
||||||
|
ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_10);
|
||||||
|
} else {
|
||||||
|
// we cannot transfer zero blocks (apart from LBA48)
|
||||||
|
finish_request(qrequest, false);
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
|
||||||
}
|
}
|
||||||
|
|
||||||
finish_checksense( qrequest );
|
finish_checksense(qrequest);
|
||||||
}
|
}
|
||||||
|
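Note: the read/write dispatch above decodes two CDB formats by hand: the 6-byte form packs a 21-bit LBA and a one-byte length where 0 means 256 blocks, while the 10-byte form carries a 32-bit LBA and a 16-bit length where 0 means "transfer nothing". A standalone sketch of both decodings (byte offsets follow the SCSI CDB layout, no driver structs involved):

#include <stdint.h>
#include <stdio.h>

void
decode_rw6(const uint8_t *cdb, uint32_t *lba, uint32_t *length)
{
	*lba = ((uint32_t)(cdb[1] & 0x1f) << 16) | ((uint32_t)cdb[2] << 8) | cdb[3];
	*length = cdb[4] != 0 ? cdb[4] : 256;	/* 0 means 256 blocks */
}

void
decode_rw10(const uint8_t *cdb, uint32_t *lba, uint32_t *length)
{
	*lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16)
		| ((uint32_t)cdb[4] << 8) | cdb[5];
	*length = ((uint32_t)cdb[7] << 8) | cdb[8];	/* 0 means no data */
}

int
main(void)
{
	uint8_t rw6[6] = { 0x08, 0x01, 0x23, 0x45, 0x00, 0x00 };
	uint8_t rw10[10] = { 0x28, 0, 0x00, 0x01, 0x23, 0x45, 0, 0x00, 0x10, 0 };
	uint32_t lba, length;

	decode_rw6(rw6, &lba, &length);
	printf("READ(6):  lba=%#x length=%u\n", (unsigned)lba, (unsigned)length);
	decode_rw10(rw10, &lba, &length);
	printf("READ(10): lba=%#x length=%u\n", (unsigned)lba, (unsigned)length);
	return 0;
}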
@ -1,7 +1,7 @@
|
|||||||
/*
|
/*
|
||||||
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
|
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
|
||||||
** Distributed under the terms of the Haiku License.
|
* Distributed under the terms of the MIT License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Part of Open IDE bus manager
|
Part of Open IDE bus manager
|
||||||
@ -19,7 +19,7 @@
|
|||||||
|
|
||||||
//#define TRACE_SYNC
|
//#define TRACE_SYNC
|
||||||
#ifdef TRACE_SYNC
|
#ifdef TRACE_SYNC
|
||||||
# define TRACE(x) dprintf x
|
# define TRACE(x) { dprintf("%s(): ", __FUNCTION__); dprintf x ; }
|
||||||
#else
|
#else
|
||||||
# define TRACE(x) ;
|
# define TRACE(x) ;
|
||||||
#endif
|
#endif
|
||||||
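Note: the TRACE change above keeps the usual double-parenthesis idiom, TRACE(("x=%d\n", x)), so that one non-variadic macro can swallow a whole printf-style argument list, and it now also prefixes the calling function's name. A standalone sketch of the same pattern, with printf() standing in for the kernel's dprintf():

#include <stdio.h>

#define TRACE_SYNC
#ifdef TRACE_SYNC
#	define TRACE(x) { printf("%s(): ", __FUNCTION__); printf x ; }
#else
#	define TRACE(x) ;
#endif

void
ide_dpc_sketch(int status)
{
	/* expands to: printf("%s(): ", __FUNCTION__); printf("status %d\n", status); */
	TRACE(("status %d\n", status));
}

int
main(void)
{
	ide_dpc_sketch(7);
	return 0;
}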
@ -34,7 +34,7 @@ ide_dpc(void *arg)
|
|||||||
ide_qrequest *qrequest;
|
ide_qrequest *qrequest;
|
||||||
ide_device_info *device;
|
ide_device_info *device;
|
||||||
|
|
||||||
TRACE(("ide_dpc()\n"));
|
TRACE(("\n"));
|
||||||
|
|
||||||
//snooze(500000);
|
//snooze(500000);
|
||||||
|
|
||||||
@ -42,7 +42,7 @@ ide_dpc(void *arg)
|
|||||||
// in idle state, so we just check whether there is an active request,
|
// in idle state, so we just check whether there is an active request,
|
||||||
// which means that we were async_waiting
|
// which means that we were async_waiting
|
||||||
if (bus->active_qrequest != NULL) {
|
if (bus->active_qrequest != NULL) {
|
||||||
FAST_LOG1( bus->log, ev_ide_dpc_continue, (uint32)bus->active_qrequest );
|
FAST_LOG1(bus->log, ev_ide_dpc_continue, (uint32)bus->active_qrequest);
|
||||||
TRACE(("continue command\n"));
|
TRACE(("continue command\n"));
|
||||||
|
|
||||||
// cancel timeout
|
// cancel timeout
|
||||||
@ -69,7 +69,6 @@ ide_dpc(void *arg)
|
|||||||
FAST_LOG0(bus->log, ev_ide_dpc_service);
|
FAST_LOG0(bus->log, ev_ide_dpc_service);
|
||||||
|
|
||||||
device = get_current_device(bus);
|
device = get_current_device(bus);
|
||||||
|
|
||||||
if (device == NULL) {
|
if (device == NULL) {
|
||||||
// got an interrupt from a non-existing device
|
// got an interrupt from a non-existing device
|
||||||
// either this is a spurious interrupt or there *is* a device
|
// either this is a spurious interrupt or there *is* a device
|
||||||
@ -99,7 +98,7 @@ ide_irq_handler(ide_bus_info *bus, uint8 status)
|
|||||||
{
|
{
|
||||||
ide_device_info *device;
|
ide_device_info *device;
|
||||||
|
|
||||||
TRACE(("ide_irq_handler()\n"));
|
TRACE(("\n"));
|
||||||
FAST_LOG0(bus->log, ev_ide_irq_handle);
|
FAST_LOG0(bus->log, ev_ide_irq_handle);
|
||||||
|
|
||||||
// we need to lock bus to have a solid bus state
|
// we need to lock bus to have a solid bus state
|
||||||
|
@ -43,15 +43,15 @@
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef debug_level_flow
|
#ifndef debug_level_flow
|
||||||
# define debug_level_flow 3
|
# define debug_level_flow 4
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef debug_level_info
|
#ifndef debug_level_info
|
||||||
# define debug_level_info 2
|
# define debug_level_info 4
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifndef debug_level_error
|
#ifndef debug_level_error
|
||||||
# define debug_level_error 1
|
# define debug_level_error 4
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#define FUNC_NAME DEBUG_MSG_PREFIX __FUNCTION__ ": "
|
#define FUNC_NAME DEBUG_MSG_PREFIX __FUNCTION__ ": "
|
||||||
|
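Note: the last hunk raises debug_level_flow, debug_level_info and debug_level_error to 4. The assumption here (not visible in the diff itself) is that the SHOW_FLOW/SHOW_INFO/SHOW_ERROR macros emit a message only when its level does not exceed the configured threshold, so a larger value means chattier output. A standalone sketch of that kind of level gate:

#include <stdio.h>

#ifndef debug_level_flow
#	define debug_level_flow 4	/* higher value = more output (assumed) */
#endif

/* print only if the message's level is within the configured threshold */
#define SHOW_FLOW_SKETCH(level, fmt, ...) \
	do { \
		if ((level) <= debug_level_flow) \
			printf("flow%d: " fmt "\n", (level), __VA_ARGS__); \
	} while (0)

int
main(void)
{
	SHOW_FLOW_SKETCH(3, "command=%x", 0x28);	/* printed: 3 <= 4 */
	SHOW_FLOW_SKETCH(5, "command=%x", 0x2a);	/* suppressed: 5 > 4 */
	return 0;
}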