Fixed all warnings in scsi2ata.c.

Style cleanup.


git-svn-id: file:///srv/svn/repos/haiku/trunk/current@10297 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Axel Dörfler 2004-11-30 17:24:04 +00:00
parent 4e9c28deed
commit d99c404f77
7 changed files with 682 additions and 645 deletions

View File

@ -1,7 +1,7 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
@ -25,44 +25,54 @@
// maximum number send tries before giving up
#define MAX_FAILED_SEND 1
// busy-wait for data request going high
bool wait_for_drq( ide_device_info *device )
/** busy-wait for data request going high */
bool
wait_for_drq(ide_device_info *device)
{
return ide_wait( device, ide_status_drq, 0, true, 10000000 );
return ide_wait(device, ide_status_drq, 0, true, 10000000);
}
// busy-wait for data request going low
bool wait_for_drqdown( ide_device_info *device )
/** busy-wait for data request going low */
bool
wait_for_drqdown(ide_device_info *device)
{
return ide_wait( device, 0, ide_status_drq, true, 1000000 );
return ide_wait(device, 0, ide_status_drq, true, 1000000);
}
// busy-wait for device ready
bool wait_for_drdy( ide_device_info *device )
/** busy-wait for device ready */
bool
wait_for_drdy(ide_device_info *device)
{
return ide_wait( device, ide_status_drdy, ide_status_bsy, false, 5000000 );
return ide_wait(device, ide_status_drdy, ide_status_bsy, false, 5000000);
}
// reset entire IDE bus
// all active requests apart from <ignore> are resubmitted
bool reset_bus( ide_device_info *device, ide_qrequest *ignore )
/** reset entire IDE bus
* all active requests apart from <ignore> are resubmitted
*/
bool
reset_bus(ide_device_info *device, ide_qrequest *ignore)
{
ide_bus_info *bus = device->bus;
ide_controller_interface *controller = bus->controller;
ide_channel_cookie channel = bus->channel;
FAST_LOG0( bus->log, ev_ide_reset_bus );
if( device->reconnect_timer_installed ) {
cancel_timer( &device->reconnect_timer.te );
FAST_LOG0(bus->log, ev_ide_reset_bus);
if (device->reconnect_timer_installed) {
cancel_timer(&device->reconnect_timer.te);
device->reconnect_timer_installed = false;
}
if( device->other_device->reconnect_timer_installed ) {
cancel_timer( &device->other_device->reconnect_timer.te );
if (device->other_device->reconnect_timer_installed) {
cancel_timer(&device->other_device->reconnect_timer.te);
device->other_device->reconnect_timer_installed = false;
}
@ -70,74 +80,75 @@ bool reset_bus( ide_device_info *device, ide_qrequest *ignore )
// also, deactivate IRQ
// (as usual, we will get an IRQ on disabling, but as we leave them
// disabled for 2 ms, this false report is ignored)
if( controller->write_device_control( channel,
ide_devctrl_nien | ide_devctrl_srst | ide_devctrl_bit3 ) != B_OK )
if (controller->write_device_control(channel,
ide_devctrl_nien | ide_devctrl_srst | ide_devctrl_bit3) != B_OK)
goto err0;
spin( 5 );
if( controller->write_device_control( channel,
ide_devctrl_nien | ide_devctrl_bit3 ) != B_OK )
spin(5);
if (controller->write_device_control(channel, ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
goto err0;
// let devices wake up
snooze( 2000 );
snooze(2000);
// ouch, we have to wait up to 31 seconds!
if( !ide_wait( device, 0, ide_status_bsy, true, 31000000 )) {
if (!ide_wait(device, 0, ide_status_bsy, true, 31000000)) {
// as we don't know which of the devices is broken
// we leave them both alive
if( controller->write_device_control( channel, ide_devctrl_bit3 ) != B_OK )
if (controller->write_device_control(channel, ide_devctrl_bit3) != B_OK)
goto err0;
set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
goto err1;
}
if( controller->write_device_control( channel, ide_devctrl_bit3 ) != B_OK )
if (controller->write_device_control(channel, ide_devctrl_bit3) != B_OK)
goto err0;
finish_all_requests( bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true );
finish_all_requests( bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true );
finish_all_requests(bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true);
finish_all_requests(bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true);
return true;
err0:
set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
err1:
finish_all_requests( bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true );
finish_all_requests( bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true );
finish_all_requests(bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true);
finish_all_requests(bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true);
//xpt->call_async( bus->xpt_cookie, -1, -1, AC_BUS_RESET, NULL, 0 );
return false;
}
// execute packet device reset.
// resets entire bus on fail or if device is not atapi;
// all requests but <ignore> are resubmitted
bool reset_device( ide_device_info *device, ide_qrequest *ignore )
/** execute packet device reset.
* resets entire bus on fail or if device is not atapi;
* all requests but <ignore> are resubmitted
*/
bool
reset_device(ide_device_info *device, ide_qrequest *ignore)
{
ide_bus_info *bus = device->bus;
status_t res;
uint8 orig_command;
FAST_LOG1( bus->log, ev_ide_reset_device, device->is_device1 );
SHOW_FLOW0( 3, "" );
if( !device->is_atapi )
FAST_LOG1(bus->log, ev_ide_reset_device, device->is_device1);
SHOW_FLOW0(3, "");
if (!device->is_atapi)
goto err;
if( device->reconnect_timer_installed ) {
cancel_timer( &device->reconnect_timer.te );
if (device->reconnect_timer_installed) {
cancel_timer(&device->reconnect_timer.te);
device->reconnect_timer_installed = false;
}
// select device
if( bus->controller->write_command_block_regs( bus->channel, &device->tf,
ide_mask_device_head ) != B_OK )
if (bus->controller->write_command_block_regs(bus->channel, &device->tf,
ide_mask_device_head) != B_OK)
goto err;
// save original command to let caller restart it
@ -146,305 +157,311 @@ bool reset_device( ide_device_info *device, ide_qrequest *ignore )
// send device reset, independent of current device state
// (that's the point of a reset)
device->tf.write.command = IDE_CMD_DEVICE_RESET;
res = bus->controller->write_command_block_regs( bus->channel,
&device->tf, ide_mask_command );
res = bus->controller->write_command_block_regs(bus->channel,
&device->tf, ide_mask_command);
device->tf.write.command = orig_command;
if( res != B_OK )
if (res != B_OK)
goto err;
// don't know how long to wait, but 31 seconds, like soft reset,
// should be enough
if( !ide_wait( device, 0, ide_status_bsy, true, 31000000 ))
if (!ide_wait(device, 0, ide_status_bsy, true, 31000000))
goto err;
// alright, resubmit all requests
finish_all_requests( device, ignore, SCSI_SCSI_BUS_RESET, true );
finish_all_requests(device, ignore, SCSI_SCSI_BUS_RESET, true);
SHOW_FLOW0( 3, "done" );
SHOW_FLOW0(3, "done");
return true;
err:
// do the hard way
return reset_bus( device, ignore );
return reset_bus(device, ignore);
}
// new_state must be either accessing, async_waiting or sync_waiting
// param_mask must not include command register
bool send_command( ide_device_info *device, ide_qrequest *qrequest,
bool need_drdy, uint32 timeout, ide_bus_state new_state )
/** new_state must be either accessing, async_waiting or sync_waiting
* param_mask must not include command register
*/
bool
send_command(ide_device_info *device, ide_qrequest *qrequest,
bool need_drdy, uint32 timeout, ide_bus_state new_state)
{
ide_bus_info *bus = device->bus;
bigtime_t irq_disabled_at = 0; // make compiler happy
uint8 num_retries = 0;
bool irq_guard;
FAST_LOGN( bus->log, ev_ide_send_command, 15, device->is_device1, (uint32)qrequest,
FAST_LOGN(bus->log, ev_ide_send_command, 15, device->is_device1, (uint32)qrequest,
device->tf.raw.r[0], device->tf.raw.r[1], device->tf.raw.r[2],
device->tf.raw.r[3], device->tf.raw.r[4], device->tf.raw.r[5],
device->tf.raw.r[6],
device->tf.raw.r[7], device->tf.raw.r[8], device->tf.raw.r[9],
device->tf.raw.r[10], device->tf.raw.r[11] );
device->tf.raw.r[10], device->tf.raw.r[11]);
retry:
irq_guard = bus->num_running_reqs > 1;
SHOW_FLOW( 3, "qrequest=%p, request=%p", qrequest,
qrequest ? qrequest->request : NULL );
SHOW_FLOW(3, "qrequest=%p, request=%p", qrequest,
qrequest ? qrequest->request : NULL);
// if there are pending requests, IRQs must be disabled to
// not mix up IRQ reasons
// XXX can we avoid that with the IDE_LOCK trick? It would
// save some work and the bug workaround!
if( irq_guard ) {
if( bus->controller->write_device_control( bus->channel,
ide_devctrl_nien | ide_devctrl_bit3 ) != B_OK )
if (irq_guard) {
if (bus->controller->write_device_control(bus->channel,
ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
goto err;
irq_disabled_at = system_time();
}
// select device
if( bus->controller->write_command_block_regs( bus->channel, &device->tf,
ide_mask_device_head ) != B_OK )
if (bus->controller->write_command_block_regs(bus->channel, &device->tf,
ide_mask_device_head) != B_OK)
goto err;
bus->active_device = device;
if( !ide_wait( device, 0, ide_status_bsy | ide_status_drq, false, 50000 )) {
if (!ide_wait(device, 0, ide_status_bsy | ide_status_drq, false, 50000)) {
uint8 status;
SHOW_FLOW0( 1, "device is not ready" );
status = bus->controller->get_altstatus( bus->channel );
if( status == 0xff ) {
SHOW_FLOW0(1, "device is not ready");
status = bus->controller->get_altstatus(bus->channel);
if (status == 0xff) {
// there is no device (should happen during detection only)
SHOW_FLOW0( 1, "there is no device" );
SHOW_FLOW0(1, "there is no device");
// device detection recognizes this code as "all hope lost", so
// neither replace it nor use it anywhere else
device->subsys_status = SCSI_TID_INVALID;
return false;
}
// reset device and retry
if( reset_device( device, qrequest ) && ++num_retries <= MAX_FAILED_SEND ) {
SHOW_FLOW0( 1, "retrying" );
if (reset_device(device, qrequest) && ++num_retries <= MAX_FAILED_SEND) {
SHOW_FLOW0(1, "retrying");
goto retry;
}
SHOW_FLOW0( 1, "giving up" );
SHOW_FLOW0(1, "giving up");
// reset too often - abort request
device->subsys_status = SCSI_SEL_TIMEOUT;
return false;
}
if( need_drdy &&
(bus->controller->get_altstatus( bus->channel ) & ide_status_drdy) == 0 )
{
SHOW_FLOW0( 3, "drdy not set" );
if (need_drdy
&& (bus->controller->get_altstatus(bus->channel) & ide_status_drdy) == 0) {
SHOW_FLOW0(3, "drdy not set");
device->subsys_status = SCSI_SEQUENCE_FAIL;
return false;
}
// write parameters
if( bus->controller->write_command_block_regs( bus->channel, &device->tf,
device->tf_param_mask ) != B_OK )
if (bus->controller->write_command_block_regs(bus->channel, &device->tf,
device->tf_param_mask) != B_OK)
goto err;
if( irq_guard ) {
if (irq_guard) {
// IRQ may be fired by service requests and by the process of disabling(!)
// them (I heard this is caused by edge triggered PCI IRQs)
// wait at least 50 µs to catch all pending irq's
// (at my system, up to 30 µs elapsed)
// additionally, old drives (at least my IBM-DTTA-351010) lose
// sync if they are pushed too hard - on heavy overlapped write
// stress this drive tends to forget outstanding requests,
// waiting at least 50 µs seems(!) to solve this
while( system_time() - irq_disabled_at < MAX_IRQ_DELAY )
spin( 1 );
while (system_time() - irq_disabled_at < MAX_IRQ_DELAY)
spin(1);
}
// if we will start waiting once the command is sent, we have to
// lock the bus before sending; this way, IRQs that are fired
// shortly before/after sending of command are delayed until the
// command is really sent (start_waiting unlocks the bus) and then
// the IRQ handler can check safely whether the IRQ really signals
// finishing of command or not by testing the busy-signal of the device
if( new_state != ide_state_accessing ) {
IDE_LOCK( bus );
if (new_state != ide_state_accessing) {
IDE_LOCK(bus);
}
if( irq_guard ) {
if (irq_guard) {
// now it's clear why IRQs gets fired, so we can enable them again
if( bus->controller->write_device_control( bus->channel,
ide_devctrl_bit3 ) != B_OK )
if (bus->controller->write_device_control(bus->channel,
ide_devctrl_bit3) != B_OK)
goto err1;
}
// write command code - this will start the actual command
SHOW_FLOW( 3, "Writing command 0x%02x", (int)device->tf.write.command );
if( bus->controller->write_command_block_regs( bus->channel,
&device->tf, ide_mask_command ) != B_OK )
SHOW_FLOW(3, "Writing command 0x%02x", (int)device->tf.write.command);
if (bus->controller->write_command_block_regs(bus->channel,
&device->tf, ide_mask_command) != B_OK)
goto err1;
// start waiting now; also un-blocks IRQ handler (see above)
if( new_state != ide_state_accessing ) {
start_waiting( bus, timeout, new_state );
}
if (new_state != ide_state_accessing)
start_waiting(bus, timeout, new_state);
return true;
err1:
if( timeout > 0 ) {
if (timeout > 0) {
bus->state = ide_state_accessing;
IDE_UNLOCK( bus );
IDE_UNLOCK(bus);
}
err:
device->subsys_status = SCSI_HBA_ERR;
return false;
}
// busy-wait for device
// mask - bits of status register that must be set
// not_mask - bits of status register that must not be set
// check_err - abort if error bit is set
// timeout - waiting timeout
// return: true on success
bool ide_wait( ide_device_info *device, int mask, int not_mask,
bool check_err, bigtime_t timeout )
/** busy-wait for device
* mask - bits of status register that must be set
* not_mask - bits of status register that must not be set
* check_err - abort if error bit is set
* timeout - waiting timeout
* return: true on success
*/
bool
ide_wait(ide_device_info *device, int mask, int not_mask,
bool check_err, bigtime_t timeout)
{
ide_bus_info *bus = device->bus;
bigtime_t start_time = system_time();
while( 1 ) {
while (1) {
bigtime_t elapsed_time;
int status;
// do spin before test as the device needs 400 ns
// to update its status register
spin( 1 );
status = bus->controller->get_altstatus( bus->channel );
if( (status & mask) == mask &&
(status & not_mask) == 0 )
spin(1);
status = bus->controller->get_altstatus(bus->channel);
if ((status & mask) == mask && (status & not_mask) == 0)
return true;
if( check_err && (status & ide_status_err) != 0 ) {
set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
if (check_err && (status & ide_status_err) != 0) {
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
elapsed_time = system_time() - start_time;
if( elapsed_time > timeout ) {
set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT );
if (elapsed_time > timeout) {
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
return false;
}
// if we've waited more than 5 ms, we start passive waiting
// to reduce system load
if( elapsed_time > 5000 )
snooze( elapsed_time / 10 );
if (elapsed_time > 5000)
snooze(elapsed_time / 10);
}
}
// tell device to continue queued command
// on return, no waiting is active!
// tag - will contain tag of command to be continued
// return: true - request continued
// false - something went wrong; sense set
bool device_start_service( ide_device_info *device, int *tag )
/** tell device to continue queued command
* on return, no waiting is active!
* tag - will contain tag of command to be continued
* return: true - request continued
* false - something went wrong; sense set
*/
bool
device_start_service(ide_device_info *device, int *tag)
{
ide_bus_info *bus = device->bus;
FAST_LOG1( bus->log, ev_ide_device_start_service, device->is_device1 );
FAST_LOG1(bus->log, ev_ide_device_start_service, device->is_device1);
device->tf.write.command = IDE_CMD_SERVICE;
device->tf.queued.mode = ide_mode_lba;
if( bus->active_device != device ) {
if (bus->active_device != device) {
// don't apply any precautions in terms of IRQ
// -> the bus is in accessing state, so IRQs are ignored anyway
if( bus->controller->write_command_block_regs( bus->channel,
&device->tf, ide_mask_device_head ) != B_OK )
if (bus->controller->write_command_block_regs(bus->channel,
&device->tf, ide_mask_device_head) != B_OK)
// on error, pretend that this device asks for service
// -> the disappeared controller will be recognized soon ;)
return true;
bus->active_device = device;
// give one clock (400 ns) to take notice
spin( 1 );
spin(1);
}
// here we go...
if( bus->controller->write_command_block_regs( bus->channel, &device->tf,
ide_mask_command ) != B_OK )
if (bus->controller->write_command_block_regs(bus->channel, &device->tf,
ide_mask_command) != B_OK)
goto err;
// we need to wait for the device as we want to read the tag
if( !ide_wait( device, ide_status_drdy, ide_status_bsy, false, 1000000 ))
if (!ide_wait(device, ide_status_drdy, ide_status_bsy, false, 1000000))
return false;
// read tag
if( bus->controller->read_command_block_regs( bus->channel, &device->tf,
ide_mask_sector_count ) != B_OK )
if (bus->controller->read_command_block_regs(bus->channel, &device->tf,
ide_mask_sector_count) != B_OK)
goto err;
if( device->tf.queued.release ) {
if (device->tf.queued.release) {
// bus release is the wrong answer to a service request
set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
*tag = device->tf.queued.tag;
FAST_LOG2( bus->log, ev_ide_device_start_service2, device->is_device1, *tag );
*tag = device->tf.queued.tag;
FAST_LOG2(bus->log, ev_ide_device_start_service2, device->is_device1, *tag);
return true;
err:
set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
// check device whether it wants to continue queued request
bool check_service_req( ide_device_info *device )
/** check device whether it wants to continue queued request */
bool
check_service_req(ide_device_info *device)
{
ide_bus_info *bus = device->bus;
int status;
// fast bailout if there is no request pending
if( device->num_running_reqs == 0 )
if (device->num_running_reqs == 0)
return false;
if( bus->active_device != device ) {
if (bus->active_device != device) {
// don't apply any precautions in terms of IRQ
// -> the bus is in accessing state, so IRQs are ignored anyway
if( bus->controller->write_command_block_regs( bus->channel,
&device->tf, ide_mask_device_head ) != B_OK )
if (bus->controller->write_command_block_regs(bus->channel,
&device->tf, ide_mask_device_head) != B_OK)
// on error, pretend that this device asks for service
// -> the disappeared controller will be recognized soon ;)
return true;
bus->active_device = device;
// give one clock (400 ns) to take notice
spin( 1 );
spin(1);
}
status = bus->controller->get_altstatus( bus->channel );
status = bus->controller->get_altstatus(bus->channel);
return (status & ide_status_service) != 0;
}

View File

@ -1,7 +1,7 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
@ -29,7 +29,7 @@ ide_channel_added(pnp_node_handle parent)
char *str = NULL, *controller_name = NULL;
uint32 channel_id;
SHOW_FLOW0( 2, "" );
SHOW_FLOW0(2, "");
if (pnp->get_attr_string(parent, PNP_DRIVER_TYPE, &str, false) != B_OK
|| strcmp(str, IDE_BUS_TYPE_NAME) != 0)

View File

@ -1,7 +1,7 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the Haiku License.
*/
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
@ -66,28 +66,26 @@ static fast_log_event_type ide_events[] =
static void disconnect_worker( ide_bus_info *bus, void *arg );
static void set_check_condition( ide_qrequest *qrequest );
// check whether this request can be queued within the device
static inline bool is_queuable( ide_device_info *device, scsi_ccb *request )
/** check whether this request can be queued within the device */
static inline bool
is_queuable(ide_device_info *device, scsi_ccb *request)
{
int opcode = request->cdb[0];
// XXX disable queuing
if( !device->CQ_enabled )
if (!device->CQ_enabled)
return false;
// make sure the caller allows queuing
if( (request->flags & SCSI_ORDERED_QTAG) != 0 )
if ((request->flags & SCSI_ORDERED_QTAG) != 0)
return false;
// for atapi, all commands could be queued, but all
// atapi devices I know don't support queuing anyway
if( opcode == SCSI_OP_READ_6 ||
opcode == SCSI_OP_WRITE_6 ||
opcode == SCSI_OP_READ_10 ||
opcode == SCSI_OP_WRITE_10 )
return true;
else
return false;
return opcode == SCSI_OP_READ_6 || opcode == SCSI_OP_WRITE_6
|| opcode == SCSI_OP_READ_10 || opcode == SCSI_OP_WRITE_10;
}
@ -199,7 +197,7 @@ sim_path_inquiry(ide_bus_info *bus, scsi_path_inquiry *info)
{
char *controller_name;
SHOW_FLOW0( 4, "" );
SHOW_FLOW0(4, "");
if (bus->disconnected)
return SCSI_NO_HBA;
@ -256,7 +254,7 @@ sim_scan_bus(ide_bus_info *bus)
{
int i;
SHOW_FLOW0( 4, "" );
SHOW_FLOW0(4, "");
if (bus->disconnected)
return SCSI_NO_HBA;
@ -334,10 +332,10 @@ create_sense(ide_device_info *device, scsi_sense *sense)
void
finish_checksense(ide_qrequest *qrequest)
{
SHOW_FLOW( 3, "%p, subsys_status=%d, sense=%x",
SHOW_FLOW(3, "%p, subsys_status=%d, sense=%x",
qrequest->request,
qrequest->request->subsys_status,
(int)qrequest->device->new_combined_sense );
(int)qrequest->device->new_combined_sense);
qrequest->request->subsys_status = qrequest->device->subsys_status;
@ -416,7 +414,7 @@ set_check_condition(ide_qrequest *qrequest)
scsi_ccb *request = qrequest->request;
ide_device_info *device = qrequest->device;
SHOW_FLOW0( 3, "" );
SHOW_FLOW0(3, "");
request->subsys_status = SCSI_REQ_CMP_ERR;
request->device_status = SCSI_STATUS_CHECK_CONDITION;
@ -426,7 +424,7 @@ set_check_condition(ide_qrequest *qrequest)
scsi_sense sense;
int sense_len;
SHOW_FLOW0( 3, "autosense" );
SHOW_FLOW0(3, "autosense");
// we cannot copy sense directly as sense buffer may be too small
create_sense(device, &sense);

View File

@ -1,7 +1,7 @@
/*
** Copyright 2002-04, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
* Copyright 2002-04, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
@ -46,11 +46,14 @@
// internal error code if scatter gather table is too short
#define ERR_TOO_BIG (B_ERRORS_END + 1)
// prepare PIO transfer
void prep_PIO_transfer( ide_device_info *device, ide_qrequest *qrequest )
/** prepare PIO transfer */
void
prep_PIO_transfer(ide_device_info *device, ide_qrequest *qrequest)
{
SHOW_FLOW0( 4, "" );
SHOW_FLOW0(4, "");
device->left_sg_elem = qrequest->request->sg_cnt;
device->cur_sg_elem = qrequest->request->sg_list;
device->cur_sg_ofs = 0;
@ -58,152 +61,155 @@ void prep_PIO_transfer( ide_device_info *device, ide_qrequest *qrequest )
qrequest->request->data_resid = qrequest->request->data_len;
}
// transfer virtually continuous data
static inline status_t transfer_PIO_virtcont( ide_device_info *device, char *virt_addr, int length,
bool write, int *transferred )
/** transfer virtually continuous data */
static inline status_t
transfer_PIO_virtcont(ide_device_info *device, char *virt_addr, int length,
bool write, int *transferred)
{
ide_bus_info *bus = device->bus;
ide_controller_interface *controller = bus->controller;
ide_channel_cookie cookie = bus->channel;
if( write ) {
if (write) {
// if there is a byte left from last chunk, transmit it together
// with the first byte of the current chunk (IDE requires 16 bits
// to be transmitted at once)
if( device->has_odd_byte ) {
if (device->has_odd_byte) {
uint8 buffer[2];
buffer[0] = device->odd_byte;
buffer[1] = *virt_addr++;
controller->write_pio( cookie, (uint16 *)buffer, 1, false );
controller->write_pio(cookie, (uint16 *)buffer, 1, false);
--length;
*transferred += 2;
}
controller->write_pio( cookie, (uint16 *)virt_addr, length / 2, false );
controller->write_pio(cookie, (uint16 *)virt_addr, length / 2, false);
// take care if chunk size was odd, which means that 1 byte remains
virt_addr += length & ~1;
*transferred += length & ~1;
device->has_odd_byte = (length & 1) != 0;
if( device->has_odd_byte )
if (device->has_odd_byte)
device->odd_byte = *virt_addr;
} else {
// if we read one byte too much last time, push it into current chunk
if( device->has_odd_byte ) {
if (device->has_odd_byte) {
*virt_addr++ = device->odd_byte;
--length;
}
SHOW_FLOW( 4, "Reading PIO to %p, %d bytes", virt_addr, length );
controller->read_pio( cookie, (uint16 *)virt_addr, length / 2, false );
SHOW_FLOW(4, "Reading PIO to %p, %d bytes", virt_addr, length);
controller->read_pio(cookie, (uint16 *)virt_addr, length / 2, false);
// take care of odd chunk size;
// in this case we read 1 byte too few!
virt_addr += length & ~1;
*transferred += length & ~1;
device->has_odd_byte = (length & 1) != 0;
if( device->has_odd_byte ) {
if (device->has_odd_byte) {
uint8 buffer[2];
// now read the missing byte; as we have to read 2 bytes at once,
// we'll read one byte too much
controller->read_pio( cookie, (uint16 *)buffer, 1, false );
controller->read_pio(cookie, (uint16 *)buffer, 1, false);
*virt_addr = buffer[0];
device->odd_byte = buffer[1];
*transferred += 2;
}
}
return B_OK;
}
// transmit physically continuous data
/** transmit physically continuous data */
static inline status_t
transfer_PIO_physcont(ide_device_info *device, addr_t phys_addr,
transfer_PIO_physcont(ide_device_info *device, addr_t phys_addr,
int length, bool write, int *transferred)
{
// we must split up chunk into B_PAGE_SIZE blocks as we can map only
// one page into address space at once
while( length > 0 ) {
while (length > 0) {
void *virt_addr;
int page_left, cur_len;
status_t err;
SHOW_FLOW( 4, "Transmitting to/from physical address %x, %d bytes left", (int)phys_addr,
length );
SHOW_FLOW(4, "Transmitting to/from physical address %x, %d bytes left", (int)phys_addr,
length);
if (map_mainmemory(phys_addr, &virt_addr) != B_OK) {
// ouch: this should never ever happen
set_sense( device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE );
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return B_ERROR;
}
// if chunk starts in the middle of a page, we have even less than
// a page left
page_left = B_PAGE_SIZE - phys_addr % B_PAGE_SIZE;
SHOW_FLOW( 4, "page_left=%d", page_left );
cur_len = min( page_left, length );
SHOW_FLOW( 4, "cur_len=%d", cur_len );
err = transfer_PIO_virtcont( device, (char *)virt_addr,
cur_len, write, transferred );
if( err != B_OK ) {
unmap_mainmemory( virt_addr );
SHOW_FLOW(4, "page_left=%d", page_left);
cur_len = min(page_left, length);
SHOW_FLOW(4, "cur_len=%d", cur_len);
err = transfer_PIO_virtcont(device, (char *)virt_addr,
cur_len, write, transferred);
if (err != B_OK) {
unmap_mainmemory(virt_addr);
return err;
}
length -= cur_len;
phys_addr += cur_len;
unmap_mainmemory( virt_addr );
unmap_mainmemory(virt_addr);
}
return B_OK;
}
// transfer PIO block from/to buffer
static inline int transfer_PIO_block( ide_device_info *device, int length, bool write,
int *transferred )
/** transfer PIO block from/to buffer */
static inline int
transfer_PIO_block(ide_device_info *device, int length, bool write, int *transferred)
{
// data is usually split up into multiple scatter/gather blocks
while( length > 0 ) {
while (length > 0) {
int left_bytes, cur_len;
status_t err;
if( device->left_sg_elem == 0 )
if (device->left_sg_elem == 0)
// oops - buffer too small (for ATAPI data, this is OK)
return ERR_TOO_BIG;
// we might have transmitted part of a scatter/entry already!
left_bytes = device->cur_sg_elem->size - device->cur_sg_ofs;
cur_len = min( left_bytes, length );
err = transfer_PIO_physcont( device,
cur_len = min(left_bytes, length);
err = transfer_PIO_physcont(device,
(addr_t)device->cur_sg_elem->address + device->cur_sg_ofs,
cur_len, write, transferred );
if( err != B_OK )
cur_len, write, transferred);
if (err != B_OK)
return err;
if( left_bytes <= length ) {
if (left_bytes <= length) {
// end of one scatter/gather block reached
device->cur_sg_ofs = 0;
++device->cur_sg_elem;
@ -212,139 +218,150 @@ static inline int transfer_PIO_block( ide_device_info *device, int length, bool
// still in the same block
device->cur_sg_ofs += cur_len;
}
length -= cur_len;
}
return B_OK;
}
// write zero data (required for ATAPI if we ran out of data)
static void write_discard_PIO( ide_device_info *device, int length )
/** write zero data (required for ATAPI if we ran out of data) */
static void
write_discard_PIO(ide_device_info *device, int length)
{
ide_bus_info *bus = device->bus;
uint8 buffer[32];
memset( buffer, 0, sizeof( buffer ));
memset(buffer, 0, sizeof(buffer));
// we transmit 32 zero-bytes at once
// (not very efficient but easy to implement - you get what you deserve
// when you don't provide enough buffer)
while( length > 0 ) {
while (length > 0) {
int cur_len;
// if device asks for odd number of bytes, append an extra byte to
// make length even (this is the "length + 1" term)
cur_len = min( length + 1, (int)(sizeof( buffer ))) / 2;
bus->controller->write_pio( bus->channel, (uint16 *)buffer, cur_len, false );
cur_len = min(length + 1, (int)(sizeof(buffer))) / 2;
bus->controller->write_pio(bus->channel, (uint16 *)buffer, cur_len, false);
length -= cur_len * 2;
}
}
// read PIO data and discard it (required for ATAPI if buffer was too small)
static void read_discard_PIO( ide_device_info *device, int length )
/** read PIO data and discard it (required for ATAPI if buffer was too small) */
static void
read_discard_PIO(ide_device_info *device, int length)
{
ide_bus_info *bus = device->bus;
uint8 buffer[32];
// discard 32 bytes at once (see write_discard_PIO)
while( length > 0 ) {
while (length > 0) {
int cur_len;
// read extra byte if length is odd (that's the "length + 1")
cur_len = min( length + 1, (int)sizeof( buffer )) / 2;
bus->controller->read_pio( bus->channel, (uint16 *)buffer, cur_len, false );
cur_len = min(length + 1, (int)sizeof(buffer)) / 2;
bus->controller->read_pio(bus->channel, (uint16 *)buffer, cur_len, false);
length -= cur_len * 2;
}
}
// write PIO data
// return: there are 3 possible results
// NO_ERROR - everything's nice and groovy
// ERR_TOO_BIG - data buffer was too short, remaining data got discarded
// B_ERROR - something serious went wrong, sense data was set
status_t write_PIO_block( ide_qrequest *qrequest, int length )
/** write PIO data
* return: there are 3 possible results
* NO_ERROR - everything's nice and groovy
* ERR_TOO_BIG - data buffer was too short, remaining data got discarded
* B_ERROR - something serious went wrong, sense data was set
*/
status_t
write_PIO_block(ide_qrequest *qrequest, int length)
{
ide_device_info *device = qrequest->device;
int transferred;
status_t err;
transferred = 0;
err = transfer_PIO_block( device, length, true, &transferred );
err = transfer_PIO_block(device, length, true, &transferred);
qrequest->request->data_resid -= transferred;
if( err != ERR_TOO_BIG )
if (err != ERR_TOO_BIG)
return err;
// there may be a pending odd byte - transmit that now
if( qrequest->device->has_odd_byte ) {
if (qrequest->device->has_odd_byte) {
uint8 buffer[2];
buffer[0] = device->odd_byte;
buffer[1] = 0;
device->has_odd_byte = false;
qrequest->request->data_resid -= 1;
transferred += 2;
device->bus->controller->write_pio( device->bus->channel, (uint16 *)buffer, 1, false );
device->bus->controller->write_pio(device->bus->channel, (uint16 *)buffer, 1, false);
}
// "transferred" may actually be larger than length because the last odd-byte
// is sent together with an extra zero-byte
if( transferred >= length )
if (transferred >= length)
return err;
// Ouch! the device asks for data but we haven't got any left.
// Sadly, this behaviour is OK for ATAPI packets, but there is no
// way to tell the device that we don't have any data left;
// only solution is to send zero bytes, though it's BAD BAD BAD
write_discard_PIO( qrequest->device, length - transferred );
write_discard_PIO(qrequest->device, length - transferred);
return ERR_TOO_BIG;
}
// read PIO data
// return: see write_PIO_block
status_t read_PIO_block( ide_qrequest *qrequest, int length )
/** read PIO data
* return: see write_PIO_block
*/
status_t
read_PIO_block(ide_qrequest *qrequest, int length)
{
ide_device_info *device = qrequest->device;
int transferred;
status_t err;
transferred = 0;
err = transfer_PIO_block( qrequest->device, length, false, &transferred );
err = transfer_PIO_block(qrequest->device, length, false, &transferred);
qrequest->request->data_resid -= transferred;
// if length was odd, there's an extra byte waiting in device->odd_byte
if( device->has_odd_byte ) {
if (device->has_odd_byte) {
// discard byte
device->has_odd_byte = false;
// adjust res_id as the extra byte didn't reach the buffer
++qrequest->request->data_resid;
}
if( err != ERR_TOO_BIG )
if (err != ERR_TOO_BIG)
return err;
// the device returns more data then the buffer can store;
// for ATAPI this is OK - we just discard remaining bytes (there
// is no way to tell ATAPI about that, but we "only" waste time)
// perhaps discarding the extra odd-byte was sufficient
if( transferred >= length )
if (transferred >= length)
return err;
SHOW_FLOW( 3, "discarding after %d bytes", transferred );
read_discard_PIO( qrequest->device, length - transferred );
SHOW_FLOW(3, "discarding after %d bytes", transferred);
read_discard_PIO(qrequest->device, length - transferred);
return ERR_TOO_BIG;
}

View File

@ -1,7 +1,7 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
@ -27,31 +27,27 @@ ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
scsi_mode_param_header_10 param_header;
scsi_modepage_contr contr;
scsi_mode_param_block_desc block_desc;
size_t total_length =
sizeof(scsi_mode_param_header_10) +
sizeof(scsi_mode_param_block_desc) +
sizeof(scsi_modepage_contr);
size_t total_length = sizeof(scsi_mode_param_header_10)
+ sizeof(scsi_mode_param_block_desc)
+ sizeof(scsi_modepage_contr);
scsi_mode_param_dev_spec_da devspec = {
res0_0 : 0,
DPOFUA : 0,
res0_6 : 0,
WP : 0
};
int allocation_length;
SHOW_ERROR0( 0, "Hi!" );
uint32 allocation_length;
SHOW_FLOW0(1, "Hi!");
allocation_length = ((int16)cmd->high_allocation_length << 8)
| cmd->low_allocation_length;
// we answer control page requests and "all pages" requests
// (as the latter are the same as the first)
if( (cmd->page_code != SCSI_MODEPAGE_CONTROL &&
cmd->page_code != SCSI_MODEPAGE_ALL) ||
(cmd->PC != SCSI_MODE_SENSE_PC_CURRENT &&
cmd->PC != SCSI_MODE_SENSE_PC_SAVED))
{
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD );
if ((cmd->page_code != SCSI_MODEPAGE_CONTROL && cmd->page_code != SCSI_MODEPAGE_ALL)
|| (cmd->PC != SCSI_MODE_SENSE_PC_CURRENT && cmd->PC != SCSI_MODE_SENSE_PC_SAVED)) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
return;
}
@ -60,293 +56,300 @@ ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
param_header.high_mode_data_len = 0;
param_header.medium_type = 0; // XXX standard is a bit vague here
param_header.dev_spec_parameter = *(uint8 *)&devspec;
param_header.low_block_desc_len = sizeof( scsi_mode_param_block_desc );
param_header.low_block_desc_len = sizeof(scsi_mode_param_block_desc);
param_header.high_block_desc_len = 0;
copy_sg_data( request, 0, allocation_length,
&param_header, sizeof( param_header ), false );
copy_sg_data(request, 0, allocation_length, &param_header, sizeof(param_header), false);
/*block_desc = (scsi_mode_param_block_desc *)(request->data
+ sizeof( *param_header ));*/
memset( &block_desc, 0, sizeof( block_desc ));
+ sizeof(*param_header));*/
memset(&block_desc, 0, sizeof(block_desc));
// density is reserved (0), descriptor apply to entire medium (num_blocks=0)
// remains the blocklen to be set
block_desc.high_blocklen = 0;
block_desc.med_blocklen = 512 >> 8;
block_desc.low_blocklen = 512 & 0xff;
copy_sg_data( request, sizeof( param_header ), allocation_length,
&block_desc, sizeof( block_desc ), false );
copy_sg_data(request, sizeof(param_header), allocation_length,
&block_desc, sizeof(block_desc), false);
/*contr = (scsi_modepage_contr *)(request->data
+ sizeof( *param_header )
+ sizeof(*param_header)
+ ((uint16)param_header->high_block_desc_len << 8)
+ param_header->low_block_desc_len);*/
memset( &contr, 0, sizeof( contr ));
memset(&contr, 0, sizeof(contr));
contr.RLEC = false;
contr.DQue = !device->CQ_enabled;
contr.QErr = false; // when a command fails we requeue all
// lost commands automagically
contr.QErr = false;
// when a command fails we requeue all
// lost commands automagically
contr.QAM = SCSI_QAM_UNRESTRICTED;
copy_sg_data( request,
sizeof( param_header )
copy_sg_data(request, sizeof(param_header)
+ ((uint16)param_header.high_block_desc_len << 8)
+ param_header.low_block_desc_len,
allocation_length,
&contr, sizeof( contr ), false );
allocation_length, &contr, sizeof(contr), false);
// the number of bytes that were transferred to buffer is
// restricted by allocation length and by request data buffer size
total_length = min( total_length, allocation_length );
total_length = min( total_length, request->data_len );
total_length = min(total_length, allocation_length);
total_length = min(total_length, request->data_len);
request->data_resid = request->data_len - total_length;
return;
}
// emulate modifying control page
static bool ata_mode_select_contrpage( ide_device_info *device, ide_qrequest *qrequest,
scsi_modepage_contr *page )
/** emulate modifying control page */
static bool
ata_mode_select_control_page(ide_device_info *device, ide_qrequest *qrequest,
scsi_modepage_contr *page)
{
if( page->header.page_length != sizeof( *page ) - sizeof( page->header )) {
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR );
if (page->header.page_length != sizeof(*page) - sizeof(page->header)) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
return false;
}
// we only support enabling/disabling command queuing
enable_CQ( device, !page->DQue );
enable_CQ(device, !page->DQue);
return true;
}
// emulate MODE SELECT 10 command
static void ata_mode_select_10( ide_device_info *device, ide_qrequest *qrequest )
/** emulate MODE SELECT 10 command */
static void
ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
scsi_cmd_mode_select_10 *cmd = (scsi_cmd_mode_select_10 *)request->cdb;
scsi_mode_param_header_10 param_header;
scsi_modepage_header page_header;
int total_length;
uint modepage_offset;
uint32 total_length;
uint32 modepage_offset;
char modepage_buffer[64]; // !!! enlarge this to support longer mode pages
if( cmd->SP || cmd->PF != 1 ) {
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD );
if (cmd->SP || cmd->PF != 1) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
return;
}
total_length = min( request->data_len,
((uint16)cmd->high_param_list_length << 8) | cmd->low_param_list_length);
total_length = min(request->data_len,
((uint16)cmd->high_param_list_length << 8) | (uint32)cmd->low_param_list_length);
// first, retrieve page header to get size of different chunks
//param_header = (scsi_mode_param_header_10 *)request->data;
if( !copy_sg_data( request, 0, total_length,
&param_header, sizeof( param_header ), true ))
if (!copy_sg_data(request, 0, total_length, &param_header, sizeof(param_header), true))
goto err;
total_length = min( total_length,
(((uint16)param_header.high_mode_data_len << 8) | param_header.low_mode_data_len) + 1 );
total_length = min(total_length, (((uint16)param_header.high_mode_data_len << 8)
| param_header.low_mode_data_len) + 1UL);
// this is the start of the first mode page;
// we ignore the block descriptor silently
modepage_offset =
sizeof( param_header ) +
(((uint16)param_header.high_block_desc_len << 8) | param_header.low_block_desc_len);
modepage_offset = sizeof(param_header) + (((uint16)param_header.high_block_desc_len << 8)
| param_header.low_block_desc_len);
// go through list of pages
while( modepage_offset < total_length ) {
int page_len;
while (modepage_offset < total_length) {
uint32 page_len;
// get header to know how long page is
if( !copy_sg_data( request, modepage_offset, total_length,
&page_header, sizeof( page_header ), true ))
if (!copy_sg_data(request, modepage_offset, total_length,
&page_header, sizeof(page_header), true))
goto err;
// get size of one page and copy it to buffer
page_len = page_header.page_length + sizeof( scsi_modepage_header );
page_len = page_header.page_length + sizeof(scsi_modepage_header);
// the buffer has a maximum size - this is really standard compliant but
// sufficient for our needs
if( page_len > sizeof( modepage_buffer ))
if (page_len > sizeof(modepage_buffer))
goto err;
if( !copy_sg_data( request, modepage_offset, total_length,
&modepage_buffer, min( page_len, sizeof( modepage_buffer )), true ))
if (!copy_sg_data(request, modepage_offset, total_length,
&modepage_buffer, min(page_len, sizeof(modepage_buffer)), true))
goto err;
// modify page;
// currently, we only support the control mode page
switch( page_header.page_code ) {
case SCSI_MODEPAGE_CONTROL:
if( !ata_mode_select_contrpage( device, qrequest,
(scsi_modepage_contr *)modepage_buffer ))
switch (page_header.page_code) {
case SCSI_MODEPAGE_CONTROL:
if (!ata_mode_select_control_page(device, qrequest,
(scsi_modepage_contr *)modepage_buffer))
return;
break;
default:
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_PARAM_LIST_FIELD);
return;
break;
default:
set_sense( device,
SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_PARAM_LIST_FIELD );
return;
}
modepage_offset += page_len;
}
if( modepage_offset != total_length )
if (modepage_offset != total_length)
goto err;
request->data_resid = request->data_len - total_length;
return;
// if we arrive here, data length was incorrect
err:
set_sense( device,
SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR );
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
}
// emulate TEST UNIT READY
static bool ata_tur( ide_device_info *device, ide_qrequest *qrequest )
/** emulate TEST UNIT READY */
static bool
ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
{
SHOW_FLOW0( 3, "" );
if( !device->infoblock.RMSN_supported ||
device->infoblock._127_RMSN_support != 1 )
SHOW_FLOW0(3, "");
if (!device->infoblock.RMSN_supported
|| device->infoblock._127_RMSN_support != 1)
return true;
// ask device about status
device->tf_param_mask = 0;
device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;
if( !send_command( device, qrequest, true, 15, ide_state_sync_waiting ))
if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
return false;
// bits ide_error_mcr | ide_error_mc | ide_error_wp are also valid
// but not requested by TUR; ide_error_wp can safely be ignored, but
// we don't want to loose media change (request) reports
if( !check_output( device, true,
ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
false ))
{
if (!check_output(device, true,
ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
false)) {
// SCSI spec is unclear here: we shouldn't report "media change (request)"
// but what to do if there is one? anyway - we report them
;
}
return true;
}
// flush internal device cache
static bool ata_flush_cache( ide_device_info *device, ide_qrequest *qrequest )
/** flush internal device cache */
static bool
ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
{
// we should also ask for FLUSH CACHE support, but everyone denies it
// (looks like they cheat to gain some performance advantage, but
// that's pretty useless: everyone does it...)
if( !device->infoblock.write_cache_supported )
if (!device->infoblock.write_cache_supported)
return true;
device->tf_param_mask = 0;
device->tf.lba.command = device->use_48bits ? IDE_CMD_FLUSH_CACHE_EXT
: IDE_CMD_FLUSH_CACHE;
// spec says that this may take more then 30s, how much more?
if( !send_command( device, qrequest, true, 60, ide_state_sync_waiting ))
return false;
wait_for_sync( device->bus );
return check_output( device, true, ide_error_abrt, false );
// spec says that this may take more then 30s, how much more?
if (!send_command(device, qrequest, true, 60, ide_state_sync_waiting))
return false;
wait_for_sync(device->bus);
return check_output(device, true, ide_error_abrt, false);
}
// load or eject medium
// load = true - load medium
static bool ata_load_eject( ide_device_info *device, ide_qrequest *qrequest, bool load )
/** load or eject medium
* load = true - load medium
*/
static bool
ata_load_eject(ide_device_info *device, ide_qrequest *qrequest, bool load)
{
if( load ) {
if (load) {
// ATA doesn't support loading
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_NOT_SUPPORTED );
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_NOT_SUPPORTED);
return false;
}
device->tf_param_mask = 0;
device->tf.lba.command = IDE_CMD_MEDIA_EJECT;
if( !send_command( device, qrequest, true, 15, ide_state_sync_waiting ))
if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
return false;
wait_for_sync( device->bus );
return check_output( device, true, ide_error_abrt | ide_error_nm, false );
wait_for_sync(device->bus);
return check_output(device, true, ide_error_abrt | ide_error_nm, false);
}
// emulate PREVENT ALLOW command
static bool ata_prevent_allow( ide_device_info *device, bool prevent )
/** emulate PREVENT ALLOW command */
static bool
ata_prevent_allow(ide_device_info *device, bool prevent)
{
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_ILL_FUNCTION );
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_ILL_FUNCTION);
return false;
}
// emulate INQUIRY command
static void ata_inquiry( ide_device_info *device, ide_qrequest *qrequest )
/** emulate INQUIRY command */
static void
ata_inquiry(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
scsi_res_inquiry data;
scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)request->cdb;
uint allocation_length = cmd->allocation_length;
int transfer_size;
uint32 allocation_length = cmd->allocation_length;
uint32 transfer_size;
if (cmd->EVPD || cmd->page_code) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
return;
}
memset( &data, 0, sizeof( data ));
memset(&data, 0, sizeof(data));
data.device_type = scsi_dev_direct_access;
data.device_qualifier = scsi_periph_qual_connected;
data.device_type_modifier = 0;
data.RMB = false;
data.ANSI_version = 2;
data.ECMA_version = 0;
data.ISO_version = 0;
data.response_data_format = 2;
data.TrmIOP = false; // to be changed if we support TERM I/O
data.additional_length = sizeof( scsi_res_inquiry ) - 4;
data.additional_length = sizeof(scsi_res_inquiry) - 4;
data.SftRe = false;
data.CmdQue = device->queue_depth > 1;
data.Linked = false;
// these values are free-style
data.Sync = false;
data.WBus16 = true;
data.WBus32 = false;
data.RelAdr = false;
// the following fields are *much* to small, sigh...
memcpy( data.vendor_ident, device->infoblock.model_number,
sizeof( data.vendor_ident ));
memcpy( data.product_ident, device->infoblock.model_number + 8,
sizeof( data.product_ident ));
memcpy( data.product_rev, " ", sizeof( data.product_rev ));
copy_sg_data( request, 0, allocation_length, &data, sizeof( data ), false );
transfer_size = min( sizeof( data ), allocation_length );
transfer_size = min( transfer_size, request->data_len );
memcpy(data.vendor_ident, device->infoblock.model_number,
sizeof(data.vendor_ident));
memcpy(data.product_ident, device->infoblock.model_number + 8,
sizeof(data.product_ident));
memcpy(data.product_rev, " ", sizeof(data.product_rev));
copy_sg_data(request, 0, allocation_length, &data, sizeof(data), false);
transfer_size = min(sizeof(data), allocation_length);
transfer_size = min(transfer_size, request->data_len);
request->data_resid = request->data_len - transfer_size;
}
@ -392,141 +395,144 @@ ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
SHOW_FLOW( 3, "command=%x", request->cdb[0] );
SHOW_FLOW(3, "command=%x", request->cdb[0]);
// ATA devices have one LUN only
if( request->target_lun != 0 ) {
if (request->target_lun != 0) {
request->subsys_status = SCSI_SEL_TIMEOUT;
finish_request( qrequest, false );
finish_request(qrequest, false);
return;
}
// starting a request means deleting sense, so don't do it if
// the command wants to read it
if( request->cdb[0] != SCSI_OP_REQUEST_SENSE )
start_request( device, qrequest );
switch( request->cdb[0] ) {
case SCSI_OP_TUR:
ata_tur( device, qrequest );
break;
if (request->cdb[0] != SCSI_OP_REQUEST_SENSE)
start_request(device, qrequest);
case SCSI_OP_REQUEST_SENSE:
ide_request_sense( device, qrequest );
return;
case SCSI_OP_FORMAT: /* FORMAT UNIT */
// we could forward request to disk, but modern disks cannot
// be formatted anyway, so we just refuse request
// (exceptions are removable media devices, but to my knowledge
// they don't have to be formatted as well)
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
break;
switch (request->cdb[0]) {
case SCSI_OP_TUR:
ata_test_unit_ready(device, qrequest);
break;
case SCSI_OP_INQUIRY:
ata_inquiry( device, qrequest );
break;
case SCSI_OP_REQUEST_SENSE:
ide_request_sense(device, qrequest);
return;
case SCSI_OP_MODE_SELECT_10:
ata_mode_select_10( device, qrequest );
break;
case SCSI_OP_MODE_SENSE_10:
ata_mode_sense_10( device, qrequest );
break;
case SCSI_OP_MODE_SELECT_6:
case SCSI_OP_MODE_SENSE_6:
// we've told SCSI bus manager to emulates these commands
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
break;
case SCSI_OP_RESERVE:
case SCSI_OP_RELEASE:
// though mandatory, this doesn't make much sense in a
// single initiator environment; so what
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
break;
case SCSI_OP_START_STOP: {
scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)request->cdb;
// with no LoEj bit set, we should only allow/deny further access
// we ignore that (unsupported for ATA)
// with LoEj bit set, we should additionally either load or eject the medium
// (start = 0 - eject; start = 1 - load)
if( !cmd->start )
// we must always flush cache if start = 0
ata_flush_cache( device, qrequest );
if( cmd->LoEj )
ata_load_eject( device, qrequest, cmd->start );
break; }
case SCSI_OP_PREVENT_ALLOW: {
scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)request->cdb;
ata_prevent_allow( device, cmd->prevent );
break; }
case SCSI_OP_READ_CAPACITY:
read_capacity( device, qrequest );
break;
case SCSI_OP_VERIFY:
// does anyone uses this function?
// effectly, it does a read-and-compare, which IDE doesn't support
set_sense( device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
break;
case SCSI_OP_SYNCHRONIZE_CACHE:
// we ignore range and immediate bit, we always immediately flush everything
ata_flush_cache( device, qrequest );
break;
// sadly, there are two possible read/write operation codes;
// at least, the third one, read/write(12), is not valid for DAS
case SCSI_OP_READ_6:
case SCSI_OP_WRITE_6: {
scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;
uint32 pos;
size_t length;
pos = ((uint32)cmd->high_LBA << 16) | ((uint32)cmd->mid_LBA << 8)
| (uint32)cmd->low_LBA;
length = cmd->length != 0 ? cmd->length : 256;
SHOW_FLOW( 3, "READ6/WRITE6 pos=%ux, length=%ux", (uint)pos, (uint)length );
ata_send_rw( device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_6 );
return; }
case SCSI_OP_READ_10:
case SCSI_OP_WRITE_10: {
scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;
uint32 pos;
size_t length;
pos = ((uint32)cmd->top_LBA << 24) | ((uint32)cmd->high_LBA << 16)
| ((uint32)cmd->mid_LBA << 8) | (uint32)cmd->low_LBA;
length = ((uint32)cmd->high_length << 8) | cmd->low_length;
if( length != 0 ) {
ata_send_rw( device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_10 );
} else {
// we cannot transfer zero blocks (apart from LBA48)
finish_request( qrequest, false );
case SCSI_OP_FORMAT: /* FORMAT UNIT */
// we could forward request to disk, but modern disks cannot
// be formatted anyway, so we just refuse request
// (exceptions are removable media devices, but to my knowledge
// they don't have to be formatted as well)
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;
case SCSI_OP_INQUIRY:
ata_inquiry(device, qrequest);
break;
case SCSI_OP_MODE_SELECT_10:
ata_mode_select_10(device, qrequest);
break;
case SCSI_OP_MODE_SENSE_10:
ata_mode_sense_10(device, qrequest);
break;
case SCSI_OP_MODE_SELECT_6:
case SCSI_OP_MODE_SENSE_6:
// we've told SCSI bus manager to emulates these commands
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;
case SCSI_OP_RESERVE:
case SCSI_OP_RELEASE:
// though mandatory, this doesn't make much sense in a
// single initiator environment; so what
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;
case SCSI_OP_START_STOP: {
scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)request->cdb;
// with no LoEj bit set, we should only allow/deny further access
// we ignore that (unsupported for ATA)
// with LoEj bit set, we should additionally either load or eject the medium
// (start = 0 - eject; start = 1 - load)
if (!cmd->start)
// we must always flush cache if start = 0
ata_flush_cache(device, qrequest);
if (cmd->LoEj)
ata_load_eject(device, qrequest, cmd->start);
break;
}
return; }
default:
set_sense( device,
SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE );
case SCSI_OP_PREVENT_ALLOW: {
scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)request->cdb;
ata_prevent_allow(device, cmd->prevent);
break;
}
case SCSI_OP_READ_CAPACITY:
read_capacity(device, qrequest);
break;
case SCSI_OP_VERIFY:
// does anyone uses this function?
// effectly, it does a read-and-compare, which IDE doesn't support
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;
case SCSI_OP_SYNCHRONIZE_CACHE:
// we ignore range and immediate bit, we always immediately flush everything
ata_flush_cache(device, qrequest);
break;
// sadly, there are two possible read/write operation codes;
// at least, the third one, read/write(12), is not valid for DAS
case SCSI_OP_READ_6:
case SCSI_OP_WRITE_6: {
scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;
uint32 pos;
size_t length;
pos = ((uint32)cmd->high_LBA << 16) | ((uint32)cmd->mid_LBA << 8)
| (uint32)cmd->low_LBA;
length = cmd->length != 0 ? cmd->length : 256;
SHOW_FLOW(3, "READ6/WRITE6 pos=%lx, length=%lx", pos, length);
ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_6);
return;
}
case SCSI_OP_READ_10:
case SCSI_OP_WRITE_10: {
scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;
uint32 pos;
size_t length;
pos = ((uint32)cmd->top_LBA << 24) | ((uint32)cmd->high_LBA << 16)
| ((uint32)cmd->mid_LBA << 8) | (uint32)cmd->low_LBA;
length = ((uint32)cmd->high_length << 8) | cmd->low_length;
if (length != 0) {
ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_10);
} else {
// we cannot transfer zero blocks (apart from LBA48)
finish_request(qrequest, false);
}
return;
}
default:
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
}
finish_checksense( qrequest );
finish_checksense(qrequest);
}

View File

@ -1,7 +1,7 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the Haiku License.
*/
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
@ -19,7 +19,7 @@
//#define TRACE_SYNC
#ifdef TRACE_SYNC
# define TRACE(x) dprintf x
# define TRACE(x) { dprintf("%s(): ", __FUNCTION__); dprintf x ; }
#else
# define TRACE(x) ;
#endif
@ -34,7 +34,7 @@ ide_dpc(void *arg)
ide_qrequest *qrequest;
ide_device_info *device;
TRACE(("ide_dpc()\n"));
TRACE(("\n"));
//snooze(500000);
@ -42,7 +42,7 @@ ide_dpc(void *arg)
// in idle state, so we just check whether there is an active request,
// which means that we were async_waiting
if (bus->active_qrequest != NULL) {
FAST_LOG1( bus->log, ev_ide_dpc_continue, (uint32)bus->active_qrequest );
FAST_LOG1(bus->log, ev_ide_dpc_continue, (uint32)bus->active_qrequest);
TRACE(("continue command\n"));
// cancel timeout
@ -69,7 +69,6 @@ ide_dpc(void *arg)
FAST_LOG0(bus->log, ev_ide_dpc_service);
device = get_current_device(bus);
if (device == NULL) {
// got an interrupt from a non-existing device
// either this is a spurious interrupt or there *is* a device
@ -99,7 +98,7 @@ ide_irq_handler(ide_bus_info *bus, uint8 status)
{
ide_device_info *device;
TRACE(("ide_irq_handler()\n"));
TRACE(("\n"));
FAST_LOG0(bus->log, ev_ide_irq_handle);
// we need to lock bus to have a solid bus state

View File

@ -43,15 +43,15 @@
#endif
#ifndef debug_level_flow
# define debug_level_flow 3
# define debug_level_flow 4
#endif
#ifndef debug_level_info
# define debug_level_info 2
# define debug_level_info 4
#endif
#ifndef debug_level_error
# define debug_level_error 1
# define debug_level_error 4
#endif
#define FUNC_NAME DEBUG_MSG_PREFIX __FUNCTION__ ": "