copying current ide bus manager into ata directory

git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@23276 a95241bf-73f2-0310-859d-f6bbb57e9c96
Marcus Overhagen 2008-01-07 12:59:53 +00:00
parent 413e26b8a0
commit 0cd362c755
21 changed files with 6139 additions and 0 deletions


@@ -0,0 +1,23 @@
SubDir HAIKU_TOP src add-ons kernel bus_managers ide ;
UsePrivateHeaders drivers kernel ;
UsePrivateHeaders [ FDirName kernel arch $(TARGET_ARCH) ] ;
UsePrivateHeaders [ FDirName kernel boot platform $(TARGET_BOOT_PLATFORM) ] ;
KernelAddon ide :
ata.c
atapi.c
basic_protocol.c
channels.c
devices.c
dma.c
emulation.c
ide.c
ide_sim.c
pio.c
queuing.c
scsi2ata.c
sync.c
virtual_memory.c
;


@@ -0,0 +1,31 @@
/*
* Copyright 2004-2006, Haiku, Inc. All Rights Reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
Functions that are missing in the kernel.
*/
#ifndef _KERNEL_EXPORT_EXT_H
#define _KERNEL_EXPORT_EXT_H
#include <KernelExport.h>
#include <iovec.h>
// get memory map of iovec
status_t get_iovec_memory_map(
iovec *vec, // iovec to analyze
size_t vec_count, // number of entries in vec
size_t vec_offset, // number of bytes to skip at beginning of vec
size_t len, // number of bytes to analyze
physical_entry *map, // resulting memory map
size_t max_entries, // max number of entries in map
size_t *num_entries, // actual number of map entries used
size_t *mapped_len // actual number of bytes described by map
);
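// Illustrative usage sketch (an assumption, not part of the original header):
// given a hypothetical iovec array `vecs` of `vec_count` entries describing
// `len` bytes, the backing physical ranges could be queried like this:
//
//	physical_entry map[8];
//	size_t num_entries, mapped_len;
//	status_t res = get_iovec_memory_map(vecs, vec_count, 0, len,
//		map, 8, &num_entries, &mapped_len);
//	if (res == B_OK && mapped_len == len) {
//		// map[0 .. num_entries - 1] now describes the buffer physically
//	}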
#endif // _KERNEL_EXPORT_EXT_H


@@ -0,0 +1,787 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open IDE bus manager
ATA command protocol
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include "ide_cmds.h"
/** verify that device is ready for further PIO transmission */
static bool
check_rw_status(ide_device_info *device, bool drqStatus)
{
ide_bus_info *bus = device->bus;
int status;
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & ide_status_bsy) != 0) {
device->subsys_status = SCSI_SEQUENCE_FAIL;
return false;
}
if (drqStatus != ((status & ide_status_drq) != 0)) {
device->subsys_status = SCSI_SEQUENCE_FAIL;
return false;
}
return true;
}
/** DPC called at
* - beginning of each PIO read/write block
* - end of PIO write transmission
*/
void
ata_dpc_PIO(ide_qrequest *qrequest)
{
ide_device_info *device = qrequest->device;
uint32 timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
SHOW_FLOW0(3, "");
if (check_rw_error(device, qrequest)
|| !check_rw_status(device, qrequest->is_write ? device->left_blocks > 0 : true))
{
// failure reported by device
SHOW_FLOW0( 3, "command finished unsuccessfully" );
finish_checksense(qrequest);
return;
}
if (qrequest->is_write) {
if (device->left_blocks == 0) {
// this was the end-of-transmission IRQ
SHOW_FLOW0(3, "write access finished");
if (!wait_for_drqdown(device)) {
SHOW_ERROR0(3, "device wants to transmit data though command is finished");
goto finish;
}
goto finish;
}
// wait until device requests data
SHOW_FLOW0(3, "Waiting for device ready to transmit");
if (!wait_for_drq(device)) {
SHOW_FLOW0(3, "device not ready for data transmission - abort");
goto finish;
}
// start async waiting for next block/end of command
// ideally we would start it once the block is transmitted, but with bad
// luck the IRQ fires exactly between transmission and the start of waiting,
// so we'd better start waiting too early; as we are in the service thread,
// a DPC initiated by an IRQ cannot overtake us, so there is no need to block
// IRQs during the send
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
// having a too short data buffer shouldn't happen here
// anyway - we are prepared
SHOW_FLOW0(3, "Writing one block");
if (write_PIO_block(qrequest, 512) == B_ERROR)
goto finish_cancel_timeout;
--device->left_blocks;
} else {
if (device->left_blocks > 1) {
// start async waiting for next command (see above)
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
}
// see write
SHOW_FLOW0( 3, "Reading one block" );
if (read_PIO_block(qrequest, 512) == B_ERROR)
goto finish_cancel_timeout;
--device->left_blocks;
if (device->left_blocks == 0) {
// at end of transmission, wait for data request going low
SHOW_FLOW0( 3, "Waiting for device to finish transmission" );
if (!wait_for_drqdown(device))
SHOW_FLOW0( 3, "Device continues data transmission - abort command" );
// we don't cancel timeout as no timeout is started during last block
goto finish;
}
}
return;
finish_cancel_timeout:
cancel_irq_timeout(device->bus);
finish:
finish_checksense(qrequest);
}
/** DPC called when IRQ was fired at end of DMA transmission */
void
ata_dpc_DMA(ide_qrequest *qrequest)
{
ide_device_info *device = qrequest->device;
bool dma_success, dev_err;
dma_success = finish_dma(device);
dev_err = check_rw_error(device, qrequest);
if (dma_success && !dev_err) {
// reset error count if DMA worked
device->DMA_failures = 0;
device->CQ_failures = 0;
qrequest->request->data_resid = 0;
finish_checksense(qrequest);
} else {
SHOW_ERROR0( 2, "Error in DMA transmission" );
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);
if (++device->DMA_failures >= MAX_DMA_FAILURES) {
SHOW_ERROR0( 2, "Disabled DMA because of too many errors" );
device->DMA_enabled = false;
}
// reset queue in case queuing is active
finish_reset_queue(qrequest);
}
}
// list of LBA48 opcodes
static uint8 cmd_48[2][2] = {
{ IDE_CMD_READ_SECTORS_EXT, IDE_CMD_WRITE_SECTORS_EXT },
{ IDE_CMD_READ_DMA_EXT, IDE_CMD_WRITE_DMA_EXT }
};
// list of normal LBA opcodes
static uint8 cmd_28[2][2] = {
{ IDE_CMD_READ_SECTORS, IDE_CMD_WRITE_SECTORS },
{ IDE_CMD_READ_DMA, IDE_CMD_WRITE_DMA }
};
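// both tables are indexed as cmd[uses_dma][is_write]; for example,
// cmd_28[1][0] is IDE_CMD_READ_DMA and cmd_48[0][1] is IDE_CMD_WRITE_SECTORS_EXT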
/** create IDE read/write command */
static bool
create_rw_taskfile(ide_device_info *device, ide_qrequest *qrequest,
uint64 pos, size_t length, bool write)
{
SHOW_FLOW0( 3, "" );
// XXX disable any writes
/* if( write )
goto err;*/
if (device->use_LBA) {
if (device->use_48bits && (pos + length > 0xfffffff || length > 0x100)) {
// use LBA48 only if necessary
SHOW_FLOW0( 3, "using LBA48" );
if (length > 0xffff)
goto err;
if (qrequest->queuable) {
// queued LBA48
device->tf_param_mask = ide_mask_features_48
| ide_mask_sector_count
| ide_mask_LBA_low_48
| ide_mask_LBA_mid_48
| ide_mask_LBA_high_48;
device->tf.queued48.sector_count_0_7 = length & 0xff;
device->tf.queued48.sector_count_8_15 = (length >> 8) & 0xff;
device->tf.queued48.tag = qrequest->tag;
device->tf.queued48.lba_0_7 = pos & 0xff;
device->tf.queued48.lba_8_15 = (pos >> 8) & 0xff;
device->tf.queued48.lba_16_23 = (pos >> 16) & 0xff;
device->tf.queued48.lba_24_31 = (pos >> 24) & 0xff;
device->tf.queued48.lba_32_39 = (pos >> 32) & 0xff;
device->tf.queued48.lba_40_47 = (pos >> 40) & 0xff;
device->tf.queued48.command = write ? IDE_CMD_WRITE_DMA_QUEUED
: IDE_CMD_READ_DMA_QUEUED;
return true;
} else {
// non-queued LBA48
device->tf_param_mask = ide_mask_sector_count_48
| ide_mask_LBA_low_48
| ide_mask_LBA_mid_48
| ide_mask_LBA_high_48;
device->tf.lba48.sector_count_0_7 = length & 0xff;
device->tf.lba48.sector_count_8_15 = (length >> 8) & 0xff;
device->tf.lba48.lba_0_7 = pos & 0xff;
device->tf.lba48.lba_8_15 = (pos >> 8) & 0xff;
device->tf.lba48.lba_16_23 = (pos >> 16) & 0xff;
device->tf.lba48.lba_24_31 = (pos >> 24) & 0xff;
device->tf.lba48.lba_32_39 = (pos >> 32) & 0xff;
device->tf.lba48.lba_40_47 = (pos >> 40) & 0xff;
device->tf.lba48.command = cmd_48[qrequest->uses_dma][write];
return true;
}
} else {
// normal LBA
SHOW_FLOW0(3, "using LBA");
if (length > 0x100)
goto err;
if (qrequest->queuable) {
// queued LBA
SHOW_FLOW( 3, "creating DMA queued command, tag=%d", qrequest->tag );
device->tf_param_mask = ide_mask_features
| ide_mask_sector_count
| ide_mask_LBA_low
| ide_mask_LBA_mid
| ide_mask_LBA_high
| ide_mask_device_head;
device->tf.queued.sector_count = length & 0xff;
device->tf.queued.tag = qrequest->tag;
device->tf.queued.lba_0_7 = pos & 0xff;
device->tf.queued.lba_8_15 = (pos >> 8) & 0xff;
device->tf.queued.lba_16_23 = (pos >> 16) & 0xff;
device->tf.queued.lba_24_27 = (pos >> 24) & 0xf;
device->tf.queued.command = write ? IDE_CMD_WRITE_DMA_QUEUED
: IDE_CMD_READ_DMA_QUEUED;
return true;
} else {
// non-queued LBA
SHOW_FLOW0( 3, "creating normal DMA/PIO command" );
device->tf_param_mask = ide_mask_sector_count
| ide_mask_LBA_low
| ide_mask_LBA_mid
| ide_mask_LBA_high
| ide_mask_device_head;
device->tf.lba.sector_count = length & 0xff;
device->tf.lba.lba_0_7 = pos & 0xff;
device->tf.lba.lba_8_15 = (pos >> 8) & 0xff;
device->tf.lba.lba_16_23 = (pos >> 16) & 0xff;
device->tf.lba.lba_24_27 = (pos >> 24) & 0xf;
device->tf.lba.command = cmd_28[qrequest->uses_dma][write];
return true;
}
}
} else {
// CHS mode
// (probably, no one would notice if we dropped support)
uint32 track_size, cylinder_offset, cylinder;
ide_device_infoblock *infoblock = &device->infoblock;
if (length > 0x100)
goto err;
device->tf.chs.mode = ide_mode_chs;
device->tf_param_mask = ide_mask_sector_count
| ide_mask_sector_number
| ide_mask_cylinder_low
| ide_mask_cylinder_high
| ide_mask_device_head;
device->tf.chs.sector_count = length & 0xff;
track_size = infoblock->current_heads * infoblock->current_sectors;
if (track_size == 0) {
set_sense(device,
SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_MEDIUM_FORMAT_CORRUPTED);
return false;
}
cylinder = pos / track_size;
device->tf.chs.cylinder_0_7 = cylinder & 0xff;
device->tf.chs.cylinder_8_15 = (cylinder >> 8) & 0xff;
cylinder_offset = pos - cylinder * track_size;
device->tf.chs.sector_number = (cylinder_offset % infoblock->current_sectors + 1) & 0xff;
device->tf.chs.head = cylinder_offset / infoblock->current_sectors;
device->tf.chs.command = cmd_28[qrequest->uses_dma][write];
return true;
}
return true;
err:
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
return false;
}
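/* Worked example for the CHS split above (illustrative numbers only):
with current_heads = 16 and current_sectors = 63, track_size = 16 * 63 = 1008;
for pos = 20000: cylinder = 20000 / 1008 = 19,
cylinder_offset = 20000 - 19 * 1008 = 848,
sector_number = 848 % 63 + 1 = 30, head = 848 / 63 = 13. */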
/** execute read/write command
* pos - first block
* length - number of blocks
*/
void
ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
uint64 pos, size_t length, bool write)
{
ide_bus_info *bus = device->bus;
uint32 timeout;
// make a copy first as settings may get changed by user during execution
qrequest->is_write = write;
qrequest->uses_dma = device->DMA_enabled;
if (qrequest->uses_dma) {
if (!prepare_dma(device, qrequest)) {
// fall back to PIO on error
// if command queueing is used and there is another command
// already running, we cannot fall back to PIO immediately -> declare
// command as not queuable and resubmit it, so the scsi bus manager
// will block other requests on retry
// (XXX this is not fine if the caller wants to recycle the CCB)
if (device->num_running_reqs > 1) {
qrequest->request->flags &= ~SCSI_ORDERED_QTAG;
finish_retry(qrequest);
return;
}
qrequest->uses_dma = false;
}
}
if (!qrequest->uses_dma) {
prep_PIO_transfer(device, qrequest);
device->left_blocks = length;
}
// compose command
if (!create_rw_taskfile(device, qrequest, pos, length, write))
goto err_setup;
// if no timeout is specified, use standard
timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
// in DMA mode, we continue with "accessing",
// on PIO read, we continue with "async waiting"
// on PIO write, we continue with "accessing"
if (!send_command(device, qrequest, !device->is_atapi, timeout,
(!qrequest->uses_dma && !qrequest->is_write) ?
ide_state_async_waiting : ide_state_accessing))
goto err_send;
if (qrequest->uses_dma) {
// if queuing is used, we have to ask the device first whether it wants
// to postpone the command
// XXX: using the bus release IRQ, we wouldn't have to busy-wait for
// a response, but I heard that IBM drives have problems with
// that IRQ; to be evaluated
if (qrequest->queuable) {
if (!wait_for_drdy(device))
goto err_send;
if (check_rw_error(device, qrequest))
goto err_send;
if (device_released_bus(device)) {
// device enqueued command, so we have to wait;
// in access_finished, we'll ask device whether it wants to
// continue some other command
bus->active_qrequest = NULL;
access_finished(bus, device);
// we may have rejected commands meanwhile, so tell
// the SIM that it can resend them now
scsi->cont_send_bus(bus->scsi_cookie);
return;
}
//SHOW_ERROR0( 2, "device executes command instantly" );
}
start_dma_wait_no_lock(device, qrequest);
} else {
// on PIO read, we start with waiting, on PIO write we can
// transmit data immediately; we let the service thread do
// the writing, so the caller can issue the next command
// immediately (this optimisation really pays on SMP systems
// only)
SHOW_FLOW0(3, "Ready for PIO");
if (qrequest->is_write) {
SHOW_FLOW0(3, "Scheduling write DPC");
scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
}
}
return;
err_setup:
// error during setup
if (qrequest->uses_dma)
abort_dma(device, qrequest);
finish_checksense(qrequest);
return;
err_send:
// error during/after send;
// in this case, the device discards the queued request automatically
if (qrequest->uses_dma)
abort_dma(device, qrequest);
finish_reset_queue(qrequest);
}
/** check for errors reported by read/write command
* return: true, if an error occurred
*/
bool
check_rw_error(ide_device_info *device, ide_qrequest *qrequest)
{
ide_bus_info *bus = device->bus;
uint8 status;
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & ide_status_err) != 0) {
uint8 error;
if (bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_error) != B_OK) {
device->subsys_status = SCSI_HBA_ERR;
return true;
}
error = device->tf.read.error;
if ((error & ide_error_icrc) != 0) {
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_CRC);
return true;
}
if (qrequest->is_write) {
if ((error & ide_error_wp) != 0) {
set_sense(device, SCSIS_KEY_DATA_PROTECT, SCSIS_ASC_WRITE_PROTECTED);
return true;
}
} else {
if ((error & ide_error_unc) != 0) {
set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_UNREC_READ_ERR);
return true;
}
}
if ((error & ide_error_mc) != 0) {
set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_MEDIUM_CHANGED);
return true;
}
if ((error & ide_error_idnf) != 0) {
// ID not found - invalid CHS mapping (was: seek error?)
set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_RANDOM_POS_ERROR);
return true;
}
if ((error & ide_error_mcr) != 0) {
// XXX proper sense key?
// for TUR this case is not defined !?
set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_REMOVAL_REQUESTED);
return true;
}
if ((error & ide_error_nm) != 0) {
set_sense(device, SCSIS_KEY_NOT_READY, SCSIS_ASC_NO_MEDIUM);
return true;
}
if ((error & ide_error_abrt) != 0) {
set_sense(device, SCSIS_KEY_ABORTED_COMMAND, SCSIS_ASC_NO_SENSE);
return true;
}
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return true;
}
return false;
}
/** check result of ATA command
* drdy_required - true if drdy must be set by device
* error_mask - bits to be checked in error register
* is_write - true, if command was a write command
*/
bool
check_output(ide_device_info *device, bool drdy_required,
int error_mask, bool is_write)
{
ide_bus_info *bus = device->bus;
uint8 status;
// check IRQ timeout
if (bus->sync_wait_timeout) {
bus->sync_wait_timeout = false;
device->subsys_status = SCSI_CMD_TIMEOUT;
return false;
}
status = bus->controller->get_altstatus(bus->channel_cookie);
// if device is busy, other flags are indeterminate
if ((status & ide_status_bsy) != 0) {
device->subsys_status = SCSI_SEQUENCE_FAIL;
return false;
}
if (drdy_required && ((status & ide_status_drdy) == 0)) {
device->subsys_status = SCSI_SEQUENCE_FAIL;
return false;
}
if ((status & ide_status_err) != 0) {
uint8 error;
if (bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_error) != B_OK) {
device->subsys_status = SCSI_HBA_ERR;
return false;
}
error = device->tf.read.error & error_mask;
if ((error & ide_error_icrc) != 0) {
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_CRC);
return false;
}
if (is_write) {
if ((error & ide_error_wp) != 0) {
set_sense(device, SCSIS_KEY_DATA_PROTECT, SCSIS_ASC_WRITE_PROTECTED);
return false;
}
} else {
if ((error & ide_error_unc) != 0) {
set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_UNREC_READ_ERR);
return false;
}
}
if ((error & ide_error_mc) != 0) {
// XXX proper sense key?
set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_MEDIUM_CHANGED);
return false;
}
if ((error & ide_error_idnf) != 0) {
// XXX strange error code, don't really know what it means
set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_RANDOM_POS_ERROR);
return false;
}
if ((error & ide_error_mcr) != 0) {
// XXX proper sense key?
set_sense(device, SCSIS_KEY_UNIT_ATTENTION, SCSIS_ASC_REMOVAL_REQUESTED);
return false;
}
if ((error & ide_error_nm) != 0) {
set_sense(device, SCSIS_KEY_MEDIUM_ERROR, SCSIS_ASC_NO_MEDIUM);
return false;
}
if ((error & ide_error_abrt) != 0) {
set_sense(device, SCSIS_KEY_ABORTED_COMMAND, SCSIS_ASC_NO_SENSE);
return false;
}
// either there was no error bit set or it was masked out
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
return true;
}
/** execute SET FEATURES command
* set subcommand in task file before calling this
*/
static bool
device_set_feature(ide_device_info *device, int feature)
{
device->tf_param_mask = ide_mask_features;
device->tf.write.features = feature;
device->tf.write.command = IDE_CMD_SET_FEATURES;
if (!send_command(device, NULL, true, 1, ide_state_sync_waiting))
return false;
wait_for_sync(device->bus);
return check_output(device, true, ide_error_abrt, false);
}
static bool
configure_rmsn(ide_device_info *device)
{
ide_bus_info *bus = device->bus;
int i;
if (!device->infoblock.RMSN_supported
|| device->infoblock._127_RMSN_support != 1)
return true;
if (!device_set_feature(device, IDE_CMD_SET_FEATURES_ENABLE_MSN))
return false;
bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_LBA_mid | ide_mask_LBA_high);
for (i = 0; i < 5; ++i) {
// don't use TUR as it doesn't check ide_error_mcr | ide_error_mc | ide_error_wp
// (note: we don't check wp either)
device->combined_sense = 0;
device->tf_param_mask = 0;
device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;
if (!send_command(device, NULL, true, 15, ide_state_sync_waiting))
continue;
if (check_output(device, true,
ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
true)
|| decode_sense_asc_ascq(device->combined_sense) == SCSIS_ASC_NO_MEDIUM)
return true;
}
return false;
}
static bool
configure_command_queueing(ide_device_info *device)
{
device->CQ_enabled = device->CQ_supported = false;
if (!device->bus->can_CQ
|| !device->infoblock.DMA_QUEUED_supported)
return initialize_qreq_array(device, 1);
if (device->infoblock.RELEASE_irq_supported
&& !device_set_feature( device, IDE_CMD_SET_FEATURES_DISABLE_REL_INT))
dprintf("Cannot disable release irq\n");
if (device->infoblock.SERVICE_irq_supported
&& !device_set_feature(device, IDE_CMD_SET_FEATURES_DISABLE_SERV_INT))
dprintf("Cannot disable service irq\n");
device->CQ_enabled = device->CQ_supported = true;
SHOW_INFO0(2, "Enabled command queueing");
// official IBM docs talk about 31 queue entries, though
// their disks report 32; let's hope their docs are wrong
return initialize_qreq_array(device, device->infoblock.queue_depth + 1);
}
bool
prep_ata(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
uint32 chs_capacity;
SHOW_FLOW0(3, "");
device->is_atapi = false;
device->exec_io = ata_exec_io;
device->last_lun = 0;
// warning: ata == 0 means "this is ata"...
if (infoblock->_0.ata.ATA != 0) {
// CF has either magic header or CFA bit set
// we merge it to "CFA bit set" for easier (later) testing
if (*(uint16 *)infoblock == 0x848a)
infoblock->CFA_supported = true;
else
return false;
}
SHOW_FLOW0(3, "1");
if (!infoblock->_54_58_valid) {
// normally, current_xxx contains active CHS mapping,
// but if BIOS didn't call INITIALIZE DEVICE PARAMETERS
// the default mapping is used
infoblock->current_sectors = infoblock->sectors;
infoblock->current_cylinders = infoblock->cylinders;
infoblock->current_heads = infoblock->heads;
}
// just in case capacity_xxx isn't initialized - calculate it manually
// (seems that this information is really redundant; hopefully)
chs_capacity = infoblock->current_sectors * infoblock->current_cylinders *
infoblock->current_heads;
infoblock->capacity_low = chs_capacity & 0xff;
infoblock->capacity_high = chs_capacity >> 8;
// checking LBA_supported flag should be sufficient, but it seems
// that checking LBA_total_sectors is a good idea
device->use_LBA = infoblock->LBA_supported && infoblock->LBA_total_sectors != 0;
if (device->use_LBA) {
device->total_sectors = infoblock->LBA_total_sectors;
device->tf.lba.mode = ide_mode_lba;
} else {
device->total_sectors = chs_capacity;
device->tf.chs.mode = ide_mode_chs;
}
device->use_48bits = infoblock->_48_bit_addresses_supported;
if (device->use_48bits)
device->total_sectors = infoblock->LBA48_total_sectors;
SHOW_FLOW0(3, "2");
if (!configure_dma(device)
|| !configure_command_queueing(device)
|| !configure_rmsn(device))
return false;
SHOW_FLOW0(3, "3");
return true;
}
void
enable_CQ(ide_device_info *device, bool enable)
{
}


@@ -0,0 +1,516 @@
/*
* Copyright 2004-2007, Haiku, Inc. All Rights Reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
ATAPI command protocol
*/
#include "ide_internal.h"
#include <scsi_cmds.h>
#include "ide_cmds.h"
#include "ide_sim.h"
#include <string.h>
// used for MODE SENSE/SELECT 6 emulation; maximum size is 255 + header,
// so this is a safe bet
#define IDE_ATAPI_BUFFER_SIZE 512
/*!
Set sense according to error reported by device
return: true - device reported error
*/
static bool
check_packet_error(ide_device_info *device, ide_qrequest *qrequest)
{
ide_bus_info *bus = device->bus;
int status;
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & (ide_status_err | ide_status_df)) != 0) {
int error;
SHOW_FLOW(3, "packet error, status=%02x", status);
if (bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_error) != B_OK) {
device->subsys_status = SCSI_HBA_ERR;
return true;
}
// the upper 4 bits contain sense key
// we don't want to clutter syslog with "not ready" and UA messages,
// so use FLOW messages for them
error = device->tf.read.error;
if ((error >> 4) == SCSIS_KEY_NOT_READY
|| (error >> 4) == SCSIS_KEY_UNIT_ATTENTION)
SHOW_FLOW(3, "error=%x", error);
else
SHOW_ERROR(3, "error=%x", error);
// ATAPI says that:
// "ABRT shall be set to one if the requested command has been command
// aborted because the command code or a command parameter is invalid.
// ABRT may be set to one if the device is not able to complete the
// action requested by the command."
// Effectively, it can be set if "something goes wrong", including
// if the medium got changed. Therefore, we currently ignore the bit
// and rely on auto-sense information
/*
if ((error & ide_error_abrt) != 0) {
// if command got aborted, there's no point in reading sense
set_sense(device, SCSIS_KEY_ABORTED_COMMAND, SCSIS_ASC_NO_SENSE);
return false;
}
*/
// tell SCSI layer that sense must be requested
// (we don't take care of auto-sense ourselves)
device->subsys_status = SCSI_REQ_CMP_ERR;
qrequest->request->device_status = SCSI_STATUS_CHECK_CONDITION;
// reset pending emulated sense - it's overwritten by the real one
device->combined_sense = 0;
return true;
}
return false;
}
/*! IRQ handler of packet transfer (executed as DPC) */
void
packet_dpc(ide_qrequest *qrequest)
{
ide_device_info *device = qrequest->device;
ide_bus_info *bus = device->bus;
int status;
uint32 timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
SHOW_FLOW0(3, "");
bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_error | ide_mask_ireason);
status = bus->controller->get_altstatus(bus->channel_cookie);
if (qrequest->packet_irq) {
// device requests packet
qrequest->packet_irq = false;
if (!device->tf.packet_res.cmd_or_data
|| device->tf.packet_res.input_or_output
|| (status & ide_status_drq) == 0) {
device->subsys_status = SCSI_SEQUENCE_FAIL;
goto err;
}
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
// send packet
if (bus->controller->write_pio(bus->channel_cookie,
(uint16 *)device->packet, sizeof(device->packet) / sizeof(uint16),
true) != B_OK) {
SHOW_ERROR0( 1, "Error sending command packet" );
device->subsys_status = SCSI_HBA_ERR;
goto err_cancel_timer;
}
return;
}
if (qrequest->uses_dma) {
// DMA transmission finished
bool dma_err, dev_err;
// don't check drq - if there is some data left, we cannot handle
// it anyway
// XXX does the device throw remaining data away on DMA overflow?
SHOW_FLOW0(3, "DMA done");
dma_err = !finish_dma(device);
dev_err = check_packet_error(device, qrequest);
// what to do if both the DMA controller and the device report an error?
// let's assume that the DMA controller ran into problems because there was a
// device error, so we ignore the DMA error and use the device error instead
if (dev_err) {
finish_checksense(qrequest);
return;
}
// device is happy, let's see what the controller says
if (!dma_err) {
// if DMA works, reset error counter so we don't disable
// DMA only because it didn't work once in a while
device->DMA_failures = 0;
// this is a lie, but there is no way to find out
// how much has been transmitted
qrequest->request->data_resid = 0;
finish_checksense(qrequest);
} else {
// DMA transmission went wrong
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);
if (++device->DMA_failures >= MAX_DMA_FAILURES) {
SHOW_ERROR0(1, "Disabling DMA because of too many errors");
device->DMA_enabled = false;
}
finish_checksense(qrequest);
}
return;
}
// PIO mode
if ((status & ide_status_drq) != 0) {
// device wants to transmit data
int length;
status_t err;
SHOW_FLOW0(3, "data transmission");
if (device->tf.packet_res.cmd_or_data) {
device->subsys_status = SCSI_SEQUENCE_FAIL;
goto err;
}
// check whether transmission direction matches
if ((device->tf.packet_res.input_or_output ^ qrequest->is_write) == 0) {
SHOW_ERROR0(2, "data transmission in wrong way!?");
// TODO: hm, either the device is broken or the caller has specified
// the wrong direction - what is the proper handling?
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);
// reset device to make it alive
// TODO: the device will abort next command with a reset condition
// perhaps we should hide that by reading sense?
SHOW_FLOW0(3, "Reset");
reset_device(device, qrequest);
finish_checksense(qrequest);
return;
}
// ask device how much data it wants to transmit
bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_byte_count);
length = device->tf.packet_res.byte_count_0_7
| ((int)device->tf.packet_res.byte_count_8_15 << 8);
SHOW_FLOW(3, "device transmittes %d bytes", length);
// start waiting before starting transmission, else we
// could start waiting too late;
// don't mind getting overtaken by IRQ handler - as it will
// issue a DPC for the thread context we are in, we are safe
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
if (device->tf.packet_res.input_or_output)
err = read_PIO_block(qrequest, length);
else
err = write_PIO_block(qrequest, length);
// only report "real" errors;
// discarding data (ERR_TOO_BIG) can happen but is OK
if (err == B_ERROR) {
SHOW_ERROR0(2, "Error during PIO transmission");
device->subsys_status = SCSI_HBA_ERR;
goto err_cancel_timer;
}
SHOW_FLOW0(3, "7");
return;
} else {
// device has done its job and doesn't want to transmit data anymore
// -> finish request
SHOW_FLOW0(3, "no data");
check_packet_error(device, qrequest);
SHOW_FLOW(3, "finished: %d of %d left",
(int)qrequest->request->data_resid,
(int)qrequest->request->data_length);
finish_checksense(qrequest);
return;
}
return;
err_cancel_timer:
cancel_irq_timeout(device->bus);
err:
finish_checksense(qrequest);
}
/*! Create taskfile for ATAPI packet */
static bool
create_packet_taskfile(ide_device_info *device, ide_qrequest *qrequest,
bool write)
{
scsi_ccb *request = qrequest->request;
SHOW_FLOW(3, "DMA enabled=%d, uses_dma=%d, scsi_cmd=%x",
device->DMA_enabled, qrequest->uses_dma, device->packet[0]);
device->tf_param_mask = ide_mask_features | ide_mask_byte_count;
device->tf.packet.dma = qrequest->uses_dma;
device->tf.packet.ovl = 0;
device->tf.packet.byte_count_0_7 = request->data_length & 0xff;
device->tf.packet.byte_count_8_15 = request->data_length >> 8;
device->tf.packet.command = IDE_CMD_PACKET;
return true;
}
/*! Send ATAPI packet */
void
send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write)
{
ide_bus_info *bus = device->bus;
bool packet_irq = device->atapi.packet_irq;
uint8 scsi_cmd = device->packet[0];
SHOW_FLOW( 3, "qrequest=%p, command=%x", qrequest, scsi_cmd );
/*{
unsigned int i;
for( i = 0; i < sizeof( device->packet ); ++i )
dprintf( "%x ", device->packet[i] );
}*/
SHOW_FLOW(3, "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x (len=%d)",
device->packet[0], device->packet[1], device->packet[2],
device->packet[3], device->packet[4], device->packet[5],
device->packet[6], device->packet[7], device->packet[8],
device->packet[9], device->packet[10], device->packet[11],
qrequest->request->cdb_length);
//snooze( 1000000 );
qrequest->is_write = write;
// if needed, mark first IRQ as being packet request IRQ
qrequest->packet_irq = packet_irq;
// only READ/WRITE commands can use DMA
// (the device may well support it for all commands, but IDE controllers don't
// report how much data is transmitted, and this information is
// crucial for the SCSI protocol)
// special offer: let READ_CD commands use DMA too
qrequest->uses_dma = device->DMA_enabled
&& (scsi_cmd == SCSI_OP_READ_6 || scsi_cmd == SCSI_OP_WRITE_6
|| scsi_cmd == SCSI_OP_READ_10 || scsi_cmd == SCSI_OP_WRITE_10
|| scsi_cmd == SCSI_OP_READ_12 || scsi_cmd == SCSI_OP_WRITE_12
|| scsi_cmd == SCSI_OP_READ_CD);
// try preparing DMA, if that fails, fall back to PIO
if (qrequest->uses_dma) {
SHOW_FLOW0(3, "0");
if (!prepare_dma( device, qrequest))
qrequest->uses_dma = false;
SHOW_FLOW(3, "0->%d", qrequest->uses_dma);
}
SHOW_FLOW0(3, "1");
if (!qrequest->uses_dma)
prep_PIO_transfer(device, qrequest);
SHOW_FLOW0(3, "2");
if (!create_packet_taskfile(device, qrequest, write))
goto err_setup;
SHOW_FLOW0(3, "3");
if (!send_command(device, qrequest, false,
device->atapi.packet_irq_timeout,
device->atapi.packet_irq ? ide_state_async_waiting : ide_state_accessing))
goto err_setup;
SHOW_FLOW0(3, "4");
if (packet_irq) {
// device asks for packet via IRQ;
// timeout and stuff is already set by send_command
return;
}
SHOW_FLOW0(3, "5");
// wait for device to get ready for packet transmission
if (!ide_wait(device, ide_status_drq, ide_status_bsy, false, 100000))
goto err_setup;
SHOW_FLOW0(3, "6");
// make sure device really asks for command packet
bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_ireason);
if (!device->tf.packet_res.cmd_or_data
|| device->tf.packet_res.input_or_output) {
device->subsys_status = SCSI_SEQUENCE_FAIL;
goto err_setup;
}
SHOW_FLOW0(3, "7");
// some old drives need a delay before submitting the packet
spin(10);
// locking is evil here: as soon as the packet is transmitted, the device
// may raise an IRQ (which actually happens if the device reports a Check
// Condition error). Thus, we have to lock out the IRQ handler _before_ we
// start packet transmission, which forbids all kinds of interrupts for some
// time; to reduce this period, blocking is done just before the last dword is
// sent (avoid sending only 16 bits, as the controller may transmit in 32-bit chunks)
// write packet
if (bus->controller->write_pio(bus->channel_cookie,
(uint16 *)device->packet, sizeof(device->packet) / sizeof(uint16) - 2,
true) != B_OK) {
goto err_packet;
}
IDE_LOCK(bus);
if (bus->controller->write_pio(bus->channel_cookie,
(uint16 *)device->packet + sizeof(device->packet) / sizeof(uint16) - 2,
2, true) != B_OK) {
goto err_packet2;
}
if (qrequest->uses_dma) {
SHOW_FLOW0( 3, "ready for DMA" );
// S/G table must already be set up - we hold the bus lock, so
// we really have to hurry up
start_dma_wait(device, qrequest);
} else {
uint32 timeout = qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT;
start_waiting(bus, timeout, ide_state_async_waiting);
}
SHOW_FLOW0(3, "8");
return;
err_packet2:
IDE_UNLOCK(bus);
err_packet:
device->subsys_status = SCSI_HBA_ERR;
err_setup:
if (qrequest->uses_dma)
abort_dma(device, qrequest);
finish_checksense(qrequest);
}
/*! Execute SCSI I/O for ATAPI devices */
void
atapi_exec_io(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
SHOW_FLOW(3, "command=%x", qrequest->request->cdb[0]);
// ATAPI command packets are 12 bytes long;
// if the command is shorter, remaining bytes must be padded with zeros
memset(device->packet, 0, sizeof(device->packet));
memcpy(device->packet, request->cdb, request->cdb_length);
if (request->cdb[0] == SCSI_OP_REQUEST_SENSE && device->combined_sense) {
// we have a pending emulated sense - return it on REQUEST SENSE
ide_request_sense(device, qrequest);
finish_checksense(qrequest);
} else {
// reset all error codes for new request
start_request(device, qrequest);
// now we have an IDE packet
send_packet(device, qrequest,
(request->flags & SCSI_DIR_MASK) == SCSI_DIR_OUT);
}
}
/*! Prepare device info for ATAPI device */
bool
prep_atapi(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
SHOW_FLOW0(3, "");
device->is_atapi = true;
device->exec_io = atapi_exec_io;
if (infoblock->_0.atapi.ATAPI != 2)
return false;
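// drq_speed values (ATAPI identify data, word 0 bits 5-6) - roughly:
// 0 - polled DRQ (up to 3 ms) and 2 - accelerated DRQ (50 µs) are both
// handled by busy-waiting; 1 - the device raises an IRQ when it is ready
// for the packet, which is why only this mode needs packet_irq handling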
switch(infoblock->_0.atapi.drq_speed) {
case 0:
case 2:
device->atapi.packet_irq = false;
break;
case 1:
device->atapi.packet_irq = true;
device->atapi.packet_irq_timeout = IDE_STD_TIMEOUT;
break;
default:
return false;
}
SHOW_FLOW(3, "drq speed: %d", infoblock->_0.atapi.drq_speed);
/*if( infoblock->_0.atapi.packet_size != 0 )
return false;*/
device->device_type = infoblock->_0.atapi.type;
device->last_lun = infoblock->last_lun;
SHOW_FLOW(3, "device_type=%d, last_lun=%d",
device->device_type, device->last_lun);
// don't use task file to select LUN but command packet
// (SCSI bus manager sets LUN there automatically)
device->tf.packet.lun = 0;
if (!initialize_qreq_array(device, 1)
|| !configure_dma(device))
return false;
// currently, we don't support queuing, but I haven't found any
// ATAPI device that supports queuing anyway, so this is no loss
device->CQ_enabled = device->CQ_supported = false;
return true;
}


@@ -0,0 +1,476 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
Basic ATA/ATAPI protocol functions
*/
#include "ide_internal.h"
#include <scsi_cmds.h>
#include "ide_sim.h"
#include "ide_cmds.h"
// time in µs an IDE interrupt may get delayed
// as this is used for waiting in normal code, this applies to hardware delays only
// it's used for a hardware bug fix as well, see send_command
#define MAX_IRQ_DELAY 50
// maximum number of send tries before giving up
#define MAX_FAILED_SEND 1
/** busy-wait for data request going high */
bool
wait_for_drq(ide_device_info *device)
{
return ide_wait(device, ide_status_drq, 0, true, 10000000);
}
/** busy-wait for data request going low */
bool
wait_for_drqdown(ide_device_info *device)
{
return ide_wait(device, 0, ide_status_drq, true, 1000000);
}
/** busy-wait for device ready */
bool
wait_for_drdy(ide_device_info *device)
{
return ide_wait(device, ide_status_drdy, ide_status_bsy, false, 5000000);
}
/** reset entire IDE bus
* all active requests apart from <ignore> are resubmitted
*/
bool
reset_bus(ide_device_info *device, ide_qrequest *ignore)
{
ide_bus_info *bus = device->bus;
ide_controller_interface *controller = bus->controller;
ide_channel_cookie channel = bus->channel_cookie;
dprintf("ide: reset_bus() device %p, bus %p\n", device, bus);
FAST_LOG0(bus->log, ev_ide_reset_bus);
if (device->reconnect_timer_installed) {
cancel_timer(&device->reconnect_timer.te);
device->reconnect_timer_installed = false;
}
if (device->other_device->reconnect_timer_installed) {
cancel_timer(&device->other_device->reconnect_timer.te);
device->other_device->reconnect_timer_installed = false;
}
// activate srst signal for 5 µs
// also, deactivate IRQ
// (as usual, we will get an IRQ on disabling, but as we leave them
// disabled for 2 ms, this false report is ignored)
if (controller->write_device_control(channel,
ide_devctrl_nien | ide_devctrl_srst | ide_devctrl_bit3) != B_OK)
goto err0;
spin(5);
if (controller->write_device_control(channel, ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
goto err0;
// let devices wake up
snooze(2000);
// ouch, we have to wait up to 31 seconds!
if (!ide_wait(device, 0, ide_status_bsy, true, 31000000)) {
// as we don't know which of the devices is broken
// we leave them both alive
if (controller->write_device_control(channel, ide_devctrl_bit3) != B_OK)
goto err0;
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
goto err1;
}
if (controller->write_device_control(channel, ide_devctrl_bit3) != B_OK)
goto err0;
finish_all_requests(bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true);
finish_all_requests(bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true);
dprintf("ide: reset_bus() device %p, bus %p success\n", device, bus);
return true;
err0:
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
err1:
finish_all_requests(bus->devices[0], ignore, SCSI_SCSI_BUS_RESET, true);
finish_all_requests(bus->devices[1], ignore, SCSI_SCSI_BUS_RESET, true);
//xpt->call_async( bus->xpt_cookie, -1, -1, AC_BUS_RESET, NULL, 0 );
dprintf("ide: reset_bus() device %p, bus %p failed\n", device, bus);
return false;
}
/** execute packet device reset.
* resets the entire bus on failure or if the device is not ATAPI;
* all requests but <ignore> are resubmitted
*/
bool
reset_device(ide_device_info *device, ide_qrequest *ignore)
{
ide_bus_info *bus = device->bus;
status_t res;
uint8 orig_command;
dprintf("ide: reset_device() device %p\n", device);
FAST_LOG1(bus->log, ev_ide_reset_device, device->is_device1);
SHOW_FLOW0(3, "");
if (!device->is_atapi)
goto err;
if (device->reconnect_timer_installed) {
cancel_timer(&device->reconnect_timer.te);
device->reconnect_timer_installed = false;
}
// select device
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_device_head) != B_OK)
goto err;
// save the original command to let the caller restart it
orig_command = device->tf.write.command;
// send device reset, independent of current device state
// (that's the point of a reset)
device->tf.write.command = IDE_CMD_DEVICE_RESET;
res = bus->controller->write_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_command);
device->tf.write.command = orig_command;
if (res != B_OK)
goto err;
// don't know how long to wait, but 31 seconds, like soft reset,
// should be enough
if (!ide_wait(device, 0, ide_status_bsy, true, 31000000))
goto err;
// alright, resubmit all requests
finish_all_requests(device, ignore, SCSI_SCSI_BUS_RESET, true);
SHOW_FLOW0(3, "done");
dprintf("ide: reset_device() device %p success\n", device);
return true;
err:
// do it the hard way
dprintf("ide: reset_device() device %p failed, calling reset_bus\n", device);
return reset_bus(device, ignore);
}
/** new_state must be either accessing, async_waiting or sync_waiting
* param_mask must not include command register
*/
bool
send_command(ide_device_info *device, ide_qrequest *qrequest,
bool need_drdy, uint32 timeout, ide_bus_state new_state)
{
ide_bus_info *bus = device->bus;
bigtime_t irq_disabled_at = 0; // make compiler happy
uint8 num_retries = 0;
bool irq_guard;
FAST_LOGN(bus->log, ev_ide_send_command, 15, device->is_device1, (uint32)qrequest,
device->tf.raw.r[0], device->tf.raw.r[1], device->tf.raw.r[2],
device->tf.raw.r[3], device->tf.raw.r[4], device->tf.raw.r[5],
device->tf.raw.r[6],
device->tf.raw.r[7], device->tf.raw.r[8], device->tf.raw.r[9],
device->tf.raw.r[10], device->tf.raw.r[11]);
retry:
irq_guard = bus->num_running_reqs > 1;
SHOW_FLOW(3, "qrequest=%p, request=%p", qrequest,
qrequest ? qrequest->request : NULL);
// if there are pending requests, IRQs must be disabled to
// not mix up IRQ reasons
// XXX can we avoid that with the IDE_LOCK trick? It would
// save some work and the bug workaround!
if (irq_guard) {
if (bus->controller->write_device_control(bus->channel_cookie,
ide_devctrl_nien | ide_devctrl_bit3) != B_OK)
goto err;
irq_disabled_at = system_time();
}
// select device
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_device_head) != B_OK)
goto err;
bus->active_device = device;
if (!ide_wait(device, 0, ide_status_bsy | ide_status_drq, false, 50000)) {
uint8 status;
SHOW_FLOW0(1, "device is not ready");
status = bus->controller->get_altstatus(bus->channel_cookie);
if (status == 0xff) {
// there is no device (should happen during detection only)
SHOW_FLOW0(1, "there is no device");
// device detection recognizes this code as "all hope lost", so
// neither replace it nor use it anywhere else
device->subsys_status = SCSI_TID_INVALID;
return false;
}
// reset device and retry
if (reset_device(device, qrequest) && ++num_retries <= MAX_FAILED_SEND) {
SHOW_FLOW0(1, "retrying");
goto retry;
}
SHOW_FLOW0(1, "giving up");
// reset too often - abort request
device->subsys_status = SCSI_SEL_TIMEOUT;
return false;
}
if (need_drdy
&& (bus->controller->get_altstatus(bus->channel_cookie) & ide_status_drdy) == 0) {
SHOW_FLOW0(3, "drdy not set");
device->subsys_status = SCSI_SEQUENCE_FAIL;
return false;
}
// write parameters
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf,
device->tf_param_mask) != B_OK)
goto err;
if (irq_guard) {
// IRQ may be fired by service requests and by the process of disabling(!)
// them (I heard this is caused by edge-triggered PCI IRQs);
// wait at least 50 µs to catch all pending IRQs
// (on my system, up to 30 µs elapsed)
// additionally, old drives (at least my IBM-DTTA-351010) lose
// sync if they are pushed too hard - under heavy overlapped write
// stress this drive tends to forget outstanding requests;
// waiting at least 50 µs seems(!) to solve this
while (system_time() - irq_disabled_at < MAX_IRQ_DELAY)
spin(1);
}
// if we will start waiting once the command is sent, we have to
// lock the bus before sending; this way, IRQs that are fired
// shortly before/after the command is sent are delayed until the
// command is really sent (start_waiting unlocks the bus), and then
// the IRQ handler can safely check whether the IRQ really signals
// completion of the command by testing the busy bit of the device
if (new_state != ide_state_accessing) {
IDE_LOCK(bus);
}
if (irq_guard) {
// now it's clear why IRQs get fired, so we can enable them again
if (bus->controller->write_device_control(bus->channel_cookie,
ide_devctrl_bit3) != B_OK)
goto err1;
}
// write command code - this will start the actual command
SHOW_FLOW(3, "Writing command 0x%02x", (int)device->tf.write.command);
if (bus->controller->write_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_command) != B_OK)
goto err1;
// start waiting now; also un-blocks IRQ handler (see above)
if (new_state != ide_state_accessing)
start_waiting(bus, timeout, new_state);
return true;
err1:
if (timeout > 0) {
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
}
err:
device->subsys_status = SCSI_HBA_ERR;
return false;
}
/** busy-wait for device
* mask - bits of status register that must be set
* not_mask - bits of status register that must not be set
* check_err - abort if error bit is set
* timeout - waiting timeout
* return: true on success
*/
bool
ide_wait(ide_device_info *device, int mask, int not_mask,
bool check_err, bigtime_t timeout)
{
ide_bus_info *bus = device->bus;
bigtime_t start_time = system_time();
while (1) {
bigtime_t elapsed_time;
int status;
// do spin before test as the device needs 400 ns
// to update its status register
spin(1);
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & mask) == mask && (status & not_mask) == 0)
return true;
if (check_err && (status & ide_status_err) != 0) {
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
elapsed_time = system_time() - start_time;
if (elapsed_time > timeout) {
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_TIMEOUT);
return false;
}
// if we've waited more than 5 ms, we start passive waiting
// to reduce system load
if (elapsed_time > 5000)
snooze(elapsed_time / 10);
}
}
/** tell device to continue queued command
* on return, no waiting is active!
* tag - will contain tag of command to be continued
* return: true - request continued
* false - something went wrong; sense set
*/
bool
device_start_service(ide_device_info *device, int *tag)
{
ide_bus_info *bus = device->bus;
FAST_LOG1(bus->log, ev_ide_device_start_service, device->is_device1);
device->tf.write.command = IDE_CMD_SERVICE;
device->tf.queued.mode = ide_mode_lba;
if (bus->active_device != device) {
// don't apply any precautions in terms of IRQ
// -> the bus is in accessing state, so IRQs are ignored anyway
if (bus->controller->write_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_device_head) != B_OK)
// on error, pretend that this device asks for service
// -> the disappeared controller will be recognized soon ;)
return true;
bus->active_device = device;
// give one clock (400 ns) to take notice
spin(1);
}
// here we go...
if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_command) != B_OK)
goto err;
// we need to wait for the device as we want to read the tag
if (!ide_wait(device, ide_status_drdy, ide_status_bsy, false, 1000000))
return false;
// read tag
if (bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_sector_count) != B_OK)
goto err;
if (device->tf.queued.release) {
// bus release is the wrong answer to a service request
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
*tag = device->tf.queued.tag;
FAST_LOG2(bus->log, ev_ide_device_start_service2, device->is_device1, *tag);
return true;
err:
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return false;
}
/** check device whether it wants to continue queued request */
bool
check_service_req(ide_device_info *device)
{
ide_bus_info *bus = device->bus;
int status;
// fast bailout if there is no request pending
if (device->num_running_reqs == 0)
return false;
if (bus->active_device != device) {
// don't apply any precautions in terms of IRQ
// -> the bus is in accessing state, so IRQs are ignored anyway
if (bus->controller->write_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_device_head) != B_OK)
// on error, pretend that this device asks for service
// -> the disappeared controller will be recognized soon ;)
return true;
bus->active_device = device;
// give one clock (400 ns) to take notice
spin(1);
}
status = bus->controller->get_altstatus(bus->channel_cookie);
return (status & ide_status_service) != 0;
}


@@ -0,0 +1,121 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
Manager of IDE controllers
Whenever a new IDE channel is reported, a new SIM is
registered at the SCSI bus manager.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include <string.h>
#include <malloc.h>
#include <block_io.h>
/** called when an IDE channel was registered by a controller driver */
static status_t
ide_channel_added(device_node_handle parent)
{
char *controller_name = NULL;
int32 channel_id; // signed, so the error check below (channel_id < 0) can trigger
SHOW_FLOW0(2, "");
if (pnp->get_attr_string(parent, IDE_CONTROLLER_CONTROLLER_NAME_ITEM,
&controller_name, true) != B_OK) {
dprintf("ide: ignored controller - controller name missing\n");
goto err;
}
channel_id = pnp->create_id(IDE_CHANNEL_ID_GENERATOR);
if (channel_id < 0) {
SHOW_ERROR(0, "Cannot register IDE controller %s - out of IDs", controller_name);
goto err;
}
{
device_attr attrs[] =
{
{ B_DRIVER_MODULE, B_STRING_TYPE, { string: IDE_SIM_MODULE_NAME }},
{ B_DRIVER_FIXED_CHILD, B_STRING_TYPE, { string: SCSI_FOR_SIM_MODULE_NAME }},
{ SCSI_DESCRIPTION_CONTROLLER_NAME, B_STRING_TYPE,
{ string: controller_name }},
// maximum number of blocks per transmission:
// - ATAPI uses packets, i.e. normal SCSI limits apply
// but I'm not sure about controller restrictions
// - ATA allows up to 256 blocks
// - some broken disks' firmware (read: IBM DTTA drives)
// doesn't like 256 blocks in command queuing mode
// -> use 255 blocks as the least common denominator
// (this is still 127.5K for HDs and 510K for CDs,
// which should be sufficient)
// Note: to fix specific drive bugs, use ide_sim_get_restrictions()
// in ide_sim.c!
{ B_BLOCK_DEVICE_MAX_BLOCKS_ITEM, B_UINT32_TYPE, { ui32: 255 }},
{ IDE_CHANNEL_ID_ITEM, B_UINT32_TYPE, { ui32: channel_id }},
{ PNP_MANAGER_ID_GENERATOR, B_STRING_TYPE, { string: IDE_CHANNEL_ID_GENERATOR }},
{ PNP_MANAGER_AUTO_ID, B_UINT32_TYPE, { ui32: channel_id }},
{ NULL }
};
device_node_handle node;
status_t res;
res = pnp->register_device(parent, attrs, NULL, &node);
free(controller_name);
return res;
}
err:
free(controller_name);
return B_NO_MEMORY;
}
static status_t
std_ops(int32 op, ...)
{
switch (op) {
case B_MODULE_INIT:
case B_MODULE_UNINIT:
return B_OK;
default:
return B_ERROR;
}
}
ide_for_controller_interface ide_for_controller_module = {
{
{
IDE_FOR_CONTROLLER_MODULE_NAME,
0,
&std_ops
},
NULL, // supported devices
ide_channel_added,
NULL,
NULL,
NULL
},
ide_irq_handler
};


@@ -0,0 +1,332 @@
/*
** Copyright 2007, Marcus Overhagen. All rights reserved.
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the Haiku License.
*/
/*
Part of Open IDE bus manager
Device manager
As the IDE bus manager is a SCSI-to-IDE translator, it
has to know a bit more about connected devices than a standard
SIM. This file contains device detection and classification.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include "ide_cmds.h"
#include <string.h>
#include <malloc.h>
#include <ByteOrder.h>
#define TRACE(x...) dprintf("IDE: " x)
/** clean up links between devices on one bus when <device> is deleted */
static void
cleanup_device_links(ide_device_info *device)
{
ide_bus_info *bus = device->bus;
TRACE("cleanup_device_links: device %p\n", device);
bus->devices[device->is_device1] = NULL;
if (device->other_device) {
if (device->other_device != device) {
device->other_device->other_device = device->other_device;
bus->first_device = device->other_device;
} else
bus->first_device = NULL;
}
device->other_device = NULL;
}
/** destroy device info */
static void
destroy_device(ide_device_info *device)
{
TRACE("destroy_device: device %p\n", device);
// paranoia
device->exec_io = NULL;
cancel_timer(&device->reconnect_timer.te);
scsi->free_dpc(device->reconnect_timeout_dpc);
cleanup_device_links(device);
destroy_qreq_array(device);
uninit_synced_pc(&device->reconnect_timeout_synced_pc);
free(device);
}
/** set up links between the devices on one bus */
static void
setup_device_links(ide_bus_info *bus, ide_device_info *device)
{
TRACE("setup_device_links: bus %p, device %p\n", bus, device);
device->bus = bus;
bus->devices[device->is_device1] = device;
device->other_device = device;
if (device->is_device1) {
if (bus->devices[0]) {
device->other_device = bus->devices[0];
bus->devices[0]->other_device = device;
}
} else {
if (bus->devices[1]) {
device->other_device = bus->devices[1];
bus->devices[1]->other_device = device;
}
}
if (bus->first_device == NULL)
bus->first_device = device;
}
/** create device info */
static ide_device_info *
create_device(ide_bus_info *bus, bool is_device1)
{
ide_device_info *device;
TRACE("create_device: bus %p, device-number %d\n", bus, is_device1);
device = (ide_device_info *)malloc(sizeof(*device));
if (device == NULL)
return NULL;
memset(device, 0, sizeof(*device));
device->is_device1 = is_device1;
device->target_id = is_device1;
setup_device_links(bus, device);
device->DMA_failures = 0;
device->CQ_failures = 0;
device->num_failed_send = 0;
device->combined_sense = 0;
device->num_running_reqs = 0;
device->reconnect_timer.device = device;
init_synced_pc(&device->reconnect_timeout_synced_pc,
reconnect_timeout_worker);
if (scsi->alloc_dpc(&device->reconnect_timeout_dpc) != B_OK)
goto err;
device->total_sectors = 0;
return device;
err:
destroy_device(device);
return NULL;
}
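// the macro below converts an array of big-endian 16-bit words to host byte
// order in place; swapping is only needed on little-endian hosts (the identify
// strings are stored as pairs of characters packed into 16-bit words)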
#if B_HOST_IS_LENDIAN
#define B_BENDIAN_TO_HOST_MULTI(v, n) do { \
size_t __swap16_multi_n = (n); \
uint16 *__swap16_multi_v = (v); \
\
while( __swap16_multi_n ) { \
*__swap16_multi_v = B_SWAP_INT16(*__swap16_multi_v); \
__swap16_multi_v++; \
__swap16_multi_n--; \
} \
} while (0)
#else
#define B_BENDIAN_TO_HOST_MULTI(v, n)
#endif
/** prepare infoblock for further use, i.e. fix endianness */
static void
prep_infoblock(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
B_BENDIAN_TO_HOST_MULTI((uint16 *)infoblock->serial_number,
sizeof(infoblock->serial_number) / 2);
B_BENDIAN_TO_HOST_MULTI((uint16 *)infoblock->firmware_version,
sizeof(infoblock->firmware_version) / 2);
B_BENDIAN_TO_HOST_MULTI((uint16 *)infoblock->model_number,
sizeof(infoblock->model_number) / 2);
infoblock->LBA_total_sectors = B_LENDIAN_TO_HOST_INT32(infoblock->LBA_total_sectors);
infoblock->LBA48_total_sectors = B_LENDIAN_TO_HOST_INT64(infoblock->LBA48_total_sectors);
}
/** read info block of ATA or ATAPI device */
static bool
scan_device_int(ide_device_info *device, bool atapi)
{
ide_bus_info *bus = device->bus;
int status;
TRACE("scan_device_int: device %p, atapi %d\n", device, atapi);
device->tf_param_mask = 0;
device->tf.write.command = atapi ? IDE_CMD_IDENTIFY_PACKET_DEVICE
: IDE_CMD_IDENTIFY_DEVICE;
// initialize device selection flags,
// this is the only place where this bit gets initialized in the task file
if (bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
ide_mask_device_head) != B_OK) {
TRACE("scan_device_int: read_command_block_regs failed\n");
return false;
}
device->tf.lba.device = device->is_device1;
if (!send_command(device, NULL, atapi ? false : true, 20, ide_state_sync_waiting)) {
TRACE("scan_device_int: send_command failed\n");
return false;
}
// do a short wait first - if there's no device at all we could wait forever
// ToDo: have a look at this; if it times out (when the time is too short),
// the kernel seems to crash a little later!
TRACE("scan_device_int: waiting 100ms...\n");
if (acquire_sem_etc(bus->sync_wait_sem, 1, B_RELATIVE_TIMEOUT, 100000) == B_TIMED_OUT) {
bool cont;
TRACE("scan_device_int: no fast response to inquiry\n");
// check the busy flag - if it's already cleared (without an IRQ), there's probably no device
IDE_LOCK(bus);
status = bus->controller->get_altstatus(bus->channel_cookie);
cont = (status & ide_status_bsy) == ide_status_bsy;
IDE_UNLOCK(bus);
TRACE("scan_device_int: status %#04x\n", status);
if (!cont) {
TRACE("scan_device_int: busy bit not set after 100ms - probably noone there\n");
// no reaction -> abort waiting
cancel_irq_timeout(bus);
// timeout or IRQ may have been fired, reset semaphore just in case
acquire_sem_etc(bus->sync_wait_sem, 1, B_RELATIVE_TIMEOUT, 0);
TRACE("scan_device_int: aborting because busy bit not set\n");
return false;
}
TRACE("scan_device_int: busy bit set, give device more time\n");
// there is something, so wait for it
acquire_sem(bus->sync_wait_sem);
}
TRACE("scan_device_int: got a fast response\n");
// cancel the timeout manually; usually this is done by wait_for_sync(), but
// we've used the wait semaphore directly
cancel_irq_timeout(bus);
if (bus->sync_wait_timeout) {
TRACE("scan_device_int: aborting on sync_wait_timeout\n");
return false;
}
ide_wait(device, ide_status_drq, ide_status_bsy, true, 1000);
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & ide_status_err) != 0) {
// if there's no device, all bits including the error bit are set
TRACE("scan_device_int: error bit set - no device or wrong type (status: %#04x)\n", status);
return false;
}
// get the infoblock
bus->controller->read_pio(bus->channel_cookie, (uint16 *)&device->infoblock,
sizeof(device->infoblock) / sizeof(uint16), false);
if (!wait_for_drqdown(device)) {
TRACE("scan_device_int: wait_for_drqdown failed\n");
return false;
}
TRACE("scan_device_int: device found\n");
prep_infoblock(device);
return true;
}
/** scan one device */
void
scan_device_worker(ide_bus_info *bus, void *arg)
{
int is_device1 = (int)arg;
ide_device_info *device;
TRACE("scan_device_worker: bus %p, device-number %d\n", bus, is_device1);
// forget everything we know about the device;
// don't care about peripheral drivers using this device
// as the device info is only used by us and not published
// directly or indirectly to the SCSI bus manager
if (bus->devices[is_device1])
destroy_device(bus->devices[is_device1]);
device = create_device(bus, is_device1);
// reset status so we can see what goes wrong during detection
device->subsys_status = SCSI_REQ_CMP;
if (scan_device_int(device, false)) {
if (!prep_ata(device))
goto err;
} else if (device->subsys_status != SCSI_TID_INVALID
&& scan_device_int(device, true)) {
// only try ATAPI if the ATA probe saw at least some device
// (see send_command - this error code must be unique!)
if (!prep_atapi(device))
goto err;
} else
goto err;
bus->state = ide_state_idle;
release_sem(bus->scan_device_sem);
return;
err:
destroy_device(device);
bus->state = ide_state_idle;
release_sem(bus->scan_device_sem);
}

View File

@ -0,0 +1,136 @@
/*
* Copyright 2004-2007, Haiku, Inc. All RightsReserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
//! DMA helper functions
#include "ide_internal.h"
#define CHECK_DEV_DMA_MODE(infoblock, elem, mode, this_mode, num_modes) \
if (infoblock->elem) { \
mode = this_mode; \
++num_modes; \
}
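// For reference (illustrative expansion): CHECK_DEV_DMA_MODE(infoblock,
// MDMA2_selected, mode, 2, num_modes) expands to
//
//	if (infoblock->MDMA2_selected) {
//		mode = 2;
//		++num_modes;
//	}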
static int
get_device_dma_mode(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
int num_modes, mode;
mode = 0;
num_modes = 0;
if (!infoblock->DMA_supported)
return -1;
CHECK_DEV_DMA_MODE(infoblock, MDMA0_selected, mode, 0, num_modes);
CHECK_DEV_DMA_MODE(infoblock, MDMA1_selected, mode, 1, num_modes);
CHECK_DEV_DMA_MODE(infoblock, MDMA2_selected, mode, 2, num_modes);
if (infoblock->_88_valid) {
CHECK_DEV_DMA_MODE(infoblock, UDMA0_selected, mode, 0x10, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA1_selected, mode, 0x11, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA2_selected, mode, 0x12, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA3_selected, mode, 0x13, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA4_selected, mode, 0x14, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA5_selected, mode, 0x15, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA6_selected, mode, 0x16, num_modes);
}
if (num_modes != 1)
return -1;
SHOW_FLOW(3, "%x", mode);
return mode;
}
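// Decoding sketch (hypothetical helper, not existing code): the value
// returned above uses 0-2 for MDMA modes and 0x10-0x16 for UDMA modes,
// so a caller could map it to a name like this:
//
//	static const char *
//	example_dma_mode_name(int mode)
//	{
//		static const char *mdma[] = { "MDMA0", "MDMA1", "MDMA2" };
//		static const char *udma[] = { "UDMA0", "UDMA1", "UDMA2",
//			"UDMA3", "UDMA4", "UDMA5", "UDMA6" };
//		if (mode >= 0x10 && mode <= 0x16)
//			return udma[mode - 0x10];
//		if (mode >= 0 && mode <= 2)
//			return mdma[mode];
//		return "none";
//	}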
bool
configure_dma(ide_device_info *device)
{
device->DMA_enabled = device->DMA_supported = device->bus->can_DMA
&& get_device_dma_mode(device) != -1;
return true;
}
/*! Abort DMA transmission
must be called _before_ start_dma_wait
*/
void
abort_dma(ide_device_info *device, ide_qrequest *qrequest)
{
ide_bus_info *bus = device->bus;
SHOW_FLOW0(0, "");
bus->controller->finish_dma(bus->channel_cookie);
}
/*! Prepare DMA transmission
on return, DMA engine waits for device to transmit data
warning: doesn't set sense data on error
*/
bool
prepare_dma(ide_device_info *device, ide_qrequest *qrequest)
{
ide_bus_info *bus = device->bus;
scsi_ccb *request = qrequest->request;
status_t res;
res = bus->controller->prepare_dma(bus->channel_cookie, request->sg_list,
request->sg_count, qrequest->is_write);
if (res != B_OK)
return false;
return true;
}
/*! Start waiting for DMA to be finished */
void
start_dma_wait(ide_device_info *device, ide_qrequest *qrequest)
{
ide_bus_info *bus = device->bus;
bus->controller->start_dma(bus->channel_cookie);
start_waiting(bus, qrequest->request->timeout > 0 ?
qrequest->request->timeout : IDE_STD_TIMEOUT, ide_state_async_waiting);
}
/*! Start waiting for DMA to be finished with the bus lock not held */
void
start_dma_wait_no_lock(ide_device_info *device, ide_qrequest *qrequest)
{
ide_bus_info *bus = device->bus;
IDE_LOCK(bus);
start_dma_wait(device, qrequest);
}
/*! Finish dma transmission after device has fired IRQ */
bool
finish_dma(ide_device_info *device)
{
ide_bus_info *bus = device->bus;
status_t dma_res;
dma_res = bus->controller->finish_dma(bus->channel_cookie);
return dma_res == B_OK || dma_res == B_DEV_DATA_OVERRUN;
}
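// Lifecycle recap (simplified sketch): a DMA-backed request in this
// driver roughly flows
//
//	prepare_dma(device, qrequest);		// set up S/G table in controller
//	send_command(...);			// issue READ/WRITE DMA to the device
//	start_dma_wait(device, qrequest);	// start engine and timeout
//	// ... device fires IRQ ...
//	finish_dma(device);			// collect controller status
//
// with abort_dma() used when the command already fails before
// start_dma_wait() is reached.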

View File

@ -0,0 +1,115 @@
/*
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
//! General SCSI emulation routines
#include "ide_internal.h"
#include "ide_sim.h"
#include <vm.h>
#include <string.h>
/*! Emulate REQUEST SENSE */
void
ide_request_sense(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
scsi_cmd_request_sense *cmd = (scsi_cmd_request_sense *)request->cdb;
scsi_sense sense;
uint32 transferSize;
// cannot use finish_checksense here, as data is not copied into autosense buffer
// but into normal data buffer, SCSI result is GOOD and CAM status is REQ_CMP
if (device->combined_sense)
create_sense(device, &sense);
else
memset(&sense, 0, sizeof(sense));
copy_sg_data(request, 0, cmd->allocation_length, &sense, sizeof(sense), false);
// reset sense information on read
device->combined_sense = 0;
transferSize = min(sizeof(sense), cmd->allocation_length);
transferSize = min(transferSize, request->data_length);
request->data_resid = request->data_length - transferSize;
// normally, all flags are set to "success", but for Request Sense
// this would have overwritten the sense we want to read
device->subsys_status = SCSI_REQ_CMP;
request->device_status = SCSI_STATUS_GOOD;
}
/*! Copy data between request data and buffer
request - request to copy data from/to
offset - offset of data in request
allocation_length- limit of request's data buffer according to CDB
buffer - data to copy data from/to
size - number of bytes to copy
to_buffer - true: copy from request to buffer
false: copy from buffer to request
return: true, if data of request was large enough
*/
bool
copy_sg_data(scsi_ccb *request, uint offset, uint allocationLength,
void *buffer, int size, bool toBuffer)
{
const physical_entry *sgList = request->sg_list;
int sgCount = request->sg_count;
int requestSize;
SHOW_FLOW(3, "offset=%u, req_size_limit=%d, size=%d, sg_list=%p, sg_cnt=%d, %s buffer",
offset, allocationLength, size, sgList, sgCount, toBuffer ? "to" : "from");
// skip unused S/G entries
while (sgCount > 0 && offset >= sgList->size) {
offset -= sgList->size;
++sgList;
--sgCount;
}
if (sgCount == 0)
return false;
// remaining bytes we are allowed to copy from/to request
requestSize = min(allocationLength, request->data_length) - offset;
// copy one S/G entry at a time
for (; size > 0 && requestSize > 0 && sgCount > 0; ++sgList, --sgCount) {
addr_t virtualAddress;
size_t bytes;
bytes = min(size, requestSize);
bytes = min(bytes, sgList->size);
if (vm_get_physical_page((addr_t)sgList->address, &virtualAddress,
PHYSICAL_PAGE_CAN_WAIT) != B_OK)
return false;
SHOW_FLOW(4, "buffer=%p, virt_addr=%p, bytes=%d, to_buffer=%d",
buffer, (void *)(virtualAddress + offset), (int)bytes, toBuffer);
if (toBuffer)
memcpy(buffer, (void *)(virtualAddress + offset), bytes);
else
memcpy((void *)(virtualAddress + offset), buffer, bytes);
vm_put_physical_page(virtualAddress);
buffer = (char *)buffer + bytes;
size -= bytes;
offset = 0;
}
return size == 0;
}
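// Usage sketch (compare ide_request_sense above; "inquiryData" is a
// hypothetical local): an emulated command copies its result into the
// caller's S/G buffer and computes the residual like this:
//
//	uint32 transferSize = min(sizeof(inquiryData), allocationLength);
//	transferSize = min(transferSize, request->data_length);
//	copy_sg_data(request, 0, allocationLength,
//		&inquiryData, sizeof(inquiryData), false);
//	request->data_resid = request->data_length - transferSize;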

View File

@ -0,0 +1,21 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*! Contains interface used by IDE controller driver. */
#include "ide_internal.h"
#include "ide_sim.h"
device_manager_info *pnp;
#if !_BUILDING_kernel && !BOOT
module_info *modules[] = {
(module_info *)&ide_for_controller_module,
(module_info *)&ide_sim_module,
NULL
};
#endif

View File

@ -0,0 +1,64 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open IDE bus manager
OP-Codes of IDE commands
*/
#ifndef __IDE_CMDS_H__
#define __IDE_CMDS_H__
#define IDE_CMD_WRITE_DMA 0xca
#define IDE_CMD_WRITE_DMA_QUEUED 0xcc
#define IDE_CMD_WRITE_MULTIPLE 0xc5
#define IDE_CMD_WRITE_SECTORS 0x30
#define IDE_CMD_READ_DMA 0xc8
#define IDE_CMD_READ_DMA_QUEUED 0xc7
#define IDE_CMD_READ_MULTIPLE 0xc4
#define IDE_CMD_READ_SECTORS 0x20
#define IDE_CMD_WRITE_DMA_EXT 0x35
#define IDE_CMD_WRITE_DMA_QUEUED_EXT 0x36
#define IDE_CMD_WRITE_MULTIPLE_EXT 0x39
#define IDE_CMD_WRITE_SECTORS_EXT 0x34
#define IDE_CMD_READ_DMA_EXT 0x25
#define IDE_CMD_READ_DMA_QUEUED_EXT 0x26
#define IDE_CMD_READ_MULTIPLE_EXT 0x29
#define IDE_CMD_READ_SECTORS_EXT 0x24
#define IDE_CMD_PACKET 0xa0
#define IDE_CMD_DEVICE_RESET 0x08
#define IDE_CMD_SERVICE 0xa2
#define IDE_CMD_NOP 0
#define IDE_CMD_NOP_NOP 0
#define IDE_CMD_NOP_NOP_AUTOPOLL 1
#define IDE_CMD_GET_MEDIA_STATUS 0xda
#define IDE_CMD_FLUSH_CACHE 0xe7
#define IDE_CMD_FLUSH_CACHE_EXT 0xea
#define IDE_CMD_MEDIA_EJECT 0xed
#define IDE_CMD_IDENTIFY_PACKET_DEVICE 0xa1
#define IDE_CMD_IDENTIFY_DEVICE 0xec
#define IDE_CMD_SET_FEATURES 0xef
#define IDE_CMD_SET_FEATURES_ENABLE_REL_INT 0x5d
#define IDE_CMD_SET_FEATURES_ENABLE_SERV_INT 0x5e
#define IDE_CMD_SET_FEATURES_DISABLE_REL_INT 0xdd
#define IDE_CMD_SET_FEATURES_DISABLE_SERV_INT 0xde
#define IDE_CMD_SET_FEATURES_ENABLE_MSN 0x95
#endif

View File

@ -0,0 +1,195 @@
/*
* Copyright 2004-2006, Haiku, Inc. All RightsReserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
#ifndef _IDE_DEVICE_INFOBLOCK_H_
#define _IDE_DEVICE_INFOBLOCK_H_
/*
Definition of response to IDE_CMD_IDENTIFY_DEVICE or
IDE_CMD_IDENTIFY_PACKET_DEVICE
When a new entry is inserted, add its offset in hex
and its index in decimal as a remark. Without them, you'll
have a rough time when you mess up the offsets.
*/
#include <lendian_bitfield.h>
#define IDE_GET_INFO_BLOCK 0x2710
#define IDE_GET_STATUS 0x2711
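// Userland usage sketch (hypothetical - the device path and error
// handling here are assumptions, not part of this header): both
// opcodes are meant to be issued via ioctl() on a published device:
//
//	ide_device_infoblock block;
//	int fd = open("/dev/disk/ide/ata/0/master/0/raw", O_RDONLY);
//	if (fd >= 0) {
//		ioctl(fd, IDE_GET_INFO_BLOCK, &block, sizeof(block));
//		close(fd);
//	}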
// must be 512 bytes!!!
typedef struct tagdevice_infoblock {
union { // 0 general configuration
struct {
LBITFIELD8 (
_0_res1 : 1,
_0_ret1 : 1,
response_incomplete : 1,
_0_ret2 : 3,
removable_controller_or_media : 1,
removable_media : 1,
_0_ret3 : 7,
ATA : 1 // 0 - is ATA!
);
} ata;
struct {
LBITFIELD8 (
packet_size : 2, // 0 - 12 bytes, 1 - 16 bytes
response_incomplete : 1,
_0_res2 : 2,
drq_speed : 2, // 0 - 3ms, 1 - IRQ, 2 - 50µs
removable_media : 1,
type : 5,
_0_res13 : 1,
ATAPI : 2 // 2 - is ATAPI
);
} atapi;
} _0;
uint16 cylinders; // 2
uint16 dummy1; // 4
uint16 heads; // 6
uint16 dummy2[2]; // 8
uint16 sectors; // 0c
uint16 dummy3[3]; // 0e
char serial_number[20]; // 14
uint16 dummy4[3]; // 28
char firmware_version[8]; // 2e
char model_number[40]; // 36
uint16 dummy5[2]; // 5e
LBITFIELD5 ( // 62 (49) capabilities
_49_ret1 : 8,
DMA_supported : 1,
LBA_supported : 1,
IORDY_can_disable : 1,
IORDY_supported : 1
);
uint16 dummy6[1]; // 64
LBITFIELD2 ( // 66 (51) obsolete: PIO modes?
_51_obs1 : 8,
PIO_mode : 8
);
uint16 dummy7[1]; // 68
LBITFIELD3 ( // 6a (53) validity
_54_58_valid : 1,
_64_70_valid : 1,
_88_valid : 1
);
uint16 current_cylinders; // 6c (54)
uint16 current_heads; // 6e
uint16 current_sectors; // 70
uint16 capacity_low; // 72 (57) ALIGNMENT SPLIT - don't merge
uint16 capacity_high;
uint16 dummy8[1];
uint32 LBA_total_sectors; // 78 (60)
uint16 dummy9[1]; // 7c
LBITFIELD7 ( // 7e (63) MDMA modes
MDMA0_supported : 1,
MDMA1_supported : 1,
MDMA2_supported : 1,
_63_res1 : 5,
MDMA0_selected : 1,
MDMA1_selected : 1,
MDMA2_selected : 1
);
uint16 dummy10[11]; // 80
LBITFIELD2 ( // 96 (75)
queue_depth : 5,
_75_res1 : 9
);
uint16 dummy11[6]; // 98
LBITFIELD16 ( // a4 (82) supported_command_set
SMART_supported : 1,
security_mode_supported : 1,
removable_media_supported : 1,
PM_supported : 1,
_81_fixed : 1, // must be 0
write_cache_supported : 1,
look_ahead_supported : 1,
RELEASE_irq_supported : 1,
SERVICE_irq_supported : 1,
DEVICE_RESET_supported : 1,
HPA_supported : 1,
_81_obs1 : 1,
WRITE_BUFFER_supported : 1,
READ_BUFFER_supported : 1,
NOP_supported : 1,
_81_obs2 : 1
);
LBITFIELD15 ( // a6 (83) supported_command_sets
DOWNLOAD_MICROCODE_supported : 1,
DMA_QUEUED_supported : 1,
CFA_supported : 1,
APM_supported : 1,
RMSN_supported : 1,
power_up_in_stand_by_supported : 1,
SET_FEATURES_on_power_up_required : 1,
reserved_boot_area_supported : 1,
SET_MAX_security_supported : 1,
auto_acustic_managemene_supported : 1,
_48_bit_addresses_supported : 1,
device_conf_overlay_supported : 1,
FLUSH_CACHE_supported : 1,
FLUSH_CACHE_EXT_supported : 1,
_83_fixed : 2 // must be 1
);
uint16 dummy12[4]; // a8 (84)
LBITFIELD15 ( // b0 (88) UDMA modes
UDMA0_supported : 1,
UDMA1_supported : 1,
UDMA2_supported : 1,
UDMA3_supported : 1,
UDMA4_supported : 1,
UDMA5_supported : 1,
UDMA6_supported : 1, // !guessed
_88_res1 : 1,
UDMA0_selected : 1,
UDMA1_selected : 1,
UDMA2_selected : 1,
UDMA3_selected : 1,
UDMA4_selected : 1,
UDMA5_selected : 1,
UDMA6_selected : 1
);
uint16 dummy89[11]; // b2 (89)
uint64 LBA48_total_sectors; // c8 (100)
uint16 dummy102[22]; // cc (104)
LBITFIELD2 ( // fc (126)
last_lun : 2,
_126_res2 : 14
);
LBITFIELD4 ( // fe (127) RMSN support
_127_RMSN_support : 2, // 0 = not supported, 1 = supported, 2, 3 = reserved
_127_res2 : 6,
device_write_protect: 2,
_127_res9 : 6
);
uint16 dummy14[128]; // 100 (128)
} ide_device_infoblock;
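// Consumption example (sketch; prep_ata() in this bus manager derives
// the capacity in essentially this way, the helper name is made up):
//
//	static uint64
//	example_total_sectors(const ide_device_infoblock *infoblock)
//	{
//		if (infoblock->_48_bit_addresses_supported)
//			return infoblock->LBA48_total_sectors;
//		if (infoblock->LBA_supported)
//			return infoblock->LBA_total_sectors;
//		// fall back to CHS geometry
//		return (uint64)infoblock->cylinders * infoblock->heads
//			* infoblock->sectors;
//	}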
typedef struct ide_status {
uint8 _reserved;
uint8 dma_status;
uint8 pio_mode;
uint8 dma_mode;
} ide_status;
#endif /* _IDE_DEVICE_INFOBLOCK_H_ */

View File

@ -0,0 +1,429 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef __IDE_INTERNAL_H__
#define __IDE_INTERNAL_H__
/*
Part of Open IDE bus manager
Internal structures
*/
#include <bus/IDE.h>
#include <bus/SCSI.h>
#include "ide_device_infoblock.h"
#include <ide_types.h>
#include <device_manager.h>
#include <fast_log.h>
#define debug_level_error 2
#define debug_level_info 1
#define debug_level_flow 0
#define DEBUG_MSG_PREFIX "IDE -- "
#include "wrapper.h"
//#define USE_FAST_LOG
#ifdef USE_FAST_LOG
#define FAST_LOG0( handle, event ) fast_log->log_0( handle, event )
#define FAST_LOG1( handle, event, param ) fast_log->log_1( handle, event, param )
#define FAST_LOG2( handle, event, param1, param2 ) fast_log->log_2( handle, event, param1, param2 )
#define FAST_LOG3( handle, event, param1, param2, param3 ) fast_log->log_3( handle, event, param1, param2, param3 )
#define FAST_LOGN( handle, event, num_params... ) fast_log->log_n( handle, event, num_params )
#else
#define FAST_LOG0( handle, event )
#define FAST_LOG1( handle, event, param )
#define FAST_LOG2( handle, event, param1, param2 )
#define FAST_LOG3( handle, event, param1, param2, param3 )
#define FAST_LOGN( handle, event, num_params... )
#endif
#define IDE_STD_TIMEOUT 10
#define IDE_RELEASE_TIMEOUT 10000000
// number of timeouts before we disable DMA automatically
#define MAX_DMA_FAILURES 3
// name of pnp generator of channel ids
#define IDE_CHANNEL_ID_GENERATOR "ide/channel_id"
// node item containing channel id (uint32)
#define IDE_CHANNEL_ID_ITEM "ide/channel_id"
extern device_manager_info *pnp;
extern fast_log_info *fast_log;
typedef struct ide_bus_info ide_bus_info;
typedef void (*ide_synced_pc_func)(ide_bus_info *bus, void *arg);
typedef struct ide_synced_pc {
struct ide_synced_pc *next;
ide_synced_pc_func func;
void *arg;
bool registered;
} ide_synced_pc;
// structure for device time-outs
typedef struct ide_device_timer_info {
timer te;
struct ide_device_info *device;
} ide_device_timer_info;
// structure for bus time-outs
typedef struct ide_bus_timer_info {
timer te;
struct ide_bus_info *bus;
} ide_bus_timer_info;
typedef struct ide_device_info {
struct ide_bus_info *bus;
uint8 use_LBA : 1; // true for LBA, false for CHS
uint8 use_48bits : 1; // true for LBA48
uint8 is_atapi : 1; // true for ATAPI, false for ATA
uint8 CQ_supported : 1; // Command Queuing supported
uint8 CQ_enabled : 1; // Command Queuing enabled
uint8 DMA_supported : 1; // DMA supported
uint8 DMA_enabled : 1; // DMA enabled
uint8 is_device1 : 1; // true for slave, false for master
uint8 queue_depth; // maximum Command Queueing depth
uint8 last_lun; // last LUN
uint8 DMA_failures; // DMA failures in a row
uint8 CQ_failures; // Command Queuing failures during _last_ command
uint8 num_failed_send; // number of consecutive send problems
// next two error codes are copied to request on finish_request & co.
uint8 subsys_status; // subsystem status of current request
uint32 new_combined_sense; // emulated sense of current request
// pending error codes
uint32 combined_sense; // emulated sense of device
struct ide_qrequest *qreq_array; // array of ide requests
struct ide_qrequest *free_qrequests; // free list
int num_running_reqs; // number of running requests
struct ide_device_info *other_device; // other device on same bus
// entry for scsi's exec_io request
void (*exec_io)(struct ide_device_info *device, struct ide_qrequest *qrequest);
int target_id; // target id (currently, same as is_device1)
ide_reg_mask tf_param_mask; // flag of valid bytes in task file
ide_task_file tf; // task file
// ata from here on
uint64 total_sectors; // size in sectors
// atapi from here on
uint8 packet[12]; // atapi command packet
struct {
uint8 packet_irq : 1; // true, if command packet irq required
bigtime_t packet_irq_timeout; // timeout for it
} atapi;
uint8 device_type; // atapi device type
bool reconnect_timer_installed; // true, if reconnect timer is running
ide_device_timer_info reconnect_timer; // reconnect timeout
scsi_dpc_cookie reconnect_timeout_dpc; // dpc fired by timeout
ide_synced_pc reconnect_timeout_synced_pc; // spc fired by dpc
// pio from here on
int left_sg_elem; // remaining sg elements
const physical_entry *cur_sg_elem; // active sg element
int cur_sg_ofs; // offset in active sg element
int left_blocks; // remaining blocks
bool has_odd_byte; // remaining odd byte
int odd_byte; // content of odd byte
ide_device_infoblock infoblock; // infoblock of device
} ide_device_info;
/*// state as stored in sim_state of scsi_ccb
typedef enum {
ide_request_normal = 0, // this must be zero as this is initial value
ide_request_start_autosense = 1,
ide_request_autosense = 2
} ide_request_state;*/
// ide request
typedef struct ide_qrequest {
struct ide_qrequest *next;
ide_device_info *device;
scsi_ccb *request; // basic request
uint8 is_write : 1; // true for write request
uint8 running : 1; // true if "on bus"
uint8 uses_dma : 1; // true if using dma
uint8 packet_irq : 1; // true if waiting for command packet irq
uint8 queuable : 1; // true if command queuing is used
uint8 tag; // command queuing tag
} ide_qrequest;
// state of ide bus
typedef enum {
ide_state_idle, // no one is using it, but overlapped
// commands may be pending
ide_state_accessing, // bus is in use
ide_state_async_waiting, // waiting for IRQ, to be reported via irq_dpc
ide_state_sync_waiting, // waiting for IRQ, to be reported via sync_wait_sem
} ide_bus_state;
struct ide_bus_info {
ide_qrequest *active_qrequest;
// controller
ide_controller_interface *controller;
void *channel_cookie;
// lock, used for changes of bus state
spinlock lock;
cpu_status prev_irq_state;
ide_bus_state state; // current state of bus
benaphore status_report_ben; // to lock when you report XPT about bus state
// i.e. during requeue, resubmit or finished
bool disconnected; // true, if controller is lost
int num_running_reqs; // total number of running requests
scsi_bus scsi_cookie; // cookie for scsi bus
ide_bus_timer_info timer; // timeout
scsi_dpc_cookie irq_dpc;
ide_synced_pc *synced_pc_list;
sem_id sync_wait_sem; // released when sync_wait finished
bool sync_wait_timeout; // true, if timeout occurred
ide_device_info *active_device;
ide_device_info *devices[2];
ide_device_info *first_device;
ide_synced_pc scan_bus_syncinfo; // used to start bus scan
sem_id scan_device_sem; // released when device has been scanned
ide_synced_pc disconnect_syncinfo; // used to handle lost controller
uchar path_id;
device_node_handle node; // our pnp node
// restrictions, read from controller node
uint8 max_devices;
uint8 can_DMA;
uint8 can_CQ;
fast_log_handle log;
char name[32];
};
// call this before you change bus state
#define IDE_LOCK( bus ) { \
cpu_status prev_irq_state = disable_interrupts(); \
acquire_spinlock( &bus->lock ); \
bus->prev_irq_state = prev_irq_state; \
}
// call this after you changed bus state
#define IDE_UNLOCK( bus ) { \
cpu_status prev_irq_state = bus->prev_irq_state; \
release_spinlock( &bus->lock ); \
restore_interrupts( prev_irq_state ); \
}
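// Typical usage (sketch; compare sim_scsi_io() in ide_sim.c): every
// bus state transition is bracketed by these macros:
//
//	IDE_LOCK(bus);
//	if (bus->state != ide_state_idle) {
//		IDE_UNLOCK(bus);
//		return;		// bus is busy - caller must requeue
//	}
//	bus->state = ide_state_accessing;
//	IDE_UNLOCK(bus);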
// SIM interface
#define IDE_SIM_MODULE_NAME "bus_managers/ide/sim/v1"
enum {
ev_ide_send_command = 1,
ev_ide_device_start_service,
ev_ide_device_start_service2,
ev_ide_dpc_service,
ev_ide_dpc_continue,
ev_ide_irq_handle,
ev_ide_cancel_irq_timeout,
ev_ide_start_waiting,
ev_ide_timeout_dpc,
ev_ide_timeout,
ev_ide_reset_bus,
ev_ide_reset_device,
ev_ide_scsi_io,
ev_ide_scsi_io_exec,
ev_ide_scsi_io_invalid_device,
ev_ide_scsi_io_bus_busy,
ev_ide_scsi_io_device_busy,
ev_ide_scsi_io_disconnected,
ev_ide_finish_request,
ev_ide_finish_norelease,
ev_ide_scan_device_int,
ev_ide_scan_device_int_cant_send,
ev_ide_scan_device_int_keeps_busy,
ev_ide_scan_device_int_found
};
// get selected device
static inline
ide_device_info *get_current_device(ide_bus_info *bus)
{
ide_task_file tf;
bus->controller->read_command_block_regs(bus->channel_cookie, &tf,
ide_mask_device_head);
return bus->devices[tf.lba.device];
}
// check if device has released the bus
// return: true, if bus was released
static inline int
device_released_bus(ide_device_info *device)
{
ide_bus_info *bus = device->bus;
bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_sector_count);
return device->tf.queued.release;
}
// ata.c
bool check_rw_error(ide_device_info *device, ide_qrequest *qrequest);
bool check_output(ide_device_info *device, bool drdy_required, int error_mask, bool is_write);
bool prep_ata(ide_device_info *device);
void enable_CQ(ide_device_info *device, bool enable);
void ata_send_rw(ide_device_info *device, ide_qrequest *qrequest,
uint64 pos, size_t length, bool write);
void ata_dpc_DMA(ide_qrequest *qrequest);
void ata_dpc_PIO(ide_qrequest *qrequest);
void ata_exec_io(ide_device_info *device, ide_qrequest *qrequest);
// atapi.c
bool prep_atapi(ide_device_info *device);
void send_packet(ide_device_info *device, ide_qrequest *qrequest, bool write);
void packet_dpc(ide_qrequest *qrequest);
void atapi_exec_io(ide_device_info *device, ide_qrequest *qrequest);
// basic_prot.c
bool ide_wait(ide_device_info *device, int mask, int not_mask, bool check_err,
bigtime_t timeout);
bool wait_for_drq(ide_device_info *device);
bool wait_for_drqdown(ide_device_info *device);
bool wait_for_drdy(ide_device_info *device);
// timeout in seconds
bool send_command(ide_device_info *device, ide_qrequest *qrequest,
bool need_drdy, uint32 timeout, ide_bus_state new_state);
bool device_start_service(ide_device_info *device, int *tag);
bool reset_device(ide_device_info *device, ide_qrequest *ignore);
bool reset_bus(ide_device_info *device, ide_qrequest *ignore);
bool check_service_req(ide_device_info *device);
// channel_mgr.c
extern ide_for_controller_interface ide_for_controller_module;
// device_mgr.c
void scan_device_worker(ide_bus_info *bus, void *arg);
// dma.c
bool prepare_dma(ide_device_info *device, ide_qrequest *qrequest);
void start_dma(ide_device_info *device, ide_qrequest *qrequest);
void start_dma_wait(ide_device_info *device, ide_qrequest *qrequest);
void start_dma_wait_no_lock(ide_device_info *device, ide_qrequest *qrequest);
bool finish_dma(ide_device_info *device);
void abort_dma(ide_device_info *device, ide_qrequest *qrequest);
bool configure_dma(ide_device_info *device);
// emulation.c
bool copy_sg_data(scsi_ccb *request, uint offset, uint req_size_limit,
void *buffer, int size, bool to_buffer);
void ide_request_sense(ide_device_info *device, ide_qrequest *qrequest);
// pio.c
void prep_PIO_transfer(ide_device_info *device, ide_qrequest *qrequest);
status_t read_PIO_block(ide_qrequest *qrequest, int length);
status_t write_PIO_block(ide_qrequest *qrequest, int length);
// queuing.c
bool send_abort_queue(ide_device_info *device);
bool try_service(ide_device_info *device);
void reconnect_timeout_worker(ide_bus_info *bus, void *arg);
int32 reconnect_timeout(timer *arg);
bool initialize_qreq_array(ide_device_info *device, int queue_depth);
void destroy_qreq_array(ide_device_info *device);
// sync.c
// timeout in seconds (according to CAM)
void start_waiting(ide_bus_info *bus, uint32 timeout, int new_state);
void start_waiting_nolock(ide_bus_info *bus, uint32 timeout, int new_state);
void wait_for_sync(ide_bus_info *bus);
void cancel_irq_timeout(ide_bus_info *bus);
status_t schedule_synced_pc(ide_bus_info *bus, ide_synced_pc *pc, void *arg);
void init_synced_pc(ide_synced_pc *pc, ide_synced_pc_func func);
void uninit_synced_pc(ide_synced_pc *pc);
void ide_dpc(void *arg);
void access_finished(ide_bus_info *bus, ide_device_info *device);
status_t ide_irq_handler(ide_bus_info *bus, uint8 status);
status_t ide_timeout(timer *arg);
#endif /* __IDE_INTERNAL_H__ */

View File

@ -0,0 +1,873 @@
/*
* Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
Interface between ide bus manager and scsi bus manager.
The IDE bus manager has a somewhat unusual structure as it
consists of a single level only. In fact it is not a bus manager
in terms of the PnP structure at all, but a driver that maps
one SCSI bus onto one IDE controller.
This structure does not allow us to publish IDE devices
as they can be accessed via the SCSI bus node only. Therefore
we do a full bus scan every time the IDE bus node is loaded.
The drawback is that a bus rescan must be done indirectly via a
SCSI bus scan.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include <scsi_cmds.h>
#include <safemode.h>
#include <string.h>
#include <malloc.h>
#include <stdio.h>
scsi_for_sim_interface *scsi;
fast_log_info *fast_log;
#ifdef USE_FAST_LOG
static fast_log_event_type ide_events[] =
{
{ ev_ide_send_command, "ev_ide_send_command " },
{ ev_ide_device_start_service, "ev_ide_device_start_service" },
{ ev_ide_device_start_service2, "ev_ide_device_start_service2" },
{ ev_ide_dpc_service, "ev_ide_dpc_service" },
{ ev_ide_dpc_continue, "ev_ide_dpc_continue" },
{ ev_ide_irq_handle, "ev_ide_irq_handle" },
{ ev_ide_cancel_irq_timeout, "ev_ide_cancel_irq_timeout" },
{ ev_ide_start_waiting, "ev_ide_start_waiting" },
{ ev_ide_timeout_dpc, "ev_ide_timeout_dpc" },
{ ev_ide_timeout, "ev_ide_timeout" },
{ ev_ide_reset_bus, "ev_ide_reset_bus" },
{ ev_ide_reset_device, "ev_ide_reset_device" },
{ ev_ide_scsi_io, "ev_ide_scsi_io" },
{ ev_ide_scsi_io_exec, "ev_ide_scsi_io_exec" },
{ ev_ide_scsi_io_invalid_device, "ev_ide_scsi_io_invalid_device" },
{ ev_ide_scsi_io_bus_busy, "ev_ide_scsi_io_bus_busy" },
{ ev_ide_scsi_io_device_busy, "ev_ide_scsi_io_device_busy" },
{ ev_ide_scsi_io_disconnected, "ev_ide_scsi_io_disconnected" },
{ ev_ide_finish_request, "ev_ide_finish_request" },
{ ev_ide_finish_norelease, "ev_ide_finish_norelease" },
{ ev_ide_scan_device_int, "ev_ide_scan_device_int" },
{ ev_ide_scan_device_int_cant_send, "ev_ide_scan_device_int_cant_send" },
{ ev_ide_scan_device_int_keeps_busy, "ev_ide_scan_device_int_keeps_busy" },
{ ev_ide_scan_device_int_found, "ev_ide_scan_device_int_found" },
{}
};
#endif
static void disconnect_worker(ide_bus_info *bus, void *arg);
static void set_check_condition(ide_qrequest *qrequest);
/** check whether this request can be queued within the device */
static inline bool
is_queuable(ide_device_info *device, scsi_ccb *request)
{
int opcode = request->cdb[0];
// XXX disable queuing
if (!device->CQ_enabled)
return false;
// make sure the caller allows queuing
if ((request->flags & SCSI_ORDERED_QTAG) != 0)
return false;
// for atapi, all commands could be queued, but all
// atapi devices I know don't support queuing anyway
return opcode == SCSI_OP_READ_6 || opcode == SCSI_OP_WRITE_6
|| opcode == SCSI_OP_READ_10 || opcode == SCSI_OP_WRITE_10;
}
static void
sim_scsi_io(ide_bus_info *bus, scsi_ccb *request)
{
ide_device_info *device;
bool queuable;
ide_qrequest *qrequest;
//ide_request_priv *priv;
// FAST_LOG3( bus->log, ev_ide_scsi_io, (uint32)request, request->target_id, request->target_lun );
SHOW_FLOW(3, "%d:%d", request->target_id, request->target_lun);
if (bus->disconnected)
goto err_disconnected;
// make sure the device is valid
// I've read that there are ATAPI devices with more than one LUN,
// but it seems that most (all?) devices ignore LUN, so we have
// to restrict to LUN 0 to avoid mirror devices
if (request->target_id >= 2)
goto err_inv_device;
device = bus->devices[request->target_id];
if (device == NULL)
goto err_inv_device;
if (request->target_lun > device->last_lun)
goto err_inv_device;
queuable = is_queuable(device, request);
// grab the bus
ACQUIRE_BEN(&bus->status_report_ben);
IDE_LOCK(bus);
if (bus->state != ide_state_idle)
goto err_bus_busy;
// bail out if device can't accept further requests
if (device->free_qrequests == NULL
|| (device->num_running_reqs > 0 && !queuable))
goto err_device_busy;
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
RELEASE_BEN(&bus->status_report_ben);
// as we own the bus, no one can bother us
qrequest = device->free_qrequests;
device->free_qrequests = qrequest->next;
qrequest->request = request;
qrequest->queuable = queuable;
qrequest->running = true;
qrequest->uses_dma = false;
++device->num_running_reqs;
++bus->num_running_reqs;
bus->active_qrequest = qrequest;
FAST_LOGN(bus->log, ev_ide_scsi_io_exec, 4, (uint32)qrequest,
(uint32)request, bus->num_running_reqs, device->num_running_reqs);
device->exec_io(device, qrequest);
return;
err_inv_device:
SHOW_ERROR(3, "Invalid device %d:%d",
request->target_id, request->target_lun);
FAST_LOG1(bus->log, ev_ide_scsi_io_invalid_device, (uint32)request);
request->subsys_status = SCSI_SEL_TIMEOUT;
scsi->finished(request, 1);
return;
err_bus_busy:
SHOW_FLOW0(3, "Bus busy");
FAST_LOG1(bus->log, ev_ide_scsi_io_bus_busy, (uint32)request);
IDE_UNLOCK(bus);
scsi->requeue(request, true);
RELEASE_BEN(&bus->status_report_ben);
return;
err_device_busy:
SHOW_FLOW0(3, "Device busy");
FAST_LOG1(bus->log, ev_ide_scsi_io_device_busy, (uint32)request);
IDE_UNLOCK(bus);
scsi->requeue(request, false);
RELEASE_BEN(&bus->status_report_ben);
return;
err_disconnected:
SHOW_ERROR0(3, "No controller anymore");
FAST_LOG1(bus->log, ev_ide_scsi_io_disconnected, (uint32)request);
request->subsys_status = SCSI_NO_HBA;
scsi->finished(request, 1);
return;
}
static uchar
sim_path_inquiry(ide_bus_info *bus, scsi_path_inquiry *info)
{
char *controller_name;
SHOW_FLOW0(4, "");
if (bus->disconnected)
return SCSI_NO_HBA;
info->hba_inquiry = SCSI_PI_TAG_ABLE | SCSI_PI_WIDE_16;
info->hba_misc = 0;
memset(info->vuhba_flags, 0, sizeof(info->vuhba_flags));
// we don't need any of the private data
info->sim_priv = 0;
// there is no initiator for IDE, but SCSI needs it for scanning
info->initiator_id = 2;
// there's no controller limit, so set it higher than the maximum
// number of queued requests, which is 32 per device * 2 devices
info->hba_queue_size = 65;
strncpy(info->sim_vid, "Haiku", SCSI_SIM_ID);
if (pnp->get_attr_string(bus->node, SCSI_DESCRIPTION_CONTROLLER_NAME,
&controller_name, true) == B_OK) {
strlcpy(info->hba_vid, controller_name, SCSI_HBA_ID);
free(controller_name);
} else
strlcpy(info->hba_vid, "", SCSI_HBA_ID);
strlcpy(info->controller_family, "IDE", SCSI_FAM_ID);
strlcpy(info->controller_type, "IDE", SCSI_TYPE_ID);
SHOW_FLOW0(4, "done");
return SCSI_REQ_CMP;
}
static void
scan_device(ide_bus_info *bus, int device)
{
SHOW_FLOW0(4, "");
// currently, the SCSI bus manager doesn't block the
// bus when a bus or device scan is issued, so we
// have to use a SPC for that to be sure no one else
// is accessing the device or bus concurrently
schedule_synced_pc(bus, &bus->scan_bus_syncinfo, (void *)device);
acquire_sem(bus->scan_device_sem);
}
static uchar
sim_scan_bus(ide_bus_info *bus)
{
int i;
SHOW_FLOW0(4, "");
if (bus->disconnected)
return SCSI_NO_HBA;
for (i = 0; i < bus->max_devices; ++i) {
scan_device(bus, i);
}
return SCSI_REQ_CMP;
}
static uchar
sim_abort(ide_bus_info *bus, scsi_ccb *ccb_to_abort)
{
// we cannot abort specific commands, so just ignore
if (bus->disconnected)
return SCSI_NO_HBA;
return SCSI_REQ_CMP;
}
static uchar
sim_term_io(ide_bus_info *bus, scsi_ccb *ccb_to_abort)
{
// we cannot terminate commands, so just ignore
if (bus->disconnected)
return SCSI_NO_HBA;
return SCSI_REQ_CMP;
}
static uchar
sim_reset_bus(ide_bus_info *bus)
{
// no, we don't do that
if (bus->disconnected)
return SCSI_NO_HBA;
return SCSI_REQ_INVALID;
}
static uchar
sim_reset_device(ide_bus_info *bus, uchar target_id, uchar target_lun)
{
// xxx to do
if (bus->disconnected)
return SCSI_NO_HBA;
return SCSI_REQ_INVALID;
}
/** fill sense buffer according to device sense */
void
create_sense(ide_device_info *device, scsi_sense *sense)
{
memset(sense, 0, sizeof(*sense));
sense->error_code = SCSIS_CURR_ERROR;
sense->sense_key = decode_sense_key(device->combined_sense);
sense->add_sense_length = sizeof(*sense) - 7;
sense->asc = decode_sense_asc(device->combined_sense);
sense->ascq = decode_sense_ascq(device->combined_sense);
sense->sense_key_spec.raw.SKSV = 0; // no additional info
}
/** finish command, updating sense of device and request, and release bus */
void
finish_checksense(ide_qrequest *qrequest)
{
SHOW_FLOW(3, "%p, subsys_status=%d, sense=%x",
qrequest->request,
qrequest->request->subsys_status,
(int)qrequest->device->new_combined_sense);
qrequest->request->subsys_status = qrequest->device->subsys_status;
if (qrequest->request->subsys_status == SCSI_REQ_CMP) {
// device or emulation code completed command
qrequest->device->combined_sense = qrequest->device->new_combined_sense;
// if emulation code detected error, set CHECK CONDITION
if (qrequest->device->combined_sense)
set_check_condition(qrequest);
}
finish_request(qrequest, false);
}
/** finish request and release bus
* resubmit - true, if request should be resubmitted by XPT
*/
void
finish_request(ide_qrequest *qrequest, bool resubmit)
{
ide_device_info *device = qrequest->device;
ide_bus_info *bus = device->bus;
scsi_ccb *request;
uint num_running;
FAST_LOG2(bus->log, ev_ide_finish_request, (uint32)qrequest, resubmit);
SHOW_FLOW0(3, "");
// save request first, as qrequest can be reused as soon as
// access_finished is called!
request = qrequest->request;
qrequest->running = false;
qrequest->next = device->free_qrequests;
device->free_qrequests = qrequest;
// num_running is not really correct as the XPT is interested
// in the number of concurrent requests when it was *started* !
num_running = device->num_running_reqs--;
--bus->num_running_reqs;
// paranoia
bus->active_qrequest = NULL;
// release bus, handling service requests;
// TBD:
// if we really handle a service request, the finished command
// is delayed unnecessarily, but if we tell the XPT about the finished
// command first, it will instantly try to pass us another
// request to handle, which we will refuse as the bus is still
// locked; this really has to be improved
access_finished(bus, device);
ACQUIRE_BEN(&bus->status_report_ben);
if (resubmit)
scsi->resubmit(request);
else
scsi->finished(request, num_running);
RELEASE_BEN(&bus->status_report_ben);
}
/** set CHECK CONDITION of device and perform auto-sense if requested.
* (don't move it before finish_request - we don't want to inline
* it as it's on the rarely used error path)
*/
static void
set_check_condition(ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
ide_device_info *device = qrequest->device;
SHOW_FLOW0(3, "");
request->subsys_status = SCSI_REQ_CMP_ERR;
request->device_status = SCSI_STATUS_CHECK_CONDITION;
// copy sense only if caller requested it
if ((request->flags & SCSI_DIS_AUTOSENSE) == 0) {
scsi_sense sense;
int sense_len;
SHOW_FLOW0(3, "autosense");
// we cannot copy sense directly as sense buffer may be too small
create_sense(device, &sense);
sense_len = min(SCSI_MAX_SENSE_SIZE, sizeof(sense));
memcpy(request->sense, &sense, sense_len);
request->sense_resid = SCSI_MAX_SENSE_SIZE - sense_len;
request->subsys_status |= SCSI_AUTOSNS_VALID;
// device sense gets reset once it's read
device->combined_sense = 0;
}
}
void
finish_retry(ide_qrequest *qrequest)
{
qrequest->device->combined_sense = 0;
finish_request(qrequest, true);
}
/** finish request and abort pending requests of the device
* (to be called when the request failed and thus messed up the queue)
*/
void
finish_reset_queue(ide_qrequest *qrequest)
{
ide_bus_info *bus = qrequest->device->bus;
// don't remove block_bus!!!
// during finish_checksense, the bus is released, so
// the SCSI bus manager could send us further commands
scsi->block_bus(bus->scsi_cookie);
finish_checksense(qrequest);
send_abort_queue(qrequest->device);
scsi->unblock_bus(bus->scsi_cookie);
}
/** finish request, but don't release bus
* if resubmit is true, the request will be resubmitted
*/
static void
finish_norelease(ide_qrequest *qrequest, bool resubmit)
{
ide_device_info *device = qrequest->device;
ide_bus_info *bus = device->bus;
uint num_requests;
FAST_LOG2(bus->log, ev_ide_finish_norelease, (uint32)qrequest, resubmit);
qrequest->running = false;
qrequest->next = device->free_qrequests;
device->free_qrequests = qrequest;
num_requests = device->num_running_reqs--;
--bus->num_running_reqs;
if (bus->active_qrequest == qrequest)
bus->active_qrequest = NULL;
ACQUIRE_BEN(&bus->status_report_ben);
if (resubmit)
scsi->resubmit(qrequest->request);
else
scsi->finished(qrequest->request, num_requests);
RELEASE_BEN(&bus->status_report_ben);
}
/** finish all queued requests but <ignore> of the device;
* set resubmit, if requests are to be resubmitted by xpt
*/
void
finish_all_requests(ide_device_info *device, ide_qrequest *ignore,
int subsys_status, bool resubmit)
{
int i;
if (device == NULL)
return;
// we only have to block the device, but for CD changers we
// have to block all LUNs of the device (and we neither know
// their device handle nor which exist at all), so block
// the entire bus instead (it won't take that long anyway)
scsi->block_bus(device->bus->scsi_cookie);
for (i = 0; i < device->queue_depth; ++i) {
ide_qrequest *qrequest = &device->qreq_array[i];
if (qrequest->running && qrequest != ignore) {
qrequest->request->subsys_status = subsys_status;
finish_norelease(qrequest, resubmit);
}
}
scsi->unblock_bus(device->bus->scsi_cookie);
}
static status_t
ide_sim_init_bus(device_node_handle node, void *user_cookie, void **cookie)
{
device_node_handle parent;
ide_bus_info *bus;
bool dmaDisabled = false;
status_t status;
SHOW_FLOW0(3, "");
// first prepare the info structure
bus = (ide_bus_info *)malloc(sizeof(*bus));
if (bus == NULL)
return B_NO_MEMORY;
memset(bus, 0, sizeof(*bus));
bus->node = node;
bus->lock = 0;
bus->num_running_reqs = 0;
bus->active_qrequest = NULL;
bus->disconnected = false;
{
int32 channel_id = -1;
pnp->get_attr_uint32(node, IDE_CHANNEL_ID_ITEM, (uint32 *)&channel_id, true);
sprintf(bus->name, "ide_bus %d", (int)channel_id);
}
#if 0
bus->log = fast_log->start_log(bus->name, ide_events);
if (bus->log == NULL) {
status = B_NO_MEMORY;
goto err;
}
#endif
init_synced_pc(&bus->scan_bus_syncinfo, scan_device_worker);
init_synced_pc(&bus->disconnect_syncinfo, disconnect_worker);
bus->scsi_cookie = user_cookie;
bus->state = ide_state_idle;
bus->timer.bus = bus;
bus->synced_pc_list = NULL;
if ((status = scsi->alloc_dpc(&bus->irq_dpc)) < B_OK)
goto err1;
bus->active_device = NULL;
bus->sync_wait_sem = create_sem(0, "ide_sync_wait");
if (bus->sync_wait_sem < 0) {
status = bus->sync_wait_sem;
goto err2;
}
bus->devices[0] = bus->devices[1] = NULL;
bus->scan_device_sem = create_sem(0, "ide_scan_finished");
if (bus->scan_device_sem < 0) {
status = bus->scan_device_sem;
goto err3;
}
status = INIT_BEN(&bus->status_report_ben, "ide_status_report");
if (status < B_OK)
goto err4;
{
// check if safemode settings disable DMA
void *settings = load_driver_settings(B_SAFEMODE_DRIVER_SETTINGS);
if (settings != NULL) {
dmaDisabled = get_driver_boolean_parameter(settings, B_SAFEMODE_DISABLE_IDE_DMA,
false, false);
unload_driver_settings(settings);
}
}
bus->first_device = NULL;
// read restrictions of controller
if (pnp->get_attr_uint8(node, IDE_CONTROLLER_MAX_DEVICES_ITEM,
&bus->max_devices, true) != B_OK) {
// per default, 2 devices are supported per node
bus->max_devices = 2;
}
bus->max_devices = min(bus->max_devices, 2);
if (dmaDisabled
|| pnp->get_attr_uint8(node, IDE_CONTROLLER_CAN_DMA_ITEM, &bus->can_DMA, true) != B_OK) {
// per default, no dma support
bus->can_DMA = false;
}
SHOW_FLOW(2, "can_dma: %d", bus->can_DMA);
if (bus->can_DMA) {
if (pnp->get_attr_uint8(node, IDE_CONTROLLER_CAN_CQ_ITEM, &bus->can_CQ, true) != B_OK) {
// per default, command queuing is supported unless the driver
// reports problems (queuing should be transparent to
// controller, but for sure there is some buggy, over-optimizing
// controller out there)
bus->can_CQ = true;
}
} else {
// I am not sure if it's a problem of the driver or the drive (probably the
// former), but we're generally disabling command queueing in case of PIO
// transfers. Since those should be rare on a real system (as is CQ support
// in the drive), it's not really worth investigating, though.
bus->can_CQ = false;
}
parent = pnp->get_parent(node);
status = pnp->init_driver(parent, bus, (driver_module_info **)&bus->controller,
(void **)&bus->channel_cookie);
pnp->put_device_node(parent);
if (status != B_OK)
goto err5;
*cookie = bus;
// detect devices
sim_scan_bus(bus);
return B_OK;
err5:
DELETE_BEN(&bus->status_report_ben);
err4:
delete_sem(bus->scan_device_sem);
err3:
delete_sem(bus->sync_wait_sem);
err2:
scsi->free_dpc(bus->irq_dpc);
err1:
uninit_synced_pc(&bus->scan_bus_syncinfo);
uninit_synced_pc(&bus->disconnect_syncinfo);
#ifdef USE_FAST_LOG
fast_log->stop_log(bus->log);
err:
#endif
free(bus);
return status;
}
static status_t
ide_sim_uninit_bus(ide_bus_info *bus)
{
device_node_handle parent = pnp->get_parent(bus->node);
pnp->uninit_driver(parent);
pnp->put_device_node(parent);
DELETE_BEN(&bus->status_report_ben);
delete_sem(bus->scan_device_sem);
delete_sem(bus->sync_wait_sem);
scsi->free_dpc(bus->irq_dpc);
uninit_synced_pc(&bus->scan_bus_syncinfo);
uninit_synced_pc(&bus->disconnect_syncinfo);
// fast_log->stop_log(bus->log);
free(bus);
return B_OK;
}
// abort all running requests with SCSI_NO_HBA; finally, unblock bus
static void
disconnect_worker(ide_bus_info *bus, void *arg)
{
int i;
for (i = 0; i < bus->max_devices; ++i) {
if (bus->devices[i])
// is this the proper error code?
finish_all_requests(bus->devices[i], NULL, SCSI_NO_HBA, false);
}
scsi->unblock_bus(bus->scsi_cookie);
}
static void
ide_sim_bus_removed(device_node_handle node, ide_bus_info *bus)
{
if (bus == NULL)
// driver not loaded - no manual intervention needed
return;
// XPT must not issue further commands
scsi->block_bus(bus->scsi_cookie);
// make sure, we refuse all new commands
bus->disconnected = true;
// abort all running commands with SCSI_NO_HBA
// (the scheduled function also unblocks the bus when finished)
schedule_synced_pc(bus, &bus->disconnect_syncinfo, NULL);
}
static void
ide_sim_get_restrictions(ide_bus_info *bus, uchar target_id,
bool *is_atapi, bool *no_autosense, uint32 *max_blocks)
{
ide_device_info *device = bus->devices[target_id];
// we declare even ATA devices as ATAPI so we have to emulate fewer
// commands
*is_atapi = true;
// we emulate autosense for ATA devices
*no_autosense = false;
if (device != NULL && device->is_atapi) {
// we don't support native autosense for ATAPI devices
*no_autosense = true;
}
*max_blocks = 255;
if (device != NULL && device->is_atapi) {
if (strncmp(device->infoblock.model_number, "IOMEGA ZIP 100 ATAPI",
strlen("IOMEGA ZIP 100 ATAPI")) == 0
|| strncmp(device->infoblock.model_number, "IOMEGA Clik!",
strlen("IOMEGA Clik!")) == 0) {
SHOW_ERROR0(2, "Found buggy ZIP/Clik! drive - restricting transmission size");
*max_blocks = 64;
}
}
}
static status_t
ide_sim_ioctl(ide_bus_info *bus, uint8 targetID, uint32 op, void *buffer, size_t length)
{
ide_device_info *device = bus->devices[targetID];
// We currently only support IDE_GET_INFO_BLOCK
switch (op) {
case IDE_GET_INFO_BLOCK:
// we already have the info block, just copy it
memcpy(buffer, &device->infoblock,
min(sizeof(device->infoblock), length));
return B_OK;
case IDE_GET_STATUS:
{
// TODO: have our own structure and fill it with some useful stuff
ide_status status;
if (device->DMA_enabled)
status.dma_status = 1;
else if (device->DMA_supported) {
if (device->DMA_failures > 0)
status.dma_status = 6;
else if (device->bus->can_DMA)
status.dma_status = 2;
else
status.dma_status = 4;
} else
status.dma_status = 2;
status.pio_mode = 0;
status.dma_mode = 0;
memcpy(buffer, &status, min(sizeof(status), length));
return B_OK;
}
}
return B_BAD_VALUE;
}
static status_t
std_ops(int32 op, ...)
{
switch (op) {
case B_MODULE_INIT:
case B_MODULE_UNINIT:
return B_OK;
default:
return B_ERROR;
}
}
module_dependency module_dependencies[] = {
{ SCSI_FOR_SIM_MODULE_NAME, (module_info **)&scsi },
{ B_DEVICE_MANAGER_MODULE_NAME, (module_info **)&pnp },
{}
};
scsi_sim_interface ide_sim_module = {
{
{
IDE_SIM_MODULE_NAME,
0,
std_ops,
},
NULL, // supported devices
NULL, // register node
(status_t (*)(device_node_handle, void *, void **))ide_sim_init_bus,
(status_t (*)(void *) ) ide_sim_uninit_bus,
(void (*)(device_node_handle, void *)) ide_sim_bus_removed
},
(void (*)(scsi_sim_cookie, scsi_ccb *)) sim_scsi_io,
(uchar (*)(scsi_sim_cookie, scsi_ccb *)) sim_abort,
(uchar (*)(scsi_sim_cookie, uchar, uchar)) sim_reset_device,
(uchar (*)(scsi_sim_cookie, scsi_ccb *)) sim_term_io,
(uchar (*)(scsi_sim_cookie, scsi_path_inquiry *))sim_path_inquiry,
(uchar (*)(scsi_sim_cookie)) sim_scan_bus,
(uchar (*)(scsi_sim_cookie)) sim_reset_bus,
(void (*)(scsi_sim_cookie, uchar,
bool*, bool *, uint32 *)) ide_sim_get_restrictions,
(status_t (*)(scsi_sim_cookie, uint8, uint32, void *, size_t))ide_sim_ioctl,
};

View File

@ -0,0 +1,78 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open IDE bus manager
Interface between ide and scsi bus manager
*/
#ifndef __IDE_SIM_H__
#define __IDE_SIM_H__
#include "scsi_cmds.h"
extern scsi_for_sim_interface *scsi;
extern scsi_sim_interface ide_sim_module;
// set sense of current request
static inline void
set_sense(ide_device_info *device, int sense_key, int sense_asc)
{
device->new_combined_sense = (sense_key << 16) | sense_asc;
}
// retrieve key from combined sense
static inline uint8
decode_sense_key(uint32 combined_sense)
{
return (combined_sense >> 16) & 0xff;
}
// retrieve asc from combined sense
static inline uint8
decode_sense_asc(uint32 combined_sense)
{
return (combined_sense >> 8) & 0xff;
}
// retrieve ascq from combined sense
static inline uint8
decode_sense_ascq(uint32 combined_sense)
{
return combined_sense & 0xff;
}
// retrieve asc and ascq from combined sense
static inline uint16
decode_sense_asc_ascq(uint32 combined_sense)
{
return combined_sense & 0xffff;
}
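// The combined sense value packs key, ASC and ASCQ into one uint32 as
// (key << 16) | (asc << 8) | ascq - note that the sense_asc parameter
// of set_sense() above already carries ASC and ASCQ together. A
// round-trip looks like this (values are illustrative):
//
//	set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
//	key = decode_sense_key(device->new_combined_sense);	// 0x05
//	asc = decode_sense_asc(device->new_combined_sense);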
void finish_request(ide_qrequest *qrequest, bool resubmit);
void finish_reset_queue(ide_qrequest *qrequest);
void finish_retry(ide_qrequest *qrequest);
void finish_all_requests(ide_device_info *device, ide_qrequest *ignore,
int subsys_status, bool resubmit);
void finish_checksense(ide_qrequest *qrequest);
// start request by resetting sense
static inline void
start_request(ide_device_info *device, ide_qrequest *qrequest)
{
device->new_combined_sense = 0;
device->subsys_status = SCSI_REQ_CMP;
qrequest->request->device_status = SCSI_STATUS_GOOD;
}
void create_sense(ide_device_info *device, scsi_sense *sense);
#endif

View File

@ -0,0 +1,363 @@
/*
* Copyright 2004-2007, Haiku, Inc. All RightsReserved.
* Copyright 2002-2004, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
PIO data transmission
This file is more difficult than you might expect as the SCSI system
uses physical addresses everywhere which have to be mapped into
virtual address space during transmission. Additionally, during ATAPI
commands we may have to transmit more data than exists because the
data length specified by the command doesn't need to match the size
of the data buffer provided.
The handling of S/G entries of odd size may look superfluous as the
SCSI bus manager can take care of that. In general, this would be possible
as most controllers need even alignment for DMA as well, but some can
handle _any_ S/G list and it wouldn't be sensible to enforce stricter
alignment just for some rare PIO transmissions.
Little hint for the meaning of "transferred": this is the number of bytes
sent over the bus. For read-transmissions, this may be one more than copied
into the buffer (the extra byte read is stored in device->odd_byte); for
write-transmissions, this may be one less (the waiting byte is pending in
device->odd_byte).
In terms of error handling: we don't bother checking transmission of every
single byte via read/write_pio(). At least at the end of the request, when
the status bits are verified, we will see that something has gone wrong.
TBD: S/G entries may have an odd start address. For non-Intel architectures
we either have to copy data to an aligned buffer or have to modify
PIO-handling in controller drivers.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include <vm.h>
#include <string.h>
// internal error code if scatter gather table is too short
#define ERR_TOO_BIG (B_ERRORS_END + 1)
/*! Prepare PIO transfer */
void
prep_PIO_transfer(ide_device_info *device, ide_qrequest *qrequest)
{
SHOW_FLOW0(4, "");
device->left_sg_elem = qrequest->request->sg_count;
device->cur_sg_elem = qrequest->request->sg_list;
device->cur_sg_ofs = 0;
device->has_odd_byte = false;
qrequest->request->data_resid = qrequest->request->data_length;
}
/*! Transfer virtually continuous data */
static inline status_t
transfer_PIO_virtcont(ide_device_info *device, uint8 *virtualAddress,
int length, bool write, int *transferred)
{
ide_bus_info *bus = device->bus;
ide_controller_interface *controller = bus->controller;
void *channel_cookie = bus->channel_cookie;
if (write) {
// if there is a byte left from last chunk, transmit it together
// with the first byte of the current chunk (IDE requires 16 bits
// to be transmitted at once)
if (device->has_odd_byte) {
uint8 buffer[2];
buffer[0] = device->odd_byte;
buffer[1] = *virtualAddress++;
controller->write_pio(channel_cookie, (uint16 *)buffer, 1, false);
--length;
*transferred += 2;
}
controller->write_pio(channel_cookie, (uint16 *)virtualAddress,
length / 2, false);
// take care if chunk size was odd, which means that 1 byte remains
virtualAddress += length & ~1;
*transferred += length & ~1;
device->has_odd_byte = (length & 1) != 0;
if (device->has_odd_byte)
device->odd_byte = *virtualAddress;
} else {
// if we read one byte too much last time, push it into current chunk
if (device->has_odd_byte) {
*virtualAddress++ = device->odd_byte;
--length;
}
SHOW_FLOW(4, "Reading PIO to %p, %d bytes", virtualAddress, length);
controller->read_pio(channel_cookie, (uint16 *)virtualAddress,
length / 2, false);
// take care of odd chunk size;
// in this case we read 1 byte too few!
virtualAddress += length & ~1;
*transferred += length & ~1;
device->has_odd_byte = (length & 1) != 0;
if (device->has_odd_byte) {
uint8 buffer[2];
// now read the missing byte; as we have to read 2 bytes at once,
// we'll read one byte too much
controller->read_pio(channel_cookie, (uint16 *)buffer, 1, false);
*virtualAddress = buffer[0];
device->odd_byte = buffer[1];
*transferred += 2;
}
}
return B_OK;
}
/*! Transmit physically continuous data */
static inline status_t
transfer_PIO_physcont(ide_device_info *device, addr_t physicalAddress,
int length, bool write, int *transferred)
{
// we must split up the chunk into B_PAGE_SIZE blocks as we can map only
// one page into address space at once
while (length > 0) {
addr_t virtualAddress;
int page_left, cur_len;
status_t err;
SHOW_FLOW(4, "Transmitting to/from physical address %lx, %d bytes left",
physicalAddress, length);
if (vm_get_physical_page(physicalAddress, &virtualAddress,
PHYSICAL_PAGE_CAN_WAIT) != B_OK) {
// ouch: this should never ever happen
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
return B_ERROR;
}
// if the chunk starts in the middle of a page, we have even less than
// a page left
page_left = B_PAGE_SIZE - physicalAddress % B_PAGE_SIZE;
SHOW_FLOW(4, "page_left=%d", page_left);
cur_len = min(page_left, length);
SHOW_FLOW(4, "cur_len=%d", cur_len);
err = transfer_PIO_virtcont(device, (char *)virtualAddress,
cur_len, write, transferred);
vm_put_physical_page(virtualAddress);
if (err != B_OK)
return err;
length -= cur_len;
physicalAddress += cur_len;
}
return B_OK;
}
/*! Transfer PIO block from/to buffer */
static inline int
transfer_PIO_block(ide_device_info *device, int length, bool write, int *transferred)
{
// data is usually split up into multiple scatter/gather blocks
while (length > 0) {
int left_bytes, cur_len;
status_t err;
if (device->left_sg_elem == 0)
// oops - buffer too small (for ATAPI data, this is OK)
return ERR_TOO_BIG;
// we might have transmitted part of a scatter/gather entry already!
left_bytes = device->cur_sg_elem->size - device->cur_sg_ofs;
cur_len = min(left_bytes, length);
err = transfer_PIO_physcont(device,
(addr_t)device->cur_sg_elem->address + device->cur_sg_ofs,
cur_len, write, transferred);
if (err != B_OK)
return err;
if (left_bytes <= length) {
// end of one scatter/gather block reached
device->cur_sg_ofs = 0;
++device->cur_sg_elem;
--device->left_sg_elem;
} else {
// still in the same block
device->cur_sg_ofs += cur_len;
}
length -= cur_len;
}
return B_OK;
}
/*! Write zero data (required for ATAPI if we ran out of data) */
static void
write_discard_PIO(ide_device_info *device, int length)
{
ide_bus_info *bus = device->bus;
uint8 buffer[32];
memset(buffer, 0, sizeof(buffer));
// we transmit 32 zero-bytes at once
// (not very efficient but easy to implement - you get what you deserve
// when you don't provide enough buffer)
while (length > 0) {
int cur_len;
// if device asks for odd number of bytes, append an extra byte to
// make length even (this is the "length + 1" term)
cur_len = min(length + 1, (int)(sizeof(buffer))) / 2;
bus->controller->write_pio(bus->channel_cookie, (uint16 *)buffer, cur_len, false);
length -= cur_len * 2;
}
}
/*! Read PIO data and discard it (required for ATAPI if buffer was too small) */
static void
read_discard_PIO(ide_device_info *device, int length)
{
ide_bus_info *bus = device->bus;
uint8 buffer[32];
// discard 32 bytes at once (see write_discard_PIO)
while (length > 0) {
int cur_len;
// read extra byte if length is odd (that's the "length + 1")
cur_len = min(length + 1, (int)sizeof(buffer)) / 2;
bus->controller->read_pio(bus->channel_cookie, (uint16 *)buffer, cur_len, false);
length -= cur_len * 2;
}
}
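// Worked example for the rounding in both discard helpers: an odd
// residue of 5 bytes gives cur_len = min(5 + 1, 32) / 2 = 3 words, so
// 6 bytes cross the bus and length drops to -1, ending the loop - the
// extra byte is exactly the padding the device expects.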
/*! write PIO data
return: there are 3 possible results
B_OK - everything's nice and groovy
ERR_TOO_BIG - data buffer was too short, remaining data got discarded
B_ERROR - something serious went wrong, sense data was set
*/
status_t
write_PIO_block(ide_qrequest *qrequest, int length)
{
ide_device_info *device = qrequest->device;
int transferred;
status_t err;
transferred = 0;
err = transfer_PIO_block(device, length, true, &transferred);
qrequest->request->data_resid -= transferred;
if (err != ERR_TOO_BIG)
return err;
// there may be a pending odd byte - transmit that now
if (qrequest->device->has_odd_byte) {
uint8 buffer[2];
buffer[0] = device->odd_byte;
buffer[1] = 0;
device->has_odd_byte = false;
qrequest->request->data_resid -= 1;
transferred += 2;
device->bus->controller->write_pio(device->bus->channel_cookie, (uint16 *)buffer, 1, false);
}
// "transferred" may actually be larger then length because the last odd-byte
// is sent together with an extra zero-byte
if (transferred >= length)
return err;
// Ouch! the device asks for data but we haven't got any left.
// Sadly, this behaviour is OK for ATAPI packets, but there is no
// way to tell the device that we don't have any data left;
// only solution is to send zero bytes, though it's BAD BAD BAD
write_discard_PIO(qrequest->device, length - transferred);
return ERR_TOO_BIG;
}
/*! read PIO data
return: see write_PIO_block
*/
status_t
read_PIO_block(ide_qrequest *qrequest, int length)
{
ide_device_info *device = qrequest->device;
int transferred;
status_t err;
transferred = 0;
err = transfer_PIO_block(qrequest->device, length, false, &transferred);
qrequest->request->data_resid -= transferred;
// if length was odd, there's an extra byte waiting in device->odd_byte
if (device->has_odd_byte) {
// discard byte
device->has_odd_byte = false;
// adjust data_resid as the extra byte didn't reach the buffer
++qrequest->request->data_resid;
}
if (err != ERR_TOO_BIG)
return err;
// the device returns more data than the buffer can store;
// for ATAPI this is OK - we just discard remaining bytes (there
// is no way to tell ATAPI about that, but we "only" waste time)
// perhaps discarding the extra odd-byte was sufficient
if (transferred >= length)
return err;
SHOW_FLOW(3, "discarding after %d bytes", transferred);
read_discard_PIO(qrequest->device, length - transferred);
return ERR_TOO_BIG;
}
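/* Illustrative sketch (hypothetical caller, not part of this driver): how
   the tri-state result of read_PIO_block/write_PIO_block is meant to be
   consumed. ERR_TOO_BIG only signals that buffer space ran out - the
   device-side transfer completed and data_resid was already updated. */
#if 0
static void
example_handle_pio_read(ide_qrequest *qrequest, int length)
{
	status_t err = read_PIO_block(qrequest, length);

	if (err == B_OK) {
		// everything was transferred and stored
	} else if (err == ERR_TOO_BIG) {
		// device sent more than the buffer could hold;
		// the surplus was discarded - for ATAPI this is acceptable
	} else {
		// something serious went wrong; sense data has been set
	}
}
#endif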

View File

@ -0,0 +1,374 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open IDE bus manager
Command queuing functions
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include "ide_cmds.h"
#include <string.h>
#include <malloc.h>
// maximum number of errors until command queuing is disabled
#define MAX_CQ_FAILURES 3
/** convert tag to request */
static inline ide_qrequest *
tag2request(ide_device_info *device, int tag)
{
ide_qrequest *qrequest = &device->qreq_array[tag];
if (qrequest->running)
return qrequest;
return NULL;
}
/** service device
*
* (expects locked bus and bus in "accessing" state)
* returns true if servicing a command (implies having bus unlocked)
* returns false on error
*/
static bool
service_device(ide_device_info *device)
{
ide_qrequest *qrequest;
int tag;
SHOW_FLOW0( 3, "Start servicing" );
// delete timeout first
// we must unlock bus before cancelling timer: if the timeout has
// just been fired we have to wait for it, but in turn it waits
// for the ide bus -> deadlock
IDE_UNLOCK(device->bus);
cancel_timer(&device->reconnect_timer.te);
// between IDE_UNLOCK and cancel_timer the request may have been
// discarded due to a timeout, so it's not a hardware problem
// if servicing fails
// further, the device discards the entire queue if anything goes
// wrong, thus we call send_abort_queue on each error
// (we could also discard the queue without telling the device,
// but we prefer setting the device into a safe state)
// ask device to continue
if (!device_start_service(device, &tag)) {
send_abort_queue(device);
goto err;
}
SHOW_FLOW0( 3, "device starts service" );
// get tag of request
qrequest = tag2request(device, tag);
if (qrequest == NULL) {
send_abort_queue(device);
goto err;
}
SHOW_FLOW( 3, "continue request %p with tag %d", qrequest, tag );
device->bus->active_qrequest = qrequest;
// from here on, queuing is ATA read/write specific, so you have to
// modify that if you want to support ATAPI queuing!
if (check_rw_error(device, qrequest)) {
// if a read/write error occurred, the request really failed
finish_reset_queue(qrequest);
goto err;
}
// all ATA commands continue with a DMA request
if (!prepare_dma(device, qrequest)) {
// this is effectively impossible: before the command was initially
// sent, prepare_dma had been called and obviously didn't fail,
// so why should it fail now?
device->subsys_status = SCSI_HBA_ERR;
finish_reset_queue(qrequest);
goto err;
}
SHOW_FLOW0( 3, "launch DMA" );
start_dma_wait_no_lock(device, qrequest);
return true;
err:
// don't start timeout - all requests have been discarded at this point
IDE_LOCK(device->bus);
return false;
}
/** check if some device on bus wants to continue queued requests;
*
* (expects locked bus and bus in "accessing" state)
* returns true if servicing a command (implies having bus unlocked)
* returns false if nothing to service
*/
bool
try_service(ide_device_info *device)
{
bool this_device_needs_service;
ide_device_info *other_device;
other_device = device->other_device;
// first check whether current device requests service
// (the current device is selected anyway, so asking it is fast)
this_device_needs_service = check_service_req(device);
// service other device first as it was certainly waiting
// longer than the current device
if (other_device != device && check_service_req(other_device)) {
if (service_device(other_device)) {
// we handed over control; start timeout for device
// (see below about fairness)
if (device->num_running_reqs > 0) {
if (!device->reconnect_timer_installed) {
device->reconnect_timer_installed = true;
add_timer(&device->reconnect_timer.te, reconnect_timeout,
IDE_RELEASE_TIMEOUT, B_ONE_SHOT_RELATIVE_TIMER);
}
}
return true;
}
}
// service our device second
if (this_device_needs_service) {
if (service_device(device))
return true;
}
// if device has pending reqs, start timeout.
// this may sound strange as we cannot be blamed if the
// other device blocks us. But: the timeout is delayed until
// the bus is idle, so once the other device finishes its
// access, we have a chance of servicing all the pending
// commands before the timeout handler is executed
if (device->num_running_reqs > 0) {
if (!device->reconnect_timer_installed) {
device->reconnect_timer_installed = true;
add_timer(&device->reconnect_timer.te, reconnect_timeout,
IDE_RELEASE_TIMEOUT, B_ONE_SHOT_RELATIVE_TIMER);
}
}
return false;
}
bool
initialize_qreq_array(ide_device_info *device, int queue_depth)
{
int i;
device->queue_depth = queue_depth;
SHOW_FLOW( 3, "queue depth=%d", device->queue_depth );
device->qreq_array = (ide_qrequest *)malloc(queue_depth * sizeof(ide_qrequest));
if (device->qreq_array == NULL)
return false;
memset(device->qreq_array, 0, queue_depth * sizeof(ide_qrequest));
device->free_qrequests = NULL;
for (i = queue_depth - 1; i >= 0 ; --i) {
ide_qrequest *qrequest = &device->qreq_array[i];
qrequest->next = device->free_qrequests;
device->free_qrequests = qrequest;
qrequest->running = false;
qrequest->device = device;
qrequest->tag = i;
qrequest->request = NULL;
}
return true;
}
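/* Illustrative sketch (hypothetical helpers, not part of this driver): the
   qreq_array built above doubles as a LIFO free list threaded through the
   "next" pointers, so taking and returning an entry is O(1): */
#if 0
static ide_qrequest *
example_alloc_qrequest(ide_device_info *device)
{
	ide_qrequest *qrequest = device->free_qrequests;

	if (qrequest != NULL)
		device->free_qrequests = qrequest->next;

	return qrequest;
}

static void
example_free_qrequest(ide_device_info *device, ide_qrequest *qrequest)
{
	qrequest->next = device->free_qrequests;
	device->free_qrequests = qrequest;
}
#endif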
void
destroy_qreq_array(ide_device_info *device)
{
if (device->qreq_array) {
free(device->qreq_array);
device->qreq_array = NULL;
}
device->num_running_reqs = 0;
device->queue_depth = 0;
device->free_qrequests = NULL;
}
/** change maximum number of queuable requests */
static bool
change_qreq_array(ide_device_info *device, int queue_depth)
{
ide_qrequest *qreq_array = device->qreq_array;
ide_qrequest *old_free_qrequests = device->free_qrequests;
int old_queue_depth = device->queue_depth;
// be very cautious - even if no queuing supported, we still need
// one queue entry; if this allocation fails, we have a device that
// cannot accept any command, which would be odd
if (initialize_qreq_array( device, queue_depth)) {
free(qreq_array);
return true;
}
device->qreq_array = qreq_array;
device->num_running_reqs = 0;
device->queue_depth = old_queue_depth;
device->free_qrequests = old_free_qrequests;
return false;
}
/** reconnect timeout worker
* must be called as a synced procedure call, i.e.
* the bus is allocated for us
*/
void
reconnect_timeout_worker(ide_bus_info *bus, void *arg)
{
ide_device_info *device = (ide_device_info *)arg;
// perhaps all requests have been successfully finished
// when the synced pc was waiting; in this case, everything's fine
// (this is _very_ important if the other device blocks the bus
// for a long time - if this leads to a reconnect timeout, the
// device has a last chance by servicing all requests without
// delay, in which case this function gets delayed until all
// pending requests are finished and the following test would
// make sure that this false alarm gets ignored)
if (device->num_running_reqs > 0) {
// if one queued command fails, all of them fail
send_abort_queue(device);
// if too many timeouts occur, disable CQ
if (++device->CQ_failures > MAX_CQ_FAILURES) {
device->CQ_enabled = false;
change_qreq_array(device, 1);
}
}
// we've blocked the bus in dpc - undo that
scsi->unblock_bus(device->bus->scsi_cookie);
}
/** dpc callback for reconnect timeout */
static void
reconnect_timeout_dpc(void *arg)
{
ide_device_info *device = (ide_device_info *)arg;
// even though we are in the service thread,
// the bus can be in use (e.g. by an ongoing PIO command),
// so we have to issue a synced procedure call which
// waits for the command to be finished
// meanwhile, we don't want any command to be issued to this device
// as we are going to discard the entire device queue;
// sadly, we don't have a reliable XPT device handle, so we block
// the bus instead (this is an error handler, so performance is
// not crucial)
scsi->block_bus(device->bus->scsi_cookie);
schedule_synced_pc(device->bus, &device->reconnect_timeout_synced_pc, device);
}
/** timer function for reconnect timeout */
int32
reconnect_timeout(timer *arg)
{
ide_device_info *device = ((ide_device_timer_info *)arg)->device;
ide_bus_info *bus = device->bus;
// we are polite and let the service thread do the job
scsi->schedule_dpc(bus->scsi_cookie, device->reconnect_timeout_dpc,
reconnect_timeout_dpc, device);
return B_INVOKE_SCHEDULER;
}
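// to summarize, the reconnect timeout travels through three stages:
// reconnect_timeout() (raw timer hook, interrupt context) schedules
// reconnect_timeout_dpc() (service thread), which blocks the bus and
// schedules reconnect_timeout_worker() as a synced procedure call that
// is only executed once the bus is idle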
/** tell device to abort all queued requests
* (tells XPT to resubmit these requests)
* return: true - abort successful
* false - abort failed (in this case, nothing can be done)
*/
bool
send_abort_queue(ide_device_info *device)
{
int status;
ide_bus_info *bus = device->bus;
SHOW_FLOW0( 3, "" );
device->tf.write.command = IDE_CMD_NOP;
// = discard outstanding commands
device->tf.write.features = IDE_CMD_NOP_NOP;
device->tf_param_mask = ide_mask_features;
if (!send_command(device, NULL, true, 0, ide_state_accessing))
goto err;
if (!wait_for_drdy(device))
goto err;
// device must answer "command rejected" and discard outstanding commands
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & ide_status_err) == 0)
goto err;
if (!bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_error)) {
// don't bother trying bus_reset as controller disappeared
device->subsys_status = SCSI_HBA_ERR;
return false;
}
if ((device->tf.read.error & ide_error_abrt) == 0)
goto err;
finish_all_requests(device, NULL, 0, true);
return true;
err:
// ouch! device didn't react - we have to reset it
return reset_device(device, NULL);
}

View File

@ -0,0 +1,523 @@
/*
* Copyright 2004-2007, Haiku, Inc. All RightsReserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
Converts SCSI commands to ATA commands.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include "ide_cmds.h"
#include <string.h>
/** emulate MODE SENSE 10 command */
static void
ata_mode_sense_10(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
scsi_cmd_mode_sense_10 *cmd = (scsi_cmd_mode_sense_10 *)request->cdb;
scsi_mode_param_header_10 param_header;
scsi_modepage_control control;
scsi_mode_param_block_desc block_desc;
size_t totalLength = sizeof(scsi_mode_param_header_10)
+ sizeof(scsi_mode_param_block_desc)
+ sizeof(scsi_modepage_control);
scsi_mode_param_dev_spec_da devspec = {
_res0_0 : 0,
dpo_fua : 0,
_res0_6 : 0,
write_protected : 0
};
uint32 allocationLength;
SHOW_FLOW0(1, "Hi!");
allocationLength = B_BENDIAN_TO_HOST_INT16(cmd->allocation_length);
// we answer control page requests and "all pages" requests
// (as the latter are the same as the first)
if ((cmd->page_code != SCSI_MODEPAGE_CONTROL && cmd->page_code != SCSI_MODEPAGE_ALL)
|| (cmd->page_control != SCSI_MODE_SENSE_PC_CURRENT
&& cmd->page_control != SCSI_MODE_SENSE_PC_SAVED)) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
return;
}
//param_header = (scsi_mode_param_header_10 *)request->data;
param_header.mode_data_length = B_HOST_TO_BENDIAN_INT16(totalLength - 1);
param_header.medium_type = 0; // XXX standard is a bit vague here
param_header.dev_spec_parameter = *(uint8 *)&devspec;
param_header.block_desc_length
= B_HOST_TO_BENDIAN_INT16(sizeof(scsi_mode_param_block_desc));
copy_sg_data(request, 0, allocationLength, &param_header,
sizeof(param_header), false);
/*block_desc = (scsi_mode_param_block_desc *)(request->data
+ sizeof(*param_header));*/
memset(&block_desc, 0, sizeof(block_desc));
// density is reserved (0), descriptor applies to entire medium (num_blocks = 0);
// only the block length remains to be set
block_desc.high_blocklen = 0;
block_desc.med_blocklen = 512 >> 8;
block_desc.low_blocklen = 512 & 0xff;
copy_sg_data(request, sizeof(param_header), allocationLength,
&block_desc, sizeof(block_desc), false);
/*contr = (scsi_modepage_contr *)(request->data
+ sizeof(*param_header)
+ ((uint16)param_header->high_block_desc_len << 8)
+ param_header->low_block_desc_len);*/
memset(&control, 0, sizeof(control));
control.RLEC = false;
control.DQue = !device->CQ_enabled;
control.QErr = false;
// when a command fails we requeue all
// lost commands automagically
control.QAM = SCSI_QAM_UNRESTRICTED;
copy_sg_data(request, sizeof(param_header)
+ B_BENDIAN_TO_HOST_INT16(param_header.block_desc_length),
allocationLength, &control, sizeof(control), false);
// the number of bytes that were transferred to buffer is
// restricted by allocation length and by request data buffer size
totalLength = min(totalLength, allocationLength);
totalLength = min(totalLength, request->data_length);
request->data_resid = request->data_length - totalLength;
}
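/* Resulting buffer layout of the emulated reply built above (offsets assume
   the standard 8 byte mode parameter header and 8 byte block descriptor):
	offset 0:  scsi_mode_param_header_10 (mode data length, medium type, ...)
	offset 8:  scsi_mode_param_block_desc (block length = 512)
	offset 16: scsi_modepage_control (DQue mirrors !device->CQ_enabled)
   each chunk is copied independently via copy_sg_data and clipped against
   the allocation length */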
/*! Emulate modifying control page */
static bool
ata_mode_select_control_page(ide_device_info *device, ide_qrequest *qrequest,
scsi_modepage_control *page)
{
if (page->header.page_length != sizeof(*page) - sizeof(page->header)) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
return false;
}
// we only support enabling/disabling command queuing
enable_CQ(device, !page->DQue);
return true;
}
/*! Emulate MODE SELECT 10 command */
static void
ata_mode_select_10(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
scsi_cmd_mode_select_10 *cmd = (scsi_cmd_mode_select_10 *)request->cdb;
scsi_mode_param_header_10 param_header;
scsi_modepage_header page_header;
uint32 totalLength;
uint32 modepageOffset;
char modepage_buffer[64]; // !!! enlarge this to support longer mode pages
if (cmd->save_pages || cmd->pf != 1) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
return;
}
totalLength = min(request->data_length,
B_BENDIAN_TO_HOST_INT16(cmd->param_list_length));
// first, retrieve page header to get size of different chunks
//param_header = (scsi_mode_param_header_10 *)request->data;
if (!copy_sg_data(request, 0, totalLength, &param_header, sizeof(param_header), true))
goto err;
totalLength = min(totalLength,
B_BENDIAN_TO_HOST_INT16(param_header.mode_data_length) + 1UL);
// this is the start of the first mode page;
// we ignore the block descriptor silently
modepageOffset = sizeof(param_header)
+ B_BENDIAN_TO_HOST_INT16(param_header.block_desc_length);
// go through list of pages
while (modepageOffset < totalLength) {
uint32 pageLength;
// get header to know how long page is
if (!copy_sg_data(request, modepageOffset, totalLength,
&page_header, sizeof(page_header), true))
goto err;
// get size of one page and copy it to buffer
pageLength = page_header.page_length + sizeof(scsi_modepage_header);
// the buffer has a maximum size - this is not really standard compliant
// but sufficient for our needs
if (pageLength > sizeof(modepage_buffer))
goto err;
if (!copy_sg_data(request, modepageOffset, totalLength,
&modepage_buffer, min(pageLength, sizeof(modepage_buffer)), true))
goto err;
// modify page;
// currently, we only support the control mode page
switch (page_header.page_code) {
case SCSI_MODEPAGE_CONTROL:
if (!ata_mode_select_control_page(device, qrequest,
(scsi_modepage_control *)modepage_buffer))
return;
break;
default:
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST,
SCSIS_ASC_INV_PARAM_LIST_FIELD);
return;
}
modepageOffset += pageLength;
}
if (modepageOffset != totalLength)
goto err;
request->data_resid = request->data_length - totalLength;
return;
// if we arrive here, data length was incorrect
err:
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
}
/*! Emulate TEST UNIT READY */
static bool
ata_test_unit_ready(ide_device_info *device, ide_qrequest *qrequest)
{
SHOW_FLOW0(3, "");
if (!device->infoblock.RMSN_supported
|| device->infoblock._127_RMSN_support != 1)
return true;
// ask device about status
device->tf_param_mask = 0;
device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;
if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
return false;
// bits ide_error_mcr | ide_error_mc | ide_error_wp are also valid
// but not requested by TUR; ide_error_wp can safely be ignored, but
// we don't want to lose media change (request) reports
if (!check_output(device, true,
ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc,
false)) {
// SCSI spec is unclear here: we shouldn't report "media change (request)"
// but what to do if there is one? anyway - we report them
;
}
return true;
}
/*! Flush internal device cache */
static bool
ata_flush_cache(ide_device_info *device, ide_qrequest *qrequest)
{
// we should also ask for FLUSH CACHE support, but everyone denies it
// (looks like they cheat to gain some performance advantage, but
// that's pretty useless: everyone does it...)
if (!device->infoblock.write_cache_supported)
return true;
device->tf_param_mask = 0;
device->tf.lba.command = device->use_48bits ? IDE_CMD_FLUSH_CACHE_EXT
: IDE_CMD_FLUSH_CACHE;
// spec says that this may take more than 30s, how much more?
if (!send_command(device, qrequest, true, 60, ide_state_sync_waiting))
return false;
wait_for_sync(device->bus);
return check_output(device, true, ide_error_abrt, false);
}
/*! Load or eject medium
load = true - load medium
*/
static bool
ata_load_eject(ide_device_info *device, ide_qrequest *qrequest, bool load)
{
if (load) {
// ATA doesn't support loading
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_NOT_SUPPORTED);
return false;
}
device->tf_param_mask = 0;
device->tf.lba.command = IDE_CMD_MEDIA_EJECT;
if (!send_command(device, qrequest, true, 15, ide_state_sync_waiting))
return false;
wait_for_sync(device->bus);
return check_output(device, true, ide_error_abrt | ide_error_nm, false);
}
/*! Emulate PREVENT ALLOW command */
static bool
ata_prevent_allow(ide_device_info *device, bool prevent)
{
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_ILL_FUNCTION);
return false;
}
/*! Emulate INQUIRY command */
static void
ata_inquiry(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
scsi_res_inquiry data;
scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)request->cdb;
uint32 allocation_length = cmd->allocation_length;
uint32 transfer_size;
if (cmd->evpd || cmd->page_code) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
return;
}
memset(&data, 0, sizeof(data));
data.device_type = scsi_dev_direct_access;
data.device_qualifier = scsi_periph_qual_connected;
data.device_type_modifier = 0;
data.removable_medium = false;
data.ansi_version = 2;
data.ecma_version = 0;
data.iso_version = 0;
data.response_data_format = 2;
data.term_iop = false;
// to be changed if we support TERM I/O
data.additional_length = sizeof(scsi_res_inquiry) - 4;
data.soft_reset = false;
data.cmd_queue = device->queue_depth > 1;
data.linked = false;
// these values are free-style
data.sync = false;
data.write_bus16 = true;
data.write_bus32 = false;
data.relative_address = false;
// the following fields are *much* too small, sigh...
memcpy(data.vendor_ident, device->infoblock.model_number,
sizeof(data.vendor_ident));
memcpy(data.product_ident, device->infoblock.model_number + 8,
sizeof(data.product_ident));
memcpy(data.product_rev, " ", sizeof(data.product_rev));
copy_sg_data(request, 0, allocation_length, &data, sizeof(data), false);
transfer_size = min(sizeof(data), allocation_length);
transfer_size = min(transfer_size, request->data_length);
request->data_resid = request->data_length - transfer_size;
}
/*! Emulate READ CAPACITY command */
static void
read_capacity(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
scsi_res_read_capacity data;
scsi_cmd_read_capacity *cmd = (scsi_cmd_read_capacity *)request->cdb;
uint32 lastBlock;
if (cmd->pmi || cmd->lba) {
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
return;
}
// TODO: 512 bytes fixed block size?
data.block_size = B_HOST_TO_BENDIAN_INT32(512);
lastBlock = device->total_sectors - 1;
data.lba = B_HOST_TO_BENDIAN_INT32(lastBlock);
copy_sg_data(request, 0, request->data_length, &data, sizeof(data), false);
// data_length is unsigned, so guard against underflow by hand instead of max()
request->data_resid = request->data_length > sizeof(data)
	? request->data_length - sizeof(data) : 0;
}
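// note for readers: the reply carries the *last* addressable block, not the
// block count, so an initiator recovers the capacity as
// (lba + 1) * block_size - e.g. lba = 0x3fffff at 512 bytes/block is 2 GB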
/*! Execute SCSI command */
void
ata_exec_io(ide_device_info *device, ide_qrequest *qrequest)
{
scsi_ccb *request = qrequest->request;
SHOW_FLOW(3, "command=%x", request->cdb[0]);
// ATA devices have one LUN only
if (request->target_lun != 0) {
request->subsys_status = SCSI_SEL_TIMEOUT;
finish_request(qrequest, false);
return;
}
// starting a request means deleting sense, so don't do it if
// the command wants to read it
if (request->cdb[0] != SCSI_OP_REQUEST_SENSE)
start_request(device, qrequest);
switch (request->cdb[0]) {
case SCSI_OP_TEST_UNIT_READY:
ata_test_unit_ready(device, qrequest);
break;
case SCSI_OP_REQUEST_SENSE:
ide_request_sense(device, qrequest);
return;
case SCSI_OP_FORMAT: /* FORMAT UNIT */
// we could forward request to disk, but modern disks cannot
// be formatted anyway, so we just refuse request
// (exceptions are removable media devices, but to my knowledge
// they don't have to be formatted either)
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;
case SCSI_OP_INQUIRY:
ata_inquiry(device, qrequest);
break;
case SCSI_OP_MODE_SELECT_10:
ata_mode_select_10(device, qrequest);
break;
case SCSI_OP_MODE_SENSE_10:
ata_mode_sense_10(device, qrequest);
break;
case SCSI_OP_MODE_SELECT_6:
case SCSI_OP_MODE_SENSE_6:
// we've told the SCSI bus manager to emulate these commands
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;
case SCSI_OP_RESERVE:
case SCSI_OP_RELEASE:
// though mandatory, this doesn't make much sense in a
// single initiator environment; so what
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;
case SCSI_OP_START_STOP: {
scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)request->cdb;
// with no LoEj bit set, we should only allow/deny further access
// we ignore that (unsupported for ATA)
// with LoEj bit set, we should additionally either load or eject the medium
// (start = 0 - eject; start = 1 - load)
if (!cmd->start)
// we must always flush cache if start = 0
ata_flush_cache(device, qrequest);
if (cmd->load_eject)
ata_load_eject(device, qrequest, cmd->start);
break;
}
case SCSI_OP_PREVENT_ALLOW: {
scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)request->cdb;
ata_prevent_allow(device, cmd->prevent);
break;
}
case SCSI_OP_READ_CAPACITY:
read_capacity(device, qrequest);
break;
case SCSI_OP_VERIFY:
// does anyone use this function?
// effectively, it does a read-and-compare, which IDE doesn't support
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
break;
case SCSI_OP_SYNCHRONIZE_CACHE:
// we ignore range and immediate bit, we always immediately flush everything
ata_flush_cache(device, qrequest);
break;
// sadly, there are two possible read/write operation codes;
// at least, the third one, read/write(12), is not valid for DAS
case SCSI_OP_READ_6:
case SCSI_OP_WRITE_6:
{
scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)request->cdb;
uint32 pos;
size_t length;
pos = ((uint32)cmd->high_lba << 16) | ((uint32)cmd->mid_lba << 8)
| (uint32)cmd->low_lba;
length = cmd->length != 0 ? cmd->length : 256;
SHOW_FLOW(3, "READ6/WRITE6 pos=%lx, length=%lx", pos, length);
ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_6);
return;
}
case SCSI_OP_READ_10:
case SCSI_OP_WRITE_10:
{
scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)request->cdb;
uint32 pos;
size_t length;
pos = B_BENDIAN_TO_HOST_INT32(cmd->lba);
length = B_BENDIAN_TO_HOST_INT16(cmd->length);
if (length != 0) {
ata_send_rw(device, qrequest, pos, length, cmd->opcode == SCSI_OP_WRITE_10);
} else {
// we cannot transfer zero blocks (apart from LBA48)
finish_request(qrequest, false);
}
return;
}
default:
set_sense(device, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
}
finish_checksense(qrequest);
}

View File

@ -0,0 +1,472 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
Handling of passive waiting and synchronized procedure calls.
The latter are calls that get delayed until the bus is idle.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include <string.h>
//#define TRACE_SYNC
#ifdef TRACE_SYNC
# define TRACE(x) { dprintf("%s(): ", __FUNCTION__); dprintf x ; }
#else
# define TRACE(x) ;
#endif
/** DPC handler for IRQs */
void
ide_dpc(void *arg)
{
ide_bus_info *bus = (ide_bus_info *)arg;
ide_qrequest *qrequest;
ide_device_info *device;
TRACE(("\n"));
//snooze(500000);
// IRQ handler doesn't tell us whether this bus was in async_wait or
// in idle state, so we just check whether there is an active request,
// which means that we were async_waiting
if (bus->active_qrequest != NULL) {
FAST_LOG1(bus->log, ev_ide_dpc_continue, (uint32)bus->active_qrequest);
TRACE(("continue command\n"));
// cancel timeout
cancel_timer(&bus->timer.te);
qrequest = bus->active_qrequest;
device = qrequest->device;
// not perfect but simple: we simply know who is waiting and why
if (device->is_atapi)
packet_dpc(qrequest);
else {
if (qrequest->uses_dma)
ata_dpc_DMA(qrequest);
else
ata_dpc_PIO(qrequest);
}
} else {
// no request active, so this must be a service request or
// a spurious IRQ; access_finished will take care of testing
// for service requests
TRACE(("irq in idle mode - possible service request\n"));
FAST_LOG0(bus->log, ev_ide_dpc_service);
device = get_current_device(bus);
if (device == NULL) {
// got an interrupt from a non-existing device
// either this is a spurious interrupt or there *is* a device
// but we haven't detected it - we better ignore it silently
access_finished(bus, bus->first_device);
} else {
// access_finished always checks the other device first, but as
// we do have a service request, we negate the negation
access_finished(bus, device->other_device);
}
// let XPT resend commands that got blocked
scsi->cont_send_bus(bus->scsi_cookie);
}
return;
/*err:
xpt->cont_send( bus->xpt_cookie );*/
}
/** handler for IDE IRQs */
status_t
ide_irq_handler(ide_bus_info *bus, uint8 status)
{
ide_device_info *device;
// we need to lock bus to have a solid bus state
// (side effect: we lock out the timeout handler and get
// delayed if the IRQ happens at the same time as a command is
// issued; in the latter case, we have no official way to determine
// whether the command was issued before or afterwards; if it was
// afterwards, the device must not be busy; if it was before,
// the device is either busy because of the sent command, or it's
// not busy as the command has already been finished, i.e. there
// was a second IRQ which we've overlooked as we didn't acknowledge
// the first IRQ)
IDE_LOCK(bus);
device = bus->active_device;
if (device == NULL) {
IDE_UNLOCK(bus);
TRACE(("IRQ though there is no active device\n"));
return B_UNHANDLED_INTERRUPT;
}
if ((status & ide_status_bsy) != 0) {
// the IRQ seems to be fired before the last command was sent,
// i.e. it's not the one that signals finishing of command
IDE_UNLOCK(bus);
TRACE(("IRQ though device is busy\n"));
return B_UNHANDLED_INTERRUPT;
}
switch (bus->state) {
case ide_state_async_waiting:
TRACE(("state: async waiting\n"));
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
return B_INVOKE_SCHEDULER;
case ide_state_idle:
TRACE(("state: idle, num_running_reqs %d\n", bus->num_running_reqs));
// this must be a service request;
// if no request is pending, the IRQ was fired wrongly
if (bus->num_running_reqs == 0) {
IDE_UNLOCK(bus);
return B_UNHANDLED_INTERRUPT;
}
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
return B_INVOKE_SCHEDULER;
case ide_state_sync_waiting:
TRACE(("state: sync waiting\n"));
bus->state = ide_state_accessing;
bus->sync_wait_timeout = false;
IDE_UNLOCK(bus);
release_sem_etc(bus->sync_wait_sem, 1, B_DO_NOT_RESCHEDULE);
return B_INVOKE_SCHEDULER;
case ide_state_accessing:
TRACE(("state: spurious IRQ - there is a command being executed\n"));
IDE_UNLOCK(bus);
return B_UNHANDLED_INTERRUPT;
default:
dprintf("BUG: unknown state (%d)\n", bus->state);
IDE_UNLOCK(bus);
return B_UNHANDLED_INTERRUPT;
}
}
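// summary of the IRQ-side state transitions handled above:
//	async_waiting -> accessing: schedule ide_dpc to finish the active request
//	idle          -> accessing: schedule ide_dpc to service queued requests
//	                            (only if requests are actually pending)
//	sync_waiting  -> accessing: wake the thread blocked in wait_for_sync()
//	accessing     -> unchanged: spurious IRQ, ignored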
/** cancel IRQ timeout
* it doesn't matter whether there really was a timeout running;
* on return, bus state is set to _accessing_
*/
void
cancel_irq_timeout(ide_bus_info *bus)
{
FAST_LOG0(bus->log, ev_ide_cancel_irq_timeout);
IDE_LOCK(bus);
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
cancel_timer(&bus->timer.te);
}
/** start waiting for IRQ with bus lock held
* new_state must be either sync_wait or async_wait
*/
void
start_waiting(ide_bus_info *bus, uint32 timeout, int new_state)
{
int res;
FAST_LOG1(bus->log, ev_ide_start_waiting, new_state);
TRACE(("timeout = %u\n", (uint)timeout));
bus->state = new_state;
res = add_timer(&bus->timer.te, ide_timeout,
(bigtime_t)timeout * 1000000, B_ONE_SHOT_RELATIVE_TIMER);
if (res != B_OK)
panic("Error setting timeout (%s)", strerror(res));
IDE_UNLOCK(bus);
}
/** start waiting for IRQ with bus lock not held */
void
start_waiting_nolock(ide_bus_info *bus, uint32 timeout, int new_state)
{
IDE_LOCK(bus);
start_waiting(bus, timeout, new_state);
}
/** wait for sync IRQ */
void
wait_for_sync(ide_bus_info *bus)
{
acquire_sem(bus->sync_wait_sem);
cancel_timer(&bus->timer.te);
}
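/* Illustrative sketch (not part of this driver): the typical synchronous
   command pattern built from these primitives, mirroring e.g. the cache
   flush emulation - send_command() puts the bus into ide_state_sync_waiting,
   wait_for_sync() blocks until either the IRQ or the timeout fires, and
   bus->sync_wait_timeout tells the two apart afterwards: */
#if 0
static bool
example_sync_command(ide_device_info *device, ide_qrequest *qrequest)
{
	device->tf_param_mask = 0;
	device->tf.lba.command = IDE_CMD_FLUSH_CACHE;	// any non-data command

	if (!send_command(device, qrequest, true, 60, ide_state_sync_waiting))
		return false;

	wait_for_sync(device->bus);

	// set by ide_timeout() if we were woken by the timer, not the IRQ
	return !device->bus->sync_wait_timeout;
}
#endif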
/** timeout dpc handler */
static void
ide_timeout_dpc(void *arg)
{
ide_bus_info *bus = (ide_bus_info *)arg;
ide_qrequest *qrequest;
ide_device_info *device;
qrequest = bus->active_qrequest;
device = qrequest->device;
dprintf("ide: ide_timeout_dpc() bus %p, device %p\n", bus, device);
FAST_LOG1(bus->log, ev_ide_timeout_dpc, (uint32)qrequest);
// this also resets overlapped commands
reset_device(device, qrequest);
device->subsys_status = SCSI_CMD_TIMEOUT;
if (qrequest->uses_dma) {
if (++device->DMA_failures >= MAX_DMA_FAILURES) {
dprintf("Disabling DMA because of too many errors\n");
device->DMA_enabled = false;
}
}
// let upper layer do the retry
finish_checksense(qrequest);
}
/** timeout handler, called by system timer */
status_t
ide_timeout(timer *arg)
{
ide_bus_info *bus = ((ide_bus_timer_info *)arg)->bus;
FAST_LOG0(bus->log, ev_ide_timeout);
TRACE(("ide_timeout(): %p\n", bus));
dprintf("ide: ide_timeout() bus %p\n", bus);
// we need to lock bus to have a solid bus state
// (side effect: we lock out the IRQ handler)
IDE_LOCK(bus);
switch (bus->state) {
case ide_state_async_waiting:
TRACE(("async waiting\n"));
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_timeout_dpc, bus);
return B_INVOKE_SCHEDULER;
case ide_state_sync_waiting:
TRACE(("sync waiting\n"));
bus->state = ide_state_accessing;
bus->sync_wait_timeout = true;
IDE_UNLOCK(bus);
release_sem_etc(bus->sync_wait_sem, 1, B_DO_NOT_RESCHEDULE);
return B_INVOKE_SCHEDULER;
case ide_state_accessing:
TRACE(("came too late - IRQ occured already\n"));
IDE_UNLOCK(bus);
return B_DO_NOT_RESCHEDULE;
default:
// this case also happens if a timeout fires too late;
// unless there is a bug, the timeout should always be canceled
// before declaring bus as being idle
dprintf("BUG: unknown state (%d)\n", (int)bus->state);
IDE_UNLOCK(bus);
return B_DO_NOT_RESCHEDULE;
}
}
void
init_synced_pc(ide_synced_pc *pc, ide_synced_pc_func func)
{
pc->func = func;
pc->registered = false;
}
void
uninit_synced_pc(ide_synced_pc *pc)
{
if (pc->registered)
panic("Tried to clean up pending synced PC\n");
}
/** schedule a synced pc
* a synced pc gets executed as soon as the bus becomes idle
*/
status_t
schedule_synced_pc(ide_bus_info *bus, ide_synced_pc *pc, void *arg)
{
//TRACE(());
IDE_LOCK(bus);
if (pc->registered) {
// spc cannot be registered twice
TRACE(("already registered\n"));
IDE_UNLOCK(bus);
return B_ERROR;
} else if (bus->state != ide_state_idle) {
// bus isn't idle - spc must be added to pending list
TRACE(("adding to pending list\n"));
pc->next = bus->synced_pc_list;
bus->synced_pc_list = pc;
pc->arg = arg;
pc->registered = true;
IDE_UNLOCK(bus);
return B_OK;
}
// we're in luck - the bus is idle, so grab it before
// releasing the lock
TRACE(("exec immediately\n"));
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
TRACE(("go\n"));
pc->func(bus, arg);
TRACE(("finished\n"));
access_finished(bus, bus->first_device);
// meanwhile, we may have rejected SCSI commands;
// usually, the XPT resends them once a command
// has finished, but in this case XPT doesn't know
// about our "private" command, so we have to tell about
// idle bus manually
TRACE(("tell SCSI bus manager about idle bus\n"));
scsi->cont_send_bus(bus->scsi_cookie);
return B_OK;
}
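/* Illustrative sketch (hypothetical functions, not part of this driver):
   typical synced-pc lifecycle as used by the reconnect timeout code - the
   pc is initialized once and can then be scheduled whenever work has to be
   delayed until the bus is idle: */
#if 0
static void
example_worker(ide_bus_info *bus, void *arg)
{
	// runs with the bus allocated to us, i.e. in "accessing" state
}

static void
example_setup_and_schedule(ide_bus_info *bus, ide_synced_pc *pc, void *arg)
{
	init_synced_pc(pc, example_worker);	// once, at init time
	schedule_synced_pc(bus, pc, arg);	// whenever the work comes up
}
#endif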
/** execute list of synced pcs */
static void
exec_synced_pcs(ide_bus_info *bus, ide_synced_pc *pc_list)
{
ide_synced_pc *pc;
// no one removes items from pc_list, so we don't need a lock
// to access entries
for (pc = pc_list; pc; pc = pc->next) {
pc->func(bus, pc->arg);
}
// need lock now as items can be added to pc_list again as soon
// as <registered> is reset
IDE_LOCK(bus);
for (pc = pc_list; pc; pc = pc->next) {
pc->registered = false;
}
IDE_UNLOCK(bus);
}
/** finish bus access;
* check if any device wants to service pending commands + execute synced_pc
*/
void
access_finished(ide_bus_info *bus, ide_device_info *device)
{
TRACE(("bus = %p, device = %p\n", bus, device));
while (true) {
ide_synced_pc *synced_pc_list;
IDE_LOCK(bus);
// normally, there is always a device; the only exception is a
// bus without devices - not sure whether this can really happen though
if (device) {
if (try_service(device))
return;
}
// no one wants it, so execute pending synced_pc
if (bus->synced_pc_list == NULL) {
bus->state = ide_state_idle;
IDE_UNLOCK(bus);
return;
}
synced_pc_list = bus->synced_pc_list;
bus->synced_pc_list = NULL;
IDE_UNLOCK(bus);
exec_synced_pcs(bus, synced_pc_list);
// executed synced_pc may have generated other sync_pc,
// thus the loop
}
}

View File

@ -0,0 +1,121 @@
/*
* Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
VM helper functions.
Important assumption: get_memory_map must combine adjacent
physical pages, so contiguous memory always leads to an S/G
list of length one.
*/
#include "KernelExport_ext.h"
#include "wrapper.h"
#include <string.h>
/** get sg list of iovec
* TBD: this should be moved somewhere into the kernel
*/
status_t
get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
physical_entry *map, size_t max_entries, size_t *num_entries, size_t *mapped_len)
{
size_t cur_idx;
size_t left_len;
SHOW_FLOW(3, "vec_count=%lu, vec_offset=%lu, len=%lu, max_entries=%lu",
vec_count, vec_offset, len, max_entries);
// skip iovec blocks if needed
while (vec_count > 0 && vec_offset > vec->iov_len) {
vec_offset -= vec->iov_len;
--vec_count;
++vec;
}
for (left_len = len, cur_idx = 0; left_len > 0 && vec_count > 0 && cur_idx < max_entries;) {
char *range_start;
size_t range_len;
status_t res;
size_t cur_num_entries, cur_mapped_len;
uint32 tmp_idx;
SHOW_FLOW( 3, "left_len=%d, vec_count=%d, cur_idx=%d",
(int)left_len, (int)vec_count, (int)cur_idx );
// map one iovec
range_start = (char *)vec->iov_base + vec_offset;
range_len = min( vec->iov_len - vec_offset, left_len );
SHOW_FLOW( 3, "range_start=%x, range_len=%x",
(int)range_start, (int)range_len );
vec_offset = 0;
if ((res = get_memory_map(range_start, range_len, &map[cur_idx],
max_entries - cur_idx)) != B_OK) {
// according to the docs, no error is ever reported - argh!
SHOW_ERROR(1, "invalid io_vec passed (%s)", strerror(res));
return res;
}
// stupid: get_memory_map neither tells how many sg blocks
// are used nor whether there were enough sg blocks at all;
// -> determine that manually
cur_mapped_len = 0;
cur_num_entries = 0;
for (tmp_idx = cur_idx; tmp_idx < max_entries; ++tmp_idx) {
if (map[tmp_idx].size == 0)
break;
cur_mapped_len += map[tmp_idx].size;
++cur_num_entries;
}
if (cur_mapped_len == 0) {
panic("get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
(int)left_len, (int)cur_idx, (int)max_entries);
SHOW_ERROR(2, "get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
(int)left_len, (int)cur_idx, (int)max_entries);
return B_ERROR;
}
SHOW_FLOW( 3, "cur_num_entries=%d, cur_mapped_len=%x",
(int)cur_num_entries, (int)cur_mapped_len );
// try to combine with previous sg block
if (cur_num_entries > 0 && cur_idx > 0
&& map[cur_idx].address == (char *)map[cur_idx - 1].address + map[cur_idx - 1].size) {
SHOW_FLOW0( 3, "combine with previous chunk" );
map[cur_idx - 1].size += map[cur_idx].size;
// source and destination overlap, so memmove is required here
memmove(&map[cur_idx], &map[cur_idx + 1], (cur_num_entries - 1) * sizeof(map[0]));
--cur_num_entries;
}
cur_idx += cur_num_entries;
left_len -= cur_mapped_len;
// advance iovec if current one is described completely
if (cur_mapped_len == range_len) {
++vec;
--vec_count;
}
}
*num_entries = cur_idx;
*mapped_len = len - left_len;
SHOW_FLOW( 3, "num_entries=%d, mapped_len=%x",
(int)*num_entries, (int)*mapped_len );
return B_OK;
}
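/* Illustrative sketch (hypothetical caller, not part of this driver): the
   function maps up to "len" bytes starting "vec_offset" bytes into the
   iovec list; num_entries/mapped_len report how much could be described
   with the available physical_entry slots: */
#if 0
static status_t
example_map_buffer(iovec *vecs, size_t vec_count)
{
	physical_entry map[8];
	size_t num_entries, mapped_len;
	status_t res;

	res = get_iovec_memory_map(vecs, vec_count, 0 /* vec_offset */,
		4096 /* len */, map, 8, &num_entries, &mapped_len);
	if (res != B_OK)
		return res;

	// if mapped_len < 4096, the map array was too small; map the rest
	// with a follow-up call using an adjusted vec_offset
	return B_OK;
}
#endif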

View File

@ -0,0 +1,89 @@
#ifndef _WRAPPER_H
#define _WRAPPER_H
#include <KernelExport.h>
#include <lock.h>
// benaphores
#define INIT_BEN(x, prefix) benaphore_init(x, prefix)
#define DELETE_BEN(x) benaphore_destroy(x)
#define ACQUIRE_BEN(x) benaphore_lock(x)
#define RELEASE_BEN(x) benaphore_unlock(x)
// debug output
#ifdef DEBUG_WAIT_ON_MSG
# define DEBUG_WAIT snooze( DEBUG_WAIT_ON_MSG );
#else
# define DEBUG_WAIT
#endif
#ifdef DEBUG_WAIT_ON_ERROR
# define DEBUG_WAIT_ERROR snooze( DEBUG_WAIT_ON_ERROR );
#else
# define DEBUG_WAIT_ERROR
#endif
#ifndef DEBUG_MAX_LEVEL_FLOW
# define DEBUG_MAX_LEVEL_FLOW 4
#endif
#ifndef DEBUG_MAX_LEVEL_INFO
# define DEBUG_MAX_LEVEL_INFO 4
#endif
#ifndef DEBUG_MAX_LEVEL_ERROR
# define DEBUG_MAX_LEVEL_ERROR 4
#endif
#ifndef DEBUG_MSG_PREFIX
# define DEBUG_MSG_PREFIX ""
#endif
#ifndef debug_level_flow
# define debug_level_flow 4
#endif
#ifndef debug_level_info
# define debug_level_info 4
#endif
#ifndef debug_level_error
# define debug_level_error 4
#endif
#define FUNC_NAME DEBUG_MSG_PREFIX, __FUNCTION__
#define SHOW_FLOW(seriousness, format, param...) \
do { if( seriousness <= debug_level_flow && seriousness <= DEBUG_MAX_LEVEL_FLOW ) { \
dprintf( "%s%s: "format"\n", FUNC_NAME, param ); DEBUG_WAIT \
}} while( 0 )
#define SHOW_FLOW0(seriousness, format) \
do { if( seriousness <= debug_level_flow && seriousness <= DEBUG_MAX_LEVEL_FLOW ) { \
dprintf( "%s%s: "format"\n", FUNC_NAME); DEBUG_WAIT \
}} while( 0 )
#define SHOW_INFO(seriousness, format, param...) \
do { if( seriousness <= debug_level_info && seriousness <= DEBUG_MAX_LEVEL_INFO ) { \
dprintf( "%s%s: "format"\n", FUNC_NAME, param ); DEBUG_WAIT \
}} while( 0 )
#define SHOW_INFO0(seriousness, format) \
do { if( seriousness <= debug_level_info && seriousness <= DEBUG_MAX_LEVEL_INFO ) { \
dprintf( "%s%s: "format"\n", FUNC_NAME); DEBUG_WAIT \
}} while( 0 )
#define SHOW_ERROR(seriousness, format, param...) \
do { if( seriousness <= debug_level_error && seriousness <= DEBUG_MAX_LEVEL_ERROR ) { \
dprintf( "%s%s: "format"\n", FUNC_NAME, param ); DEBUG_WAIT_ERROR \
}} while( 0 )
#define SHOW_ERROR0(seriousness, format) \
do { if( seriousness <= debug_level_error && seriousness <= DEBUG_MAX_LEVEL_ERROR ) { \
dprintf( "%s%s: "format"\n", FUNC_NAME); DEBUG_WAIT_ERROR \
}} while( 0 )
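/* usage sketch: a source file defines its prefix and levels before
   including this header, e.g.

	#define DEBUG_MSG_PREFIX "ide -- "
	#define DEBUG_MAX_LEVEL_FLOW 3
	#include "wrapper.h"

	SHOW_FLOW(3, "transferred %d bytes", transferred);
	SHOW_ERROR0(1, "device did not answer");

   messages reach dprintf only when the seriousness passes both the
   compile-time DEBUG_MAX_LEVEL_* maximum and the run-time debug_level_*
   threshold */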
#endif /* _WRAPPER_H */