Remove dead code of ATA bus_manager. Note that the new ATA bus_manager now has

its own target named ata and doesn't reuse the ide name. Because of that we
can also add it to the build by default.


git-svn-id: file:///srv/svn/repos/haiku/haiku/trunk@30271 a95241bf-73f2-0310-859d-f6bbb57e9c96
This commit is contained in:
Michael Lotz 2009-04-19 23:02:54 +00:00
parent ca68245eb7
commit 14d5277c14
24 changed files with 1 additions and 5159 deletions

View File

@ -4,7 +4,7 @@ SubInclude HAIKU_TOP src add-ons kernel bus_managers acpi ;
SubInclude HAIKU_TOP src add-ons kernel bus_managers agp_gart ;
SubInclude HAIKU_TOP src add-ons kernel bus_managers config_manager ;
SubInclude HAIKU_TOP src add-ons kernel bus_managers firewire ;
#SubInclude HAIKU_TOP src add-ons kernel bus_managers ata ;
SubInclude HAIKU_TOP src add-ons kernel bus_managers ata ;
SubInclude HAIKU_TOP src add-ons kernel bus_managers ide ;
SubInclude HAIKU_TOP src add-ons kernel bus_managers isa ;
SubInclude HAIKU_TOP src add-ons kernel bus_managers pci ;

View File

@ -3,23 +3,6 @@ SubDir HAIKU_TOP src add-ons kernel bus_managers ata ;
UsePrivateHeaders drivers ;
UsePrivateKernelHeaders ;
KernelAddon ide :
ata.c
ata_tracing.cpp
ata_request.c
atapi.c
channels.c
devices.c
dma.c
emulation.c
ide.c
ide_sim.c
pio.c
scsi2ata.c
sync.c
virtual_memory.c
;
KernelAddon ata :
ATAChannel.cpp
ATADevice.cpp

View File

@ -1,31 +0,0 @@
/*
* Copyright 2004-2006, Haiku, Inc. All RightsReserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
Functions that are missing in kernel.
*/
#ifndef _KERNEL_EXPORT_EXT_H
#define _KERNEL_EXPORT_EXT_H
#include <KernelExport.h>
#include <iovec.h>
// Build the physical memory map of (a window into) an iovec.
// Skips <vec_offset> bytes, then describes the next <len> bytes of the
// scattered buffer as up to <max_entries> physical ranges in <map>.
// NOTE(review): likely returns B_OK on success and may describe fewer
// than <len> bytes (see *mapped_len) — confirm against the implementation.
status_t get_iovec_memory_map(
iovec *vec, // iovec to analyze
size_t vec_count, // number of entries in vec
size_t vec_offset, // number of bytes to skip at beginning of vec
size_t len, // number of bytes to analyze
physical_entry *map, // resulting memory map
size_t max_entries, // max number of entries in map
size_t *num_entries, // actual number of map entries used
size_t *mapped_len // actual number of bytes described by map
);
#endif // _KERNEL_EXPORT_EXT_H

File diff suppressed because it is too large Load Diff

View File

@ -1,166 +0,0 @@
#include "ata_request.h"
#include "ide_internal.h"
#include "scsi_cmds.h"
#include <string.h>
#define TRACE dprintf
/*! Initialize <request> for use by <device>: clear every field and
	record the owning device. */
void
ata_request_init(ata_request *request, struct ide_device_info *device)
{
	memset(request, 0, sizeof(ata_request));
	request->device = device;
}
/*! Start the request, but don't clear sense to allow
	retrieving the previous sense data.
	On return, *_request is the claimed request, or NULL if the bus was
	busy (the caller must retry the ccb later).
*/
void
ata_request_start(ata_request **_request, struct ide_device_info *device,
struct scsi_ccb *ccb)
{
ata_request *request;
// claim the bus under the lock; each device owns exactly one
// preallocated request which moves between requestFree/requestActive
IDE_LOCK(device->bus);
if (device->bus->state != ata_state_idle) {
request = NULL;
} else {
ASSERT(device->requestFree != NULL);
ASSERT(device->requestActive == NULL);
ASSERT(device->bus->active_device == NULL);
device->bus->state = ata_state_busy;
device->bus->active_device = device;
request = device->requestFree;
device->requestActive = request;
device->requestFree = NULL;
}
IDE_UNLOCK(device->bus);
*_request = request;
if (!request)
return; // bus was busy
ASSERT(request->device == device);
request->ccb = ccb;
/*
already set
request->is_write = 0;
request->uses_dma = 0;
request->packet_irq = 0;
*/
// XXX the following always triggers. Why?
// ASSERT(request->ccb->subsys_status == SCSI_REQ_INPROG);
// pretend success
request->ccb->subsys_status = SCSI_REQ_CMP;
// device_status always remains set to SCSI_STATUS_GOOD
// except when ata_request_set_checkcondition() is called.
request->ccb->device_status = SCSI_STATUS_GOOD;
}
/*! Discard any sense information stored on <request>. */
void
ata_request_clear_sense(ata_request *request)
{
	request->senseAscq = 0;
	request->senseAsc = 0;
	request->senseKey = 0;
}
/*! Record a subsystem (transport level) status on the request's ccb.
	SCSI_REQ_CMP must not be passed in; success is the preset default. */
void
ata_request_set_status(ata_request *request, uint8 status)
{
	ASSERT(status != SCSI_REQ_CMP);

	if (request == NULL || request->ccb == NULL)
		return;

	request->ccb->subsys_status = status;
}
/*! Store sense data on the request. <asc_acq> packs the additional
	sense code in its high byte and the qualifier in its low byte. */
void
ata_request_set_sense(ata_request *request, uint8 key, uint16 asc_acq)
{
	if (request == NULL)
		return;

	request->senseKey = key;
	request->senseAsc = (uint8)(asc_acq >> 8);
	request->senseAscq = (uint8)(asc_acq & 0xff);
}
/*! Complete the active request: convert stored sense data into a SCSI
	CHECK CONDITION (copying autosense data if the caller wants it),
	release the bus/request bookkeeping taken in ata_request_start(),
	and hand the ccb back to the SCSI stack.
	resubmit - true to requeue the ccb instead of finishing it.
*/
void
ata_request_finish(ata_request *request, bool resubmit)
{
scsi_ccb *ccb = request->ccb;
ASSERT(ccb);
if (ccb->subsys_status != SCSI_REQ_CMP || request->senseKey) {
TRACE("ata_request_finish: request %p, subsys_status 0x%02x, senseKey "
"%02x\n", request, ccb->subsys_status, request->senseKey);
}
// when the request completed and has set sense
// data, report this to the scsi stack by setting
// CHECK CONDITION status
if (ccb->subsys_status == SCSI_REQ_CMP && request->senseKey != 0) {
TRACE("ata_request_finish - setting check condition\n");
request->ccb->subsys_status = SCSI_REQ_CMP_ERR;
request->ccb->device_status = SCSI_STATUS_CHECK_CONDITION;
// copy sense data if caller requested it
if ((request->ccb->flags & SCSI_DIS_AUTOSENSE) == 0) {
scsi_sense sense;
int sense_len;
TRACE("ata_request_finish - copying autosense data\n");
// we cannot copy sense directly as sense buffer may be too small
scsi_set_sense(&sense, request);
ASSERT(sizeof(request->ccb->sense) == SCSI_MAX_SENSE_SIZE);
sense_len = min(sizeof(request->ccb->sense), sizeof(sense));
memcpy(request->ccb->sense, &sense, sense_len);
request->ccb->sense_resid = SCSI_MAX_SENSE_SIZE - sense_len;
request->ccb->subsys_status |= SCSI_AUTOSNS_VALID;
// device sense gets reset once it's read
ata_request_clear_sense(request);
ASSERT(request->ccb->subsys_status
== (SCSI_REQ_CMP_ERR | SCSI_AUTOSNS_VALID));
ASSERT(request->ccb->device_status == SCSI_STATUS_CHECK_CONDITION);
}
}
// undo the bookkeeping of ata_request_start() under the bus lock
IDE_LOCK(request->device->bus);
ASSERT(request->device->bus->state != ata_state_idle);
ASSERT(request->device->bus->active_device == request->device);
ASSERT(request->device->requestActive == request);
ASSERT(request->device->requestFree == NULL);
request->device->bus->state = ata_state_idle;
request->device->bus->active_device = NULL;
request->device->requestActive = NULL;
request->device->requestFree = request;
IDE_UNLOCK(request->device->bus);
// report completion outside the spinlock, serialized by a benaphore
ACQUIRE_BEN(&request->device->bus->status_report_ben);
if (resubmit)
scsi->resubmit(ccb);
else
scsi->finished(ccb, 1);
RELEASE_BEN(&request->device->bus->status_report_ben);
}

View File

@ -1,35 +0,0 @@
/*
* Copyright 2008 Marcus Overhagen. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef _ATA_REQUEST_H
#define _ATA_REQUEST_H
#include <SupportDefs.h>
// Per-device ATA request state. Each device owns exactly one of these;
// it shuttles between the device's requestFree and requestActive slots
// (see ata_request_start()/ata_request_finish()).
typedef struct ata_request {
struct ide_device_info * device;
struct scsi_ccb * ccb; // basic scsi request
uint8 is_write : 1; // true for write request
uint8 uses_dma : 1; // true if using dma
uint8 packet_irq : 1; // true if waiting for command packet irq
uint8 senseKey; // pending sense (0 = none)
uint8 senseAsc; // additional sense code
uint8 senseAscq; // additional sense code qualifier
bigtime_t timeout;
} ata_request;
struct scsi_ccb;
struct ide_device_info;
// zero the request and bind it to its owning device
void ata_request_init(ata_request *request, struct ide_device_info *device);
// claim the bus and start <ccb>; *_request is NULL if the bus was busy
void ata_request_start(ata_request **_request, struct ide_device_info *device, struct scsi_ccb *ccb);
// drop any stored sense data
void ata_request_clear_sense(ata_request *request);
// set the transport status of the ccb (never SCSI_REQ_CMP)
void ata_request_set_status(ata_request *request, uint8 status);
// store sense data; asc_acq = (asc << 8) | ascq
void ata_request_set_sense(ata_request *request, uint8 key, uint16 asc_acq);
// complete (or resubmit) the request and release the bus
void ata_request_finish(ata_request *request, bool resubmit);
#endif

View File

@ -1,61 +0,0 @@
/*
* Copyright 2008, Marcus Overhagen. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#include <tracing.h>
#include <string.h>
#include "ata_tracing.h"
#include "ide_internal.h"
#if ATA_TRACING
// Kernel tracing entry for ATA events; records bus/device index plus a
// preformatted message in the tracing buffer.
class ATATraceEntry : public AbstractTraceEntry {
public:
ATATraceEntry(int bus, int device, const char *info)
: fBus(bus)
, fDevice(device)
// copies <info> into the tracing buffer; 666 is presumably the
// maximum copy length — TODO confirm against
// alloc_tracing_buffer_strcpy()
, fInfo(alloc_tracing_buffer_strcpy(info, 666, false))
{
Initialized();
}
// called by the tracing framework to render this entry
void AddDump(TraceOutput& out)
{
out.Print("ata %d:%d %s", fBus, fDevice, fInfo);
}
int fBus;
int fDevice;
char *fInfo;
};
/*! printf-style trace hook for a single device; formats the message and
	files it as an ATATraceEntry. Allocation failure is ignored. */
extern "C" void
__ata_trace_device(ide_device_info *dev, const char *fmt, ...)
{
	char buffer[120];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buffer, sizeof(buffer), fmt, args);
	va_end(args);

	// the entry registers itself with the tracing system on construction
	new(std::nothrow) ATATraceEntry(dev->bus->path_id, dev->is_device1,
		buffer);
}
/*! printf-style trace hook addressing device <dev> on <bus> directly,
	for use before an ide_device_info exists. */
extern "C" void
__ata_trace_bus_device(ide_bus_info *bus, int dev, const char *fmt, ...)
{
	char buffer[120];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buffer, sizeof(buffer), fmt, args);
	va_end(args);

	new(std::nothrow) ATATraceEntry(bus->path_id, dev, buffer);
}
#endif

View File

@ -1,32 +0,0 @@
/*
 * Copyright 2008, Marcus Overhagen. All rights reserved.
 * Distributed under the terms of the MIT License.
 */
// Tracing macros for the ATA bus manager. T()/T2() expand to the
// tracing hooks when ATA_TRACING is enabled and to nothing otherwise.
// An include guard was missing; added to match _ATA_REQUEST_H style.
#ifndef _ATA_TRACING_H
#define _ATA_TRACING_H

#include "tracing_config.h"

#if ATA_TRACING

struct ide_bus_info;
struct ide_device_info;

#ifdef __cplusplus
extern "C" {
#endif

// format a trace message for a known device
void __ata_trace_device(struct ide_device_info *dev, const char *fmt, ...);
// format a trace message for device <dev> on <bus>
void __ata_trace_bus_device(struct ide_bus_info *bus, int dev, const char *fmt, ...);

#ifdef __cplusplus
}
#endif

#define T(dev, args...) __ata_trace_device(dev, args)
#define T2(bus, dev, args...) __ata_trace_bus_device(bus, dev, args)

#else

#define T(x...)
#define T2(x...)

#endif

#endif // _ATA_TRACING_H

View File

@ -1,544 +0,0 @@
/*
* Copyright 2004-2007, Haiku, Inc. All RightsReserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
ATAPI command protocol
*/
#include "ide_internal.h"
#include <scsi_cmds.h>
#include "ide_cmds.h"
#include "ide_sim.h"
#include <string.h>
// used for MODE SENSE/SELECT 6 emulation; maximum size is 255 + header,
// so this is a safe bet
#define IDE_ATAPI_BUFFER_SIZE 512
#define TRACE dprintf
#define FLOW dprintf
/*!
	Set sense according to error reported by device
	return: true - device reported error
	Reads the alternate status register (which does not clear a pending
	IRQ) and, on error, the error register to classify the failure.
*/
static bool
check_packet_error(ide_device_info *device, ata_request *request)
{
ide_bus_info *bus = device->bus;
int status;
status = bus->controller->get_altstatus(bus->channel_cookie);
if ((status & (ide_status_err | ide_status_df)) != 0) {
int error;
SHOW_FLOW(3, "packet error, status=%02x", status);
if (bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_error) != B_OK) {
// couldn't even read the error register - report a HBA failure
ata_request_set_status(request, SCSI_HBA_ERR);
return true;
}
// the upper 4 bits contain sense key
// we don't want to clutter syslog with "not ready" and UA messages,
// so use FLOW messages for them
error = device->tf.read.error;
if ((error >> 4) == SCSIS_KEY_NOT_READY
|| (error >> 4) == SCSIS_KEY_UNIT_ATTENTION)
SHOW_FLOW(3, "error=%x", error);
else
SHOW_ERROR(3, "error=%x", error);
// ATAPI says that:
// "ABRT shall be set to one if the requested command has been command
// aborted because the command code or a command parameter is invalid.
// ABRT may be set to one if the device is not able to complete the
// action requested by the command."
// Effectively, it can be set if "something goes wrong", including
// if the medium got changed. Therefore, we currently ignore the bit
// and rely on auto-sense information
/*
if ((error & ide_error_abrt) != 0) {
// if command got aborted, there's no point in reading sense
set_sense(device, SCSIS_KEY_ABORTED_COMMAND, SCSIS_ASC_NO_SENSE);
return false;
}
*/
// tell SCSI layer that sense must be requested
// (we don't take care of auto-sense ourselve)
// XXX broken!
/* device->subsys_status = SCSI_REQ_CMP_ERR;
request->ccb->device_status = SCSI_STATUS_CHECK_CONDITION;
// reset pending emulated sense - its overwritten by a real one
device->combined_sense = 0;
*/
return true;
}
return false;
}
/*! IRQ handler of packet transfer (executed as DPC)
	Handles the three continuation cases of an ATAPI command: the device
	requesting the command packet, completion of a DMA transfer, and
	PIO data phases.
	NOTE(review): the entire body is disabled via #if 0 - as committed,
	this function is a no-op.
*/
void
packet_dpc(ata_request *request)
{
#if 0
ide_device_info *device = request->device;
ide_bus_info *bus = device->bus;
int status;
uint32 timeout = request->ccb->timeout > 0 ?
request->ccb->timeout * 1000000 : IDE_STD_TIMEOUT;
SHOW_FLOW0(3, "");
bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_error | ide_mask_ireason);
status = bus->controller->get_altstatus(bus->channel_cookie);
if (request->packet_irq) {
// device requests packet
request->packet_irq = false;
// the device must signal "command, output" with DRQ set
if (!device->tf.packet_res.cmd_or_data
|| device->tf.packet_res.input_or_output
|| (status & ide_status_drq) == 0) {
ata_request_set_status(request, SCSI_SEQUENCE_FAIL);
goto err;
}
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
// send packet
if (bus->controller->write_pio(bus->channel_cookie,
(uint16 *)device->packet, sizeof(device->packet) / sizeof(uint16),
true) != B_OK) {
SHOW_ERROR0( 1, "Error sending command packet" );
ata_request_set_status(request, SCSI_HBA_ERR);
goto err_cancel_timer;
}
return;
}
if (request->uses_dma) {
// DMA transmission finished
bool dma_err, dev_err;
// don't check drq - if there is some data left, we cannot handle
// it anyway
// XXX does the device throw remaining data away on DMA overflow?
SHOW_FLOW0(3, "DMA done");
dma_err = !finish_dma(device);
dev_err = check_packet_error(device, request);
// what to do if both the DMA controller and the device reports an error?
// let's assume that the DMA controller got problems because there was a
// device error, so we ignore the dma error and use the device error instead
if (dev_err) {
finish_checksense(request);
return;
}
// device is happy, let's see what the controller says
if (!dma_err) {
// if DMA works, reset error counter so we don't disable
// DMA only because it didn't work once in a while
device->DMA_failures = 0;
// this is a lie, but there is no way to find out
// how much has been transmitted
request->ccb->data_resid = 0;
finish_checksense(request);
} else {
// DMA transmission went wrong
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);
if (++device->DMA_failures >= MAX_DMA_FAILURES) {
SHOW_ERROR0(1, "Disabling DMA because of too many errors");
device->DMA_enabled = false;
}
finish_checksense(request);
}
return;
}
// PIO mode
if ((status & ide_status_drq) != 0) {
// device wants to transmit data
int length;
status_t err;
SHOW_FLOW0(3, "data transmission");
if (device->tf.packet_res.cmd_or_data) {
ata_request_set_status(request, SCSI_SEQUENCE_FAIL);
goto err;
}
// check whether transmission direction matches
if ((device->tf.packet_res.input_or_output ^ request->is_write) == 0) {
SHOW_ERROR0(2, "data transmission in wrong way!?");
// TODO: hm, either the device is broken or the caller has specified
// the wrong direction - what is the proper handling?
set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_LUN_COM_FAILURE);
// reset device to make it alive
// TODO: the device will abort next command with a reset condition
// perhaps we should hide that by reading sense?
SHOW_FLOW0(3, "Reset");
// reset_device(device, request);
finish_checksense(request);
return;
}
// ask device how much data it wants to transmit
bus->controller->read_command_block_regs(bus->channel_cookie,
&device->tf, ide_mask_byte_count);
length = device->tf.packet_res.byte_count_0_7
| ((int)device->tf.packet_res.byte_count_8_15 << 8);
SHOW_FLOW(3, "device transmittes %d bytes", length);
// start waiting before starting transmission, else we
// could start waiting too late;
// don't mind getting overtaken by IRQ handler - as it will
// issue a DPC for the thread context we are in, we are save
start_waiting_nolock(device->bus, timeout, ide_state_async_waiting);
if (device->tf.packet_res.input_or_output)
err = read_PIO_block(request, length);
else
err = write_PIO_block(request, length);
// only report "real" errors;
// discarding data (ERR_TOO_BIG) can happen but is OK
if (err == B_ERROR) {
SHOW_ERROR0(2, "Error during PIO transmission");
ata_request_set_status(request, SCSI_HBA_ERR);
goto err_cancel_timer;
}
SHOW_FLOW0(3, "7");
return;
} else {
// device has done job and doesn't want to transmit data anymore
// -> finish ccb
SHOW_FLOW0(3, "no data");
check_packet_error(device, request);
SHOW_FLOW(3, "finished: %d of %d left",
(int)request->ccb->data_resid,
(int)request->ccb->data_length);
finish_checksense(request);
return;
}
return;
err_cancel_timer:
cancel_irq_timeout(device->bus);
err:
finish_checksense(request);
#endif
}
/*! Create taskfile for ATAPI packet
	Fills in the features/byte-count registers and the PACKET command;
	the byte count advertises the expected per-DRQ transfer size.
	Always returns true; the <write> parameter is currently unused.
*/
static bool
create_packet_taskfile(ide_device_info *device, ata_request *request,
bool write)
{
scsi_ccb *ccb = request->ccb;
SHOW_FLOW(3, "DMA enabled=%d, uses_dma=%d, scsi_cmd=%x",
device->DMA_enabled, request->uses_dma, device->packet[0]);
device->tf_param_mask = ide_mask_features | ide_mask_byte_count;
device->tf.packet.dma = request->uses_dma;
device->tf.packet.ovl = 0;
device->tf.packet.byte_count_0_7 = ccb->data_length & 0xff;
device->tf.packet.byte_count_8_15 = ccb->data_length >> 8;
device->tf.packet.command = IDE_CMD_PACKET;
return true;
}
/*! Send ATAPI packet
	Transmits the 12 byte command packet (via PIO) and then either kicks
	off DMA or starts waiting for the device, depending on the mode
	negotiated for this request. <write> is the data direction.
	NOTE(review): body is disabled via #if 0 - currently a no-op.
	Fix: the err_packet path called data_request_set_status(), which is
	not declared anywhere in this driver; the intended function is
	ata_request_set_status() (would not have compiled if re-enabled).
*/
void
send_packet(ide_device_info *device, ata_request *request, bool write)
{
#if 0
	ide_bus_info *bus = device->bus;
	bool packet_irq = device->atapi.packet_irq;
	uint8 scsi_cmd = device->packet[0];

	SHOW_FLOW( 3, "request=%p, command=%x", request, scsi_cmd );

	/*{
		unsigned int i;

		for( i = 0; i < sizeof( device->packet ); ++i )
			dprintf( "%x ", device->packet[i] );
	}*/

	SHOW_FLOW(3, "%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x (len=%d)",
		device->packet[0], device->packet[1], device->packet[2],
		device->packet[3], device->packet[4], device->packet[5],
		device->packet[6], device->packet[7], device->packet[8],
		device->packet[9], device->packet[10], device->packet[11],
		request->ccb->cdb_length);

	//snooze( 1000000 );

	request->is_write = write;
	// if needed, mark first IRQ as being packet ccb IRQ
	request->packet_irq = packet_irq;

	// only READ/WRITE commands can use DMA
	// (the device may support it always, but IDE controllers don't
	// report how much data is transmitted, and this information is
	// crucial for the SCSI protocol)
	// special offer: let READ_CD commands use DMA too
	request->uses_dma = device->DMA_enabled
		&& (scsi_cmd == SCSI_OP_READ_6 || scsi_cmd == SCSI_OP_WRITE_6
		|| scsi_cmd == SCSI_OP_READ_10 || scsi_cmd == SCSI_OP_WRITE_10
		|| scsi_cmd == SCSI_OP_READ_12 || scsi_cmd == SCSI_OP_WRITE_12
		|| scsi_cmd == SCSI_OP_READ_CD);

	// try preparing DMA, if that fails, fall back to PIO
	if (request->uses_dma) {
		SHOW_FLOW0(3, "0");
		if (!prepare_dma( device, request))
			request->uses_dma = false;
		SHOW_FLOW(3, "0->%d", request->uses_dma);
	}

	SHOW_FLOW0(3, "1");

	if (!request->uses_dma)
		prep_PIO_transfer(device, request);

	SHOW_FLOW0(3, "2");

	if (!create_packet_taskfile(device, request, write))
		goto err_setup;

	SHOW_FLOW0(3, "3");

	if (!send_command(device, request, DRDY_NOT_REQUIRED,
			device->atapi.packet_irq_timeout,
			device->atapi.packet_irq ? ide_state_async_waiting : ide_state_accessing))
		goto err_setup;

	SHOW_FLOW0(3, "4");

	if (packet_irq) {
		// device asks for packet via IRQ;
		// timeout and stuff is already set by send_command
		return;
	}

	SHOW_FLOW0(3, "5");

	// wait for device to get ready for packet transmission
	if (!ide_wait(device, ide_status_drq, ide_status_bsy, false, 100000))
		goto err_setup;

	SHOW_FLOW0(3, "6");

	// make sure device really asks for command packet
	bus->controller->read_command_block_regs(bus->channel_cookie, &device->tf,
		ide_mask_ireason);

	if (!device->tf.packet_res.cmd_or_data
		|| device->tf.packet_res.input_or_output) {
		ata_request_set_status(request, SCSI_SEQUENCE_FAIL);
		goto err_setup;
	}

	SHOW_FLOW0(3, "7");

	// some old drives need a delay before submitting the packet
	spin(10);

	// locking is evil here: as soon as the packet is transmitted, the device
	// may raise an IRQ (which actually happens if the device reports an Check
	// Condition error). Thus, we have to lock out the IRQ handler _before_ we
	// start packet transmission, which forbids all kind of interrupts for some
	// time; to reduce this period, blocking is done just before last dword is
	// sent (avoid sending 16 bits as controller may transmit 32 bit chunks)

	// write packet
	if (bus->controller->write_pio(bus->channel_cookie,
			(uint16 *)device->packet, sizeof(device->packet) / sizeof(uint16) - 2,
			true) != B_OK) {
		goto err_packet;
	}

	IDE_LOCK(bus);

	if (bus->controller->write_pio(bus->channel_cookie,
			(uint16 *)device->packet + sizeof(device->packet) / sizeof(uint16) - 2,
			2, true) != B_OK) {
		goto err_packet2;
	}

	if (request->uses_dma) {
		SHOW_FLOW0( 3, "ready for DMA" );
		// S/G table must already be setup - we hold the bus lock, so
		// we really have to hurry up
		start_dma_wait(device, request);
	} else {
		uint32 timeout = request->ccb->timeout > 0 ?
			request->ccb->timeout * 1000000 : IDE_STD_TIMEOUT;
		start_waiting(bus, timeout, ide_state_async_waiting);
	}

	SHOW_FLOW0(3, "8");
	return;

err_packet2:
	IDE_UNLOCK(bus);
err_packet:
	// fixed: was data_request_set_status() (undeclared)
	ata_request_set_status(request, SCSI_HBA_ERR);
err_setup:
	if (request->uses_dma)
		abort_dma(device, request);
	finish_checksense(request);
#endif
}
/*! Execute a SCSI request on an ATAPI device.
	REQUEST SENSE is answered from the sense data stored by the previous
	command; every other opcode is currently rejected as unsupported.
	In all cases the request is finished (never resubmitted). */
void
atapi_exec_io(ide_device_info *device, ata_request *request)
{
	scsi_ccb *ccb = request->ccb;

	TRACE("atapi_exec_io\n");

	if (ccb->cdb[0] == SCSI_OP_REQUEST_SENSE) {
		// The SCSI stack uses REQUEST SENSE to fetch the sense data of
		// the previous command, so the stored sense must not be cleared
		// beforehand.
		scsi_request_sense(device, request);
	} else {
		// anything else: report an illegal/unsupported opcode
		ata_request_clear_sense(request);
		FLOW("command not implemented\n");
		ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST,
			SCSIS_ASC_INV_OPCODE);
	}

	ata_request_finish(request, false /* no resubmit */);
}
/*! Prepare device info for ATAPI device
	Validates the identify data, determines whether the device signals
	the command-packet phase by IRQ, and records device type/LUN count.
	Returns B_ERROR if the identify data doesn't describe a usable
	ATAPI device.
*/
status_t
configure_atapi_device(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
dprintf("configure_atapi_device\n");
device->is_atapi = true;
device->exec_io = atapi_exec_io;
// presumably the identify-data ATAPI signature field; 2 marks a valid
// ATAPI device — TODO confirm against ide_device_infoblock
if (infoblock->_0.atapi.ATAPI != 2)
return B_ERROR;
// drq_speed describes how the device announces readiness for the
// command packet; 1 means "by interrupt", 0/2 mean polling
switch(infoblock->_0.atapi.drq_speed) {
case 0:
case 2:
device->atapi.packet_irq = false;
break;
case 1:
device->atapi.packet_irq = true;
device->atapi.packet_irq_timeout = IDE_STD_TIMEOUT;
break;
default:
return B_ERROR;
}
SHOW_FLOW(3, "drq speed: %d", infoblock->_0.atapi.drq_speed);
/*if( infoblock->_0.atapi.packet_size != 0 )
return false;*/
device->device_type = infoblock->_0.atapi.type;
device->last_lun = infoblock->last_lun;
SHOW_FLOW(3, "device_type=%d, last_lun=%d",
device->device_type, device->last_lun);
// don't use task file to select LUN but command packet
// (SCSI bus manager sets LUN there automatically)
device->tf.packet.lun = 0;
if (!configure_dma(device))
return B_ERROR;
return B_OK;
}

View File

@ -1,109 +0,0 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Manager of IDE controllers
Whenever a new IDE channel is reported, a new SIM is
registered at the SCSI bus manager.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include <string.h>
#include <malloc.h>
#define TRACE dprintf
/*! Called when an IDE channel was registered by a controller driver.
	Allocates a channel id and registers a SCSI SIM node for the new
	channel. Returns B_NO_MEMORY on any failure.
	Fix: channel_id was declared uint32, making the `channel_id < 0`
	error check always false; create_id() reports failure with a
	negative value, so the id must be kept signed. */
static status_t
ide_channel_added(device_node *parent)
{
	const char *controller_name = NULL;
	int32 channel_id;

	TRACE("ide_channel_added, parent is %p\n", parent);

	if (pnp->get_attr_string(parent, IDE_CONTROLLER_CONTROLLER_NAME_ITEM,
			&controller_name, true) != B_OK) {
		dprintf("ide: ignored controller - controller name missing\n");
		goto err;
	}

	channel_id = pnp->create_id(IDE_CHANNEL_ID_GENERATOR);
	if (channel_id < 0) {
		dprintf("Cannot register IDE controller %s - out of IDs", controller_name);
		goto err;
	}

	{
		device_attr attrs[] =
		{
			{ B_DEVICE_FIXED_CHILD, B_STRING_TYPE, { string: SCSI_FOR_SIM_MODULE_NAME }},
			{ SCSI_DESCRIPTION_CONTROLLER_NAME, B_STRING_TYPE,
				{ string: controller_name }},
			// maximum number of blocks per transmission:
			// - ATAPI uses packets, i.e. normal SCSI limits apply
			//   but I'm not sure about controller restrictions
			// - ATA allows up to 256 blocks
			// - some broken disk's firmware (read: IBM DTTA drives)
			//   don't like 256 blocks in command queuing mode
			// -> use 255 blocks as a least common nominator
			// (this is still 127.5K for HDs and 510K for CDs,
			//  which should be sufficient)
			// Note: to fix specific drive bugs, use ide_sim_get_restrictions()
			// in ide_sim.c!
			{ B_DMA_MAX_TRANSFER_BLOCKS, B_UINT32_TYPE, { ui32: 255 }},
			{ IDE_CHANNEL_ID_ITEM, B_UINT32_TYPE, { ui32: (uint32)channel_id }},
			{ NULL }
		};

		return pnp->register_node(parent, IDE_SIM_MODULE_NAME, attrs, NULL,
			NULL);
	}

err:
	return B_NO_MEMORY;
}
/*! Standard module hooks; this module needs no init/uninit work. */
static status_t
std_ops(int32 op, ...)
{
	if (op == B_MODULE_INIT || op == B_MODULE_UNINIT)
		return B_OK;

	return B_ERROR;
}
// Module export: the interface controller drivers bind against.
// NOTE(review): the NULL slots are unused driver hooks (their exact
// roles depend on the driver_module_info layout — confirm against
// the device manager headers).
ide_for_controller_interface ide_for_controller_module = {
{
{
IDE_FOR_CONTROLLER_MODULE_NAME,
0,
&std_ops
},
NULL, // supported devices
ide_channel_added,
NULL,
NULL,
NULL
},
ide_irq_handler
};

View File

@ -1,189 +0,0 @@
/*
** Copyright 2007, Marcus Overhagen. All rights reserved.
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the Haiku License.
*/
/*
Part of Open IDE bus manager
Device manager
As the IDE bus manager is an SCSI to IDE translater, it
has to know a bit more about connected devices then a standard
SIM. This file contains device detection and classification.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include "ide_cmds.h"
#include <string.h>
#include <malloc.h>
#include <ByteOrder.h>
#define TRACE(x...) dprintf("IDE: " x)
static void setup_device_links(ide_bus_info *bus, ide_device_info *device);
static void cleanup_device_links(ide_device_info *device);
/** Create the info structure for the device in slot <is_device1> of
 *  <bus>, link it into the bus, preallocate its single ata_request and
 *  disable device interrupts.
 *  Returns NULL if out of memory.
 *  Fix: the malloc of requestFree was unchecked; ata_request_init()
 *  would have dereferenced NULL on allocation failure.
 */
ide_device_info *
create_device(ide_bus_info *bus, bool is_device1)
{
	ide_device_info *device;

	TRACE("create_device: bus %p, device-number %d\n", bus, is_device1);

	device = (ide_device_info *)malloc(sizeof(*device));
	if (device == NULL)
		return NULL;

	memset(device, 0, sizeof(*device));

	device->is_device1 = is_device1;
	device->target_id = is_device1;

	setup_device_links(bus, device);

	device->DMA_failures = 0;
	device->num_failed_send = 0;
	device->total_sectors = 0;

	device->requestActive = NULL;
	device->requestFree = (ata_request *)malloc(sizeof(ata_request));
	if (device->requestFree == NULL) {
		// don't leak the half-constructed device
		cleanup_device_links(device);
		free(device);
		return NULL;
	}
	ata_request_init(device->requestFree, device);

	// disable interrupts
	bus->controller->write_device_control(bus->channel_cookie,
		ide_devctrl_bit3 | ide_devctrl_nien);

	// make sure LBA bit is set, and initialize device selection flag
	device->tf.chs.head = 0;
	device->tf.chs.mode = ide_mode_lba;
	device->tf.chs.device = is_device1;
	bus->controller->write_command_block_regs(bus->channel_cookie,
		&device->tf, ide_mask_device_head);

	return device;
}
/** Tear down a device info created by create_device(): unlink it from
 *  its bus and release all memory. */
void
destroy_device(ide_device_info *device)
{
	TRACE("destroy_device: device %p\n", device);

	// paranoia: make sure nobody dispatches I/O on a dying device
	device->exec_io = NULL;

	if (device->requestActive != NULL)
		dprintf("destroy_device: Warning request still active\n");

	free(device->requestFree);
	cleanup_device_links(device);
	free(device);
}
/** Wire up the cross links between <bus> and its new <device>. */
static void
setup_device_links(ide_bus_info *bus, ide_device_info *device)
{
	TRACE("setup_device_links: bus %p, device %p\n", bus, device);

	device->bus = bus;
	bus->devices[device->is_device1] = device;
}
/** Remove <device> from its bus' device table (called on deletion). */
static void
cleanup_device_links(ide_device_info *device)
{
	TRACE("cleanup_device_links: device %p\n", device);

	device->bus->devices[device->is_device1] = NULL;
}
// Swap an array of <n> 16-bit big-endian values to host order in place.
// On big-endian hosts this is a no-op. Comments cannot be placed inside
// the line-continued macro body, hence this note up front.
#if B_HOST_IS_LENDIAN
#define B_BENDIAN_TO_HOST_MULTI(v, n) do { \
size_t __swap16_multi_n = (n); \
uint16 *__swap16_multi_v = (v); \
\
while( __swap16_multi_n ) { \
*__swap16_multi_v = B_SWAP_INT16(*__swap16_multi_v); \
__swap16_multi_v++; \
__swap16_multi_n--; \
} \
} while (0)
#else
#define B_BENDIAN_TO_HOST_MULTI(v, n)
#endif
/** prepare infoblock for further use, i.e. fix endianess
 *  ATA identify strings are stored as big-endian 16-bit words, while
 *  the sector counts are little-endian integers.
 */
static void
fix_infoblock_endian(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
B_BENDIAN_TO_HOST_MULTI((uint16 *)infoblock->serial_number,
sizeof(infoblock->serial_number) / 2);
B_BENDIAN_TO_HOST_MULTI( (uint16 *)infoblock->firmware_version,
sizeof(infoblock->firmware_version) / 2);
B_BENDIAN_TO_HOST_MULTI( (uint16 *)infoblock->model_number,
sizeof(infoblock->model_number) / 2);
infoblock->LBA_total_sectors = B_LENDIAN_TO_HOST_INT32(infoblock->LBA_total_sectors);
infoblock->LBA48_total_sectors = B_LENDIAN_TO_HOST_INT64(infoblock->LBA48_total_sectors);
}
/** Read and byte-swap the identify data of one device.
 *  If an ATA identify fails the device may be ATAPI, so one retry is
 *  made with the ATAPI identify command. */
status_t
scan_device(ide_device_info *device, bool isAtapi)
{
	status_t result;

	dprintf("ATA: scan_device\n");

	result = ata_identify_device(device, isAtapi);
	if (result != B_OK && !isAtapi) {
		dprintf("ATA: scan_device: possibly ATAPI, retrying identify\n");
		isAtapi = true;
		result = ata_identify_device(device, isAtapi);
	}

	if (result != B_OK) {
		dprintf("ATA: couldn't read infoblock for device %p\n", device);
		return B_ERROR;
	}

	fix_infoblock_endian(device);
	return B_OK;
}
/** Dispatch device configuration to the ATA or ATAPI variant. */
status_t
configure_device(ide_device_info *device, bool isAtapi)
{
	dprintf("ATA: configure_device\n");

	return isAtapi
		? configure_atapi_device(device) : configure_ata_device(device);
}

View File

@ -1,141 +0,0 @@
/*
* Copyright 2004-2007, Haiku, Inc. All RightsReserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
//! DMA helper functions
#include "ide_internal.h"
// Helper for get_device_dma_mode(): if the identify-info field <elem>
// flags this transfer mode as selected, record <this_mode> in <mode>
// and count it in <num_modes>.
// Wrapped in do { } while (0) so the macro expands to a single
// statement (safe in unbraced if/else chains) and all arguments are
// parenthesized against precedence surprises.
#define CHECK_DEV_DMA_MODE(infoblock, elem, mode, this_mode, num_modes) \
	do { \
		if ((infoblock)->elem) { \
			(mode) = (this_mode); \
			++(num_modes); \
		} \
	} while (0)
/** Determine the DMA mode selected by the device from its identify
 *  data. Returns the mode number (0x0x = multiword DMA, 0x1x = UDMA),
 *  or -1 if DMA is unsupported or the data is inconsistent (exactly
 *  one mode must be flagged as selected).
 */
int
get_device_dma_mode(ide_device_info *device)
{
ide_device_infoblock *infoblock = &device->infoblock;
int num_modes, mode;
mode = 0;
num_modes = 0;
if (!infoblock->DMA_supported)
return -1;
CHECK_DEV_DMA_MODE(infoblock, MDMA0_selected, mode, 0, num_modes);
CHECK_DEV_DMA_MODE(infoblock, MDMA1_selected, mode, 1, num_modes);
CHECK_DEV_DMA_MODE(infoblock, MDMA2_selected, mode, 2, num_modes);
// UDMA fields are only valid if identify word 88 is flagged valid
if (infoblock->_88_valid) {
CHECK_DEV_DMA_MODE(infoblock, UDMA0_selected, mode, 0x10, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA1_selected, mode, 0x11, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA2_selected, mode, 0x12, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA3_selected, mode, 0x13, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA4_selected, mode, 0x14, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA5_selected, mode, 0x15, num_modes);
CHECK_DEV_DMA_MODE(infoblock, UDMA6_selected, mode, 0x16, num_modes);
}
if (num_modes != 1)
return -1;
SHOW_FLOW(3, "%x", mode);
return mode;
}
/** Decide whether DMA may be used for <device>.
 *  DMA_supported reflects bus capability plus a valid device DMA mode.
 *  NOTE(review): DMA is then unconditionally forced off (debug hack,
 *  see the XXX message) — DMA_enabled is always false as committed.
 */
bool
configure_dma(ide_device_info *device)
{
device->DMA_enabled = device->DMA_supported = device->bus->can_DMA
&& get_device_dma_mode(device) != -1;
dprintf("XXX DISABLING DMA\n");
device->DMA_enabled = false;
return true;
}
/*! Abort a running DMA transmission.
	Must be called _before_ start_dma_wait. */
void
abort_dma(ide_device_info *device, ata_request *request)
{
	SHOW_FLOW0(0, "");

	device->bus->controller->finish_dma(device->bus->channel_cookie);
}
/*! Hand the request's S/G list to the controller's DMA engine.
	On success the engine waits for the device to transmit data.
	warning: doesn't set sense data on error */
bool
prepare_dma(ide_device_info *device, ata_request *request)
{
	ide_bus_info *bus = device->bus;
	scsi_ccb *ccb = request->ccb;

	return bus->controller->prepare_dma(bus->channel_cookie, ccb->sg_list,
		ccb->sg_count, request->is_write) == B_OK;
}
/*! Start waiting for DMA to be finished
	Kicks the controller's DMA engine and arms the bus timeout.
	NOTE(review): body is disabled via #if 0 - currently a no-op.
*/
void
start_dma_wait(ide_device_info *device, ata_request *request)
{
#if 0
ide_bus_info *bus = device->bus;
bus->controller->start_dma(bus->channel_cookie);
start_waiting(bus, request->ccb->timeout > 0 ?
request->ccb->timeout : IDE_STD_TIMEOUT, ide_state_async_waiting);
#endif
}
/*! Start waiting for DMA to be finished, entered with the bus lock
	not held.
	NOTE(review): IDE_LOCK is taken here, but start_dma_wait() is
	currently compiled out (#if 0) and nothing releases the spinlock —
	this path leaks the lock if ever reached. Verify reachability
	before re-enabling DMA.
*/
void
start_dma_wait_no_lock(ide_device_info *device, ata_request *request)
{
	ide_bus_info *bus = device->bus;

	IDE_LOCK(bus);
	start_dma_wait(device, request);
}
/*! Finish a DMA transmission after the device has fired its IRQ.
	Returns true if the controller reports success; a data overrun is
	tolerated as well.
*/
bool
finish_dma(ide_device_info *device)
{
	ide_bus_info *ideBus = device->bus;
	status_t result = ideBus->controller->finish_dma(ideBus->channel_cookie);

	return result == B_OK || result == B_DEV_DATA_OVERRUN;
}

View File

@ -1,77 +0,0 @@
/*
* Copyright 2004-2007, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
//! General SCSI emulation routines
#include "ide_internal.h"
#include "ide_sim.h"
#include <vm.h>
#include <string.h>
/*! Copy data between ccb data and buffer

	ccb              - ccb to copy data from/to
	offset           - offset of data in ccb
	allocationLength - limit of ccb's data buffer according to CDB
	buffer           - data to copy data from/to
	size             - number of bytes to copy
	toBuffer         - true: copy from ccb to buffer
	                   false: copy from buffer to ccb
	return: true, if the ccb's data buffer was large enough
*/
bool
copy_sg_data(scsi_ccb *ccb, uint offset, uint allocationLength,
	void *buffer, int size, bool toBuffer)
{
	const physical_entry *sgList = ccb->sg_list;
	int sgCount = ccb->sg_count;
	int requestSize;

	SHOW_FLOW(3, "offset=%u, req_size_limit=%d, size=%d, sg_list=%p, sg_cnt=%d, %s buffer",
		offset, allocationLength, size, sgList, sgCount, toBuffer ? "to" : "from");

	// skip unused S/G entries
	while (sgCount > 0 && offset >= sgList->size) {
		offset -= sgList->size;
		++sgList;
		--sgCount;
	}

	if (sgCount == 0) {
		// fix: this is a bool function — the original returned the
		// integer literal 0 here
		return false;
	}

	// remaining bytes we are allowed to copy from/to ccb
	requestSize = min(allocationLength, ccb->data_length) - offset;

	// copy one S/G entry at a time
	for (; size > 0 && requestSize > 0 && sgCount > 0; ++sgList, --sgCount) {
		size_t bytes;

		bytes = min(size, requestSize);
		bytes = min(bytes, sgList->size);

		SHOW_FLOW(4, "buffer=%p, virt_addr=%p, bytes=%d, to_buffer=%d",
			buffer, (void *)((addr_t)sgList->address + offset), (int)bytes,
			toBuffer);

		// sgList holds physical addresses, so go through the VM helpers
		if (toBuffer) {
			vm_memcpy_from_physical(buffer, (addr_t)sgList->address + offset,
				bytes, false);
		} else {
			vm_memcpy_to_physical((addr_t)sgList->address + offset, buffer,
				bytes, false);
		}

		buffer = (char *)buffer + bytes;
		size -= bytes;
		// offset applies to the first (partially consumed) entry only
		offset = 0;
	}

	return size == 0;
}

View File

@ -1,21 +0,0 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*! Contains interface used by IDE controller driver. */

#include "ide_internal.h"
#include "ide_sim.h"

// device manager; resolved via module_dependencies when the module loads
device_manager_info *pnp;

// module export table — omitted when this code is linked directly into
// the kernel or the boot loader
#if !_BUILDING_kernel && !BOOT
module_info *modules[] = {
	(module_info *)&ide_for_controller_module,
	(module_info *)&ide_sim_module,
	NULL
};
#endif

View File

@ -1,64 +0,0 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open IDE bus manager
OP-Codes of IDE commands
*/
#ifndef __IDE_CMDS_H__
#define __IDE_CMDS_H__

// 28-bit data transfer commands
#define IDE_CMD_WRITE_DMA				0xca
#define IDE_CMD_WRITE_DMA_QUEUED		0xcc
#define IDE_CMD_WRITE_MULTIPLE			0xc5
#define IDE_CMD_WRITE_SECTORS			0x30
#define IDE_CMD_READ_DMA				0xc8
#define IDE_CMD_READ_DMA_QUEUED			0xc7
#define IDE_CMD_READ_MULTIPLE			0xc4
#define IDE_CMD_READ_SECTORS			0x20

// 48-bit (LBA48) data transfer commands
#define IDE_CMD_WRITE_DMA_EXT			0x35
#define IDE_CMD_WRITE_DMA_QUEUED_EXT	0x36
#define IDE_CMD_WRITE_MULTIPLE_EXT		0x39
#define IDE_CMD_WRITE_SECTORS_EXT		0x34
#define IDE_CMD_READ_DMA_EXT			0x25
#define IDE_CMD_READ_DMA_QUEUED_EXT		0x26
#define IDE_CMD_READ_MULTIPLE_EXT		0x29
#define IDE_CMD_READ_SECTORS_EXT		0x24

// ATAPI and command queueing
#define IDE_CMD_PACKET					0xa0
#define IDE_CMD_DEVICE_RESET			0x08
#define IDE_CMD_SERVICE					0xa2

// NOP and its sub-commands (features register)
#define IDE_CMD_NOP						0
#define IDE_CMD_NOP_NOP					0
#define IDE_CMD_NOP_NOP_AUTOPOLL		1

// media and cache management
#define IDE_CMD_GET_MEDIA_STATUS		0xda
#define IDE_CMD_FLUSH_CACHE				0xe7
#define IDE_CMD_FLUSH_CACHE_EXT			0xea
#define IDE_CMD_MEDIA_EJECT				0xed

// identification and configuration
#define IDE_CMD_IDENTIFY_PACKET_DEVICE	0xa1
#define IDE_CMD_IDENTIFY_DEVICE			0xec
#define IDE_CMD_SET_FEATURES			0xef

// sub-commands of IDE_CMD_SET_FEATURES, written to the features register
#define IDE_CMD_SET_FEATURES_ENABLE_REL_INT		0x5d
#define IDE_CMD_SET_FEATURES_ENABLE_SERV_INT	0x5e
#define IDE_CMD_SET_FEATURES_DISABLE_REL_INT	0xdd
#define IDE_CMD_SET_FEATURES_DISABLE_SERV_INT	0xde
#define IDE_CMD_SET_FEATURES_ENABLE_MSN			0x95

#endif	// __IDE_CMDS_H__

View File

@ -1,195 +0,0 @@
/*
* Copyright 2004-2006, Haiku, Inc. All RightsReserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
#ifndef _IDE_DEVICE_INFOBLOCK_H_
#define _IDE_DEVICE_INFOBLOCK_H_
/*
Definition of response to IDE_CMD_IDENTIFY_DEVICE or
IDE_CMD_IDENTIFY_PACKET_DEVICE
When a new entry is inserted, add its offset in hex
and its index in decimal as a remark. Without that, you
have a rough time when you messed up the offsets.
*/
#include <lendian_bitfield.h>
// ioctl op-codes handled by the SIM (see ide_sim_ioctl())
#define IDE_GET_INFO_BLOCK	0x2710
#define IDE_GET_STATUS		0x2711

// must be 512 bytes!!!
// NOTE(review): field layout mirrors the on-the-wire IDENTIFY data;
// never reorder or resize members
typedef struct tagdevice_infoblock {
	union {	// 0 general configuration
		struct {
			LBITFIELD8 (
				_0_res1					: 1,
				_0_ret1					: 1,
				response_incomplete		: 1,
				_0_ret2					: 3,
				removable_controller_or_media : 1,
				removable_media			: 1,
				_0_ret3					: 7,
				ATA						: 1	// 0 - is ATA!
			);
		} ata;
		struct {
			LBITFIELD8 (
				packet_size				: 2,	// 0 - 12 bytes, 1 - 16 bytes
				response_incomplete		: 1,
				_0_res2					: 2,
				drq_speed				: 2,	// 0 - 3ms, 1 - IRQ, 2 - 50µs
				removable_media			: 1,
				type					: 5,
				_0_res13				: 1,
				ATAPI					: 2	// 2 - is ATAPI
			);
		} atapi;
	} _0;

	uint16 cylinders;				// 2
	uint16 dummy1;					// 4
	uint16 heads;					// 6
	uint16 dummy2[2];				// 8
	uint16 sectors;					// 0c
	uint16 dummy3[3];				// 0e
	char serial_number[20];			// 14
	uint16 dummy4[3];				// 28
	char firmware_version[8];		// 2e
	char model_number[40];			// 36
	uint16 dummy5[2];				// 5e

	LBITFIELD5 (					// 62 (49) capabilities
		_49_ret1			: 8,
		DMA_supported		: 1,
		LBA_supported		: 1,
		IORDY_can_disable	: 1,
		IORDY_supported		: 1
	);

	uint16 dummy6[1];				// 64

	LBITFIELD2 (					// 66 (51) obsolete: PIO modes?
		_51_obs1	: 8,
		PIO_mode	: 8
	);

	uint16 dummy7[1];				// 68

	LBITFIELD3 (					// 6a (53) validity
		_54_58_valid	: 1,
		_64_70_valid	: 1,
		_88_valid		: 1
	);

	uint16 current_cylinders;		// 6c (54)
	uint16 current_heads;			// 6e
	uint16 current_sectors;			// 70
	uint16 capacity_low;			// 72 (57) ALIGNMENT SPLIT - don't merge
	uint16 capacity_high;
	uint16 dummy8[1];
	uint32 LBA_total_sectors;		// 78 (60)
	uint16 dummy9[1];				// 7c

	LBITFIELD7 (					// 7e (63) MDMA modes
		MDMA0_supported	: 1,
		MDMA1_supported	: 1,
		MDMA2_supported	: 1,
		_63_res1		: 5,
		MDMA0_selected	: 1,
		MDMA1_selected	: 1,
		MDMA2_selected	: 1
	);

	uint16 dummy10[11];				// 80

	LBITFIELD2 (					// 96 (75)
		queue_depth	: 5,
		_75_res1	: 9
	);

	uint16 dummy11[6];				// 98

	LBITFIELD16 (					// a4 (82) supported_command_set
		SMART_supported				: 1,
		security_mode_supported		: 1,
		removable_media_supported	: 1,
		PM_supported				: 1,
		_81_fixed					: 1,	// must be 0
		write_cache_supported		: 1,
		look_ahead_supported		: 1,
		RELEASE_irq_supported		: 1,
		SERVICE_irq_supported		: 1,
		DEVICE_RESET_supported		: 1,
		HPA_supported				: 1,
		_81_obs1					: 1,
		WRITE_BUFFER_supported		: 1,
		READ_BUFFER_supported		: 1,
		NOP_supported				: 1,
		_81_obs2					: 1
	);

	LBITFIELD15 (					// a6 (83) supported_command_sets
		DOWNLOAD_MICROCODE_supported		: 1,
		DMA_QUEUED_supported				: 1,
		CFA_supported						: 1,
		APM_supported						: 1,
		RMSN_supported						: 1,
		power_up_in_stand_by_supported		: 1,
		SET_FEATURES_on_power_up_required	: 1,
		reserved_boot_area_supported		: 1,
		SET_MAX_security_supported			: 1,
		auto_acustic_managemene_supported	: 1,
		_48_bit_addresses_supported			: 1,
		device_conf_overlay_supported		: 1,
		FLUSH_CACHE_supported				: 1,
		FLUSH_CACHE_EXT_supported			: 1,
		_83_fixed							: 2	// must be 1
	);

	uint16 dummy12[4];				// a8 (84)

	LBITFIELD15 (					// b0 (88) UDMA modes
		UDMA0_supported	: 1,
		UDMA1_supported	: 1,
		UDMA2_supported	: 1,
		UDMA3_supported	: 1,
		UDMA4_supported	: 1,
		UDMA5_supported	: 1,
		UDMA6_supported	: 1,	// !guessed
		_88_res1		: 1,
		UDMA0_selected	: 1,
		UDMA1_selected	: 1,
		UDMA2_selected	: 1,
		UDMA3_selected	: 1,
		UDMA4_selected	: 1,
		UDMA5_selected	: 1,
		UDMA6_selected	: 1
	);

	uint16 dummy89[11];				// b2 (89)
	uint64 LBA48_total_sectors;		// c8 (100)
	uint16 dummy102[22];			// cc (104)

	LBITFIELD2 (					// fc (126)
		last_lun	: 2,
		_126_res2	: 14
	);

	LBITFIELD4 (					// fe (127) RMSN support
		_127_RMSN_support	: 2,	// 0 = not supported, 1 = supported, 3, 4 = reserved
		_127_res2			: 6,
		device_write_protect: 2,
		_127_res9			: 6
	);

	uint16 dummy14[128];			// 100 (128)
} ide_device_infoblock;

// answer to the IDE_GET_STATUS ioctl
typedef struct ide_status {
	uint8 _reserved;
	uint8 dma_status;
	uint8 pio_mode;
	uint8 dma_mode;
} ide_status;

#endif	/* _IDE_DEVICE_INFOBLOCK_H_ */

View File

@ -1,283 +0,0 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
#ifndef __IDE_INTERNAL_H__
#define __IDE_INTERNAL_H__
#include <bus/IDE.h>
#include <bus/SCSI.h>
#include <device_manager.h>
#include <ide_types.h>
#include "ide_device_infoblock.h"
#include "ata_request.h"
// debug levels consumed by wrapper.h's SHOW_* macros
#define debug_level_error 2
#define debug_level_info 1
#define debug_level_flow 0

#define DEBUG_MSG_PREFIX "IDE -- "

#include "wrapper.h"

// standard/command-release timeouts, in microseconds
#define IDE_STD_TIMEOUT 10000000
#define IDE_RELEASE_TIMEOUT 10000000

// number of timeouts before we disable DMA automatically
#define MAX_DMA_FAILURES 3

// name of pnp generator of channel ids
#define IDE_CHANNEL_ID_GENERATOR "ide/channel_id"
// node item containing channel id (uint32)
#define IDE_CHANNEL_ID_ITEM "ide/channel_id"

// SIM interface
#define IDE_SIM_MODULE_NAME "bus_managers/ide/sim/driver_v1"

// globals set up from module_dependencies (see ide.c / ide_sim.c)
extern device_manager_info *pnp;
extern scsi_for_sim_interface *scsi;

typedef struct ide_bus_info ide_bus_info;

// structure for device time-outs
typedef struct ide_device_timer_info {
	timer te;
	struct ide_device_info *device;
} ide_device_timer_info;

// structure for bus time-outs
typedef struct ide_bus_timer_info {
	timer te;
	struct ide_bus_info *bus;
} ide_bus_timer_info;

// per-device state (one per master/slave position on a bus)
typedef struct ide_device_info {
	struct ide_bus_info *bus;

	uint8 use_LBA : 1;			// true for LBA, false for CHS
	uint8 use_48bits : 1;		// true for LBA48
	uint8 is_atapi : 1;			// true for ATAPI, false for ATA
	uint8 DMA_supported : 1;	// DMA supported
	uint8 DMA_enabled : 1;		// DMA enabled
	uint8 is_device1 : 1;		// true for slave, false for master

	uint8 last_lun;				// last LUN
	uint8 DMA_failures;			// DMA failures in a row
	uint8 num_failed_send;		// number of consequetive send problems

	struct ata_request * requestActive;
	struct ata_request * requestFree;

	// entry for scsi's exec_io request
	void (*exec_io)( struct ide_device_info *device, struct ata_request *request );

	int target_id;				// target id (currently, same as is_device1)

	ide_reg_mask tf_param_mask;	// flag of valid bytes in task file
	ide_task_file tf;			// task file

	// ata from here on
	uint64 total_sectors;		// size in sectors

	// atapi from here on
	uint8 packet[12];			// atapi command packet
	struct {
		uint8 packet_irq : 1;	// true, if command packet irq required
		bigtime_t packet_irq_timeout;	// timeout for it
	} atapi;

	uint8 device_type;			// atapi device type

	// pio from here on
	int left_sg_elem;			// remaining sg elements
	const physical_entry *cur_sg_elem;	// active sg element
	int cur_sg_ofs;				// offset in active sg element

	int left_blocks;			// remaining blocks

	bool has_odd_byte;			// remaining odd byte
	int odd_byte;				// content off odd byte

	ide_device_infoblock infoblock;	// infoblock of device
} ide_device_info;

// state of ide bus
typedef enum {
	ata_state_idle,		// not is using it
	ata_state_busy,		// got bus but no command issued yet
	ata_state_pio,		// bus is executing a PIO command
	ata_state_dma		// bus is executing a DMA command
} ata_bus_state;

// flags passed to ata_send_command()/ata_wait()
typedef enum {
	ATA_DRDY_REQUIRED = 0x01,
	ATA_IS_WRITE = 0x02,
//	ATA_PIO_TRANSFER = 0x04,
	ATA_DMA_TRANSFER = 0x08,
	ATA_CHECK_ERROR_BIT = 0x10,
	ATA_WAIT_FINISH = 0x20
} ata_flags;

// per-channel state; one instance per published IDE channel node
struct ide_bus_info {
	// controller
	ide_controller_interface *controller;
	void *channel_cookie;

	// lock, used for changes of bus state
	spinlock lock;
	cpu_status prev_irq_state;

	ata_bus_state state;		// current state of bus

	mutex status_report_ben;	// to lock when you report XPT about bus state
								// i.e. during requeue, resubmit or finished

	bool disconnected;			// true, if controller is lost

	scsi_bus scsi_cookie;		// cookie for scsi bus

	ide_bus_timer_info timer;	// timeout
	scsi_dpc_cookie irq_dpc;

	ide_device_info *active_device;
	ide_device_info *devices[2];

	uchar path_id;

	device_node *node;			// our pnp node

	// restrictions, read from controller node
	uint8 max_devices;
	uint8 can_DMA;
	uint8 can_CQ;

	char name[32];
};

// call this before you change bus state
// (disables interrupts and takes the bus spinlock; the previous
// interrupt state is stashed in bus->prev_irq_state for IDE_UNLOCK)
#define IDE_LOCK( bus ) { \
	cpu_status prev_irq_state = disable_interrupts(); \
	acquire_spinlock( &bus->lock ); \
	bus->prev_irq_state = prev_irq_state; \
}

// call this after you changed bus state
#define IDE_UNLOCK( bus ) { \
	cpu_status prev_irq_state = bus->prev_irq_state; \
	release_spinlock( &bus->lock ); \
	restore_interrupts( prev_irq_state ); \
}

// ata.c
void ata_select_device(ide_bus_info *bus, int device);
void ata_select(ide_device_info *device);
bool ata_is_device_present(ide_bus_info *bus, int device);
status_t ata_wait(ide_bus_info *bus, uint8 set, uint8 not_set, ata_flags flags, bigtime_t timeout);
status_t ata_wait_for_drq(ide_bus_info *bus);
status_t ata_wait_for_drqdown(ide_bus_info *bus);
status_t ata_wait_for_drdy(ide_bus_info *bus);
status_t ata_pio_wait_drdy(ide_device_info *device);
status_t ata_reset_bus(ide_bus_info *bus, bool *_devicePresent0, uint32 *_sigDev0, bool *_devicePresent1, uint32 *_sigDev1);
status_t ata_reset_device(ide_device_info *device, bool *_devicePresent);
status_t ata_send_command(ide_device_info *device, ata_request *request, ata_flags flags, bigtime_t timeout);
status_t ata_finish_command(ide_device_info *device, ata_request *request, ata_flags flags, uint8 errorMask);
bool check_rw_error(ide_device_info *device, ata_request *request);
bool check_output(ide_device_info *device, bool drdy_required, int error_mask, bool is_write);
void ata_exec_read_write(ide_device_info *device, ata_request *request, uint64 address, size_t sectorCount, bool write);
void ata_dpc_DMA(ata_request *request);
void ata_dpc_PIO(ata_request *request);
void ata_exec_io(ide_device_info *device, ata_request *request);
status_t ata_identify_device(ide_device_info *device, bool isAtapi);
status_t configure_ata_device(ide_device_info *device);

// atapi.c
status_t configure_atapi_device(ide_device_info *device);
void send_packet(ide_device_info *device, ata_request *request, bool write);
void packet_dpc(ata_request *request);
void atapi_exec_io(ide_device_info *device, ata_request *request);

// basic_prot.c
// timeout in seconds

// channel_mgr.c
extern ide_for_controller_interface ide_for_controller_module;

// device_mgr.c
status_t scan_device(ide_device_info *device, bool isAtapi);
void destroy_device(ide_device_info *device);
ide_device_info *create_device(ide_bus_info *bus, bool is_device1);
status_t configure_device(ide_device_info *device, bool isAtapi);

// dma.c
bool prepare_dma(ide_device_info *device, ata_request *request);
void start_dma(ide_device_info *device, ata_request *request);
void start_dma_wait(ide_device_info *device, ata_request *request);
void start_dma_wait_no_lock(ide_device_info *device, ata_request *request);
bool finish_dma(ide_device_info *device);
void abort_dma(ide_device_info *device, ata_request *request);
bool configure_dma(ide_device_info *device);
int get_device_dma_mode(ide_device_info *device);

// emulation.c
bool copy_sg_data(scsi_ccb *request, uint offset, uint req_size_limit,
	void *buffer, int size, bool to_buffer);
void ide_request_sense(ide_device_info *device, ata_request *request);

// pio.c
void prep_PIO_transfer(ide_device_info *device, ata_request *request);
status_t read_PIO_block(ata_request *request, int length);
status_t write_PIO_block(ata_request *request, int length);

// scsi
struct scsi_sense;
void scsi_set_sense(struct scsi_sense *sense, const ata_request *request);
void scsi_request_sense(ide_device_info *device, ata_request *request);

// sync.c
// timeout in seconds (according to CAM)
void ide_dpc(void *arg);
void access_finished(ide_bus_info *bus, ide_device_info *device);
status_t ide_irq_handler(ide_bus_info *bus, uint8 status);
status_t ide_timeout(timer *arg);

#endif	/* __IDE_INTERNAL_H__ */

View File

@ -1,517 +0,0 @@
/*
* Copyright 2004-2008, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
Interface between ide bus manager and scsi bus manager.
The IDE bus manager has a bit unusual structure as it
consists of a single level only. In fact it is no bus manager
in terms of the PnP structure at all but a driver that maps
one SCSI bus onto one IDE controller.
This structure does not allow us to publish IDE devices
as they can be accessed via the SCSI bus node only. Therefore
we do a full bus scan every time the IDE bus node is loaded.
The drawback is that a bus rescan must be done indirectly via a
SCSI bus scan.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include <scsi_cmds.h>
#include <safemode.h>
#include <string.h>
#include <malloc.h>
#include <stdio.h>
//#define FLOW dprintf
#define FLOW(x...)
#define TRACE dprintf
//#define TRACE(x...)
scsi_for_sim_interface *scsi;
/*! SIM entry point: execute a SCSI ccb on this IDE bus.
	Validates the target (IDs 0/1 only; LUN limited to the device's
	last_lun) and hands the request to the device's exec_io hook.
	If no request slot is free the ccb is requeued; invalid targets
	complete with SCSI_SEL_TIMEOUT, a lost controller with SCSI_NO_HBA.
	All completion callbacks are serialized via status_report_ben.
*/
static void
sim_scsi_io(ide_bus_info *bus, scsi_ccb *ccb)
{
	ide_device_info *device;
	ata_request *request;

	FLOW("sim_scsi_iobus %p, %d:%d\n", bus, ccb->target_id, ccb->target_lun);

	if (bus->disconnected)
		goto err_disconnected;

	// make sure, device is valid
	// I've read that there are ATAPI devices with more then one LUN,
	// but it seems that most (all?) devices ignore LUN, so we have
	// to restrict to LUN 0 to avoid mirror devices
	if (ccb->target_id >= 2)
		goto err_inv_device;

	device = bus->devices[ccb->target_id];
	if (device == NULL)
		goto err_inv_device;

	if (ccb->target_lun > device->last_lun)
		goto err_inv_device;

	ata_request_start(&request, device, ccb);
	if (request) {
		FLOW("calling exec_io: %p, %d:%d\n", bus, ccb->target_id, ccb->target_lun);
		device->exec_io(device, request);
		return;
	}

	// no free request slot: have the SCSI layer retry the ccb later
	TRACE("Bus busy %d:%d\n", ccb->target_id, ccb->target_lun);
	ACQUIRE_BEN(&bus->status_report_ben);
	scsi->requeue(ccb, true);
	RELEASE_BEN(&bus->status_report_ben);
	return;

err_inv_device:
	FLOW("Invalid device %d:%d\n", ccb->target_id, ccb->target_lun);
	ccb->subsys_status = SCSI_SEL_TIMEOUT;
	ACQUIRE_BEN(&bus->status_report_ben);
	scsi->finished(ccb, 1);
	RELEASE_BEN(&bus->status_report_ben);
	return;

err_disconnected:
	TRACE("No controller anymore %d:%d\n", ccb->target_id, ccb->target_lun);
	ccb->subsys_status = SCSI_NO_HBA;
	ACQUIRE_BEN(&bus->status_report_ben);
	scsi->finished(ccb, 1);
	RELEASE_BEN(&bus->status_report_ben);
	return;
}
/*! SIM entry point: describe this bus/adapter to the SCSI layer. */
static uchar
sim_path_inquiry(ide_bus_info *bus, scsi_path_inquiry *info)
{
	const char *controllerName;

	FLOW("sim_path_inquiry, bus %p\n", bus);

	if (bus->disconnected)
		return SCSI_NO_HBA;

	info->hba_inquiry = SCSI_PI_TAG_ABLE | SCSI_PI_WIDE_16;
	info->hba_misc = 0;
	// we don't need any of the private data
	info->sim_priv = 0;
	// there is no initiator for IDE, but SCSI needs it for scanning
	info->initiator_id = 2;
	// we only support 1 request at a time
	info->hba_queue_size = 1;
	memset(info->vuhba_flags, 0, sizeof(info->vuhba_flags));

	strncpy(info->sim_vid, "Haiku", SCSI_SIM_ID);

	if (pnp->get_attr_string(bus->node, SCSI_DESCRIPTION_CONTROLLER_NAME,
			&controllerName, true) == B_OK)
		strlcpy(info->hba_vid, controllerName, SCSI_HBA_ID);
	else
		strlcpy(info->hba_vid, "", SCSI_HBA_ID);

	strlcpy(info->controller_family, "IDE", SCSI_FAM_ID);
	strlcpy(info->controller_type, "IDE", SCSI_TYPE_ID);

	SHOW_FLOW0(4, "done");
	return SCSI_REQ_CMP;
}
/*! (Re)scan the bus: reset it and create/configure a device object for
	every target that answered the reset. Any previously existing device
	objects are destroyed first. The bus state is moved to
	ata_state_busy for the duration of the scan.
*/
static void
scan_bus(ide_bus_info *bus)
{
	uint32 deviceSignature[2];
	bool devicePresent[2];
	ide_device_info *device;
	status_t status;
	bool isAtapi;
	int i;

	TRACE("ATA: scan_bus: bus %p\n", bus);

	if (bus->disconnected)
		return;

	// XXX fix me
	IDE_LOCK(bus);
	ASSERT(bus->state == ata_state_idle);
	bus->state = ata_state_busy;
	IDE_UNLOCK(bus);

	for (i = 0; i < bus->max_devices; ++i) {
		if (bus->devices[i])
			destroy_device(bus->devices[i]);
	}

	// NOTE(review): `status' is assigned but never checked afterwards
	status = ata_reset_bus(bus, &devicePresent[0], &deviceSignature[0], &devicePresent[1], &deviceSignature[1]);

	for (i = 0; i < bus->max_devices; ++i) {
		if (!devicePresent[i])
			continue;

		dprintf("ATA: scan_bus: bus %p, creating device %d, signature is 0x%08lx\n",
			bus, i, deviceSignature[i]);

		// this reset signature identifies an ATAPI device
		isAtapi = deviceSignature[i] == 0xeb140101;
		device = create_device(bus, i /* isDevice1 */);

		if (scan_device(device, isAtapi) != B_OK) {
			dprintf("ATA: scan_bus: bus %p, scanning failed, destroying device %d\n", bus, i);
			destroy_device(device);
			continue;
		}

		if (configure_device(device, isAtapi) != B_OK) {
			dprintf("ATA: scan_bus: bus %p, configure failed, destroying device %d\n", bus, i);
			destroy_device(device);
		}
	}

	// XXX fix me
	IDE_LOCK(bus);
	ASSERT(bus->state != ata_state_idle);
	bus->state = ata_state_idle;
	IDE_UNLOCK(bus);

	TRACE("ATA: scan_bus: bus %p finished\n", bus);
}
/*! SIM entry point: remember the SCSI bus cookie, then scan for
	attached devices.
*/
static void
sim_set_scsi_bus(ide_bus_info *bus, scsi_bus scsiCookie)
{
	bus->scsi_cookie = scsiCookie;

	// detect devices
	scan_bus(bus);
}
/*! SIM entry point: rescan is not implemented; report success anyway. */
static uchar
sim_rescan_bus(ide_bus_info *bus)
{
	TRACE("ATA: sim_rescan_bus - not implemented\n");
	return SCSI_REQ_CMP;
}
/*! SIM entry point: aborting a specific command is not supported, so
	the request is simply acknowledged (unless the controller is gone).
*/
static uchar
sim_abort(ide_bus_info *bus, scsi_ccb *ccb_to_abort)
{
	return bus->disconnected ? SCSI_NO_HBA : SCSI_REQ_CMP;
}
/*! SIM entry point: terminating a command is not supported, so the
	request is simply acknowledged (unless the controller is gone).
*/
static uchar
sim_term_io(ide_bus_info *bus, scsi_ccb *ccb_to_abort)
{
	return bus->disconnected ? SCSI_NO_HBA : SCSI_REQ_CMP;
}
/*! SIM entry point: bus resets are deliberately not performed. */
static uchar
sim_reset_bus(ide_bus_info *bus)
{
	if (bus->disconnected)
		return SCSI_NO_HBA;

	TRACE("ATA: sim_reset_bus - not implemented\n");
	return SCSI_REQ_INVALID;
}
/*! SIM entry point: resetting a single device is not implemented yet
	(xxx to do).
*/
static uchar
sim_reset_device(ide_bus_info *bus, uchar target_id, uchar target_lun)
{
	return bus->disconnected ? SCSI_NO_HBA : SCSI_REQ_INVALID;
}
/*! Initialize a SIM instance for one IDE channel node.
	Allocates and wires up the ide_bus_info, reads the controller's
	restrictions (max devices, DMA capability, safemode DMA override)
	and resolves the parent controller driver.
	On failure, resources are released in reverse order via the
	err labels; on success *cookie receives the new bus.
*/
static status_t
ide_sim_init_bus(device_node *node, void **cookie)
{
	device_node *parent;
	ide_bus_info *bus;
	bool dmaDisabled = false;
	status_t status;

	FLOW("ide_sim_init_bus, node %p\n", node);

	// first prepare the info structure
	bus = (ide_bus_info *)malloc(sizeof(*bus));
	if (bus == NULL)
		return B_NO_MEMORY;

	memset(bus, 0, sizeof(*bus));
	bus->node = node;
	bus->lock = 0;
	bus->disconnected = false;

	{
		// name the bus after its pnp channel id (for diagnostics)
		int32 channel_id = -1;
		pnp->get_attr_uint32(node, IDE_CHANNEL_ID_ITEM, (uint32 *)&channel_id, true);
		sprintf(bus->name, "ide_bus %d", (int)channel_id);
	}

	bus->timer.bus = bus;

	if ((status = scsi->alloc_dpc(&bus->irq_dpc)) < B_OK)
		goto err1;

	bus->state = ata_state_idle;
	bus->active_device = NULL;
	bus->devices[0] = NULL;
	bus->devices[1] = NULL;

	status = INIT_BEN(&bus->status_report_ben, "ide_status_report");
	if (status < B_OK)
		goto err4;

	{
		// check if safemode settings disable DMA
		void *settings = load_driver_settings(B_SAFEMODE_DRIVER_SETTINGS);
		if (settings != NULL) {
			dmaDisabled = get_driver_boolean_parameter(settings, B_SAFEMODE_DISABLE_IDE_DMA,
				false, false);
			unload_driver_settings(settings);
		}
	}

	// read restrictions of controller
	if (pnp->get_attr_uint8(node, IDE_CONTROLLER_MAX_DEVICES_ITEM,
			&bus->max_devices, true) != B_OK) {
		// per default, 2 devices are supported per node
		bus->max_devices = 2;
	}

	bus->max_devices = min(bus->max_devices, 2);

	if (dmaDisabled
		|| pnp->get_attr_uint8(node, IDE_CONTROLLER_CAN_DMA_ITEM, &bus->can_DMA, true) != B_OK) {
		// per default, no dma support
		bus->can_DMA = false;
	}

	SHOW_FLOW(2, "can_dma: %d", bus->can_DMA);

	// the parent node is the controller driver providing the channel
	parent = pnp->get_parent_node(node);

	status = pnp->get_driver(parent, (driver_module_info **)&bus->controller,
		(void **)&bus->channel_cookie);

	pnp->put_node(parent);

	if (status != B_OK)
		goto err5;

	*cookie = bus;

	return B_OK;

err5:
	DELETE_BEN(&bus->status_report_ben);
err4:
	scsi->free_dpc(bus->irq_dpc);
err1:
	free(bus);
	return status;
}
/*! Tear down a SIM instance; releases resources in reverse order of
	ide_sim_init_bus().
*/
static void
ide_sim_uninit_bus(ide_bus_info *bus)
{
	FLOW("ide_sim_uninit_bus: bus %p\n", bus);

	DELETE_BEN(&bus->status_report_ben);
	scsi->free_dpc(bus->irq_dpc);

	free(bus);
}
/*! Called when the underlying controller disappears: block the SCSI
	bus and mark this instance disconnected so new commands are refused.
*/
static void
ide_sim_bus_removed(ide_bus_info *bus)
{
	FLOW("ide_sim_bus_removed\n");

	if (bus == NULL) {
		// driver not loaded - no manual intervention needed
		return;
	}

	// XPT must not issue further commands
	scsi->block_bus(bus->scsi_cookie);

	// make sure, we refuse all new commands
	bus->disconnected = true;

	// abort all running commands with SCSI_NO_HBA
	// XXX
}
/*! SIM entry point: report per-target transfer restrictions.
	All devices are declared ATAPI (so fewer commands need emulation);
	autosense is emulated for ATA devices but not for real ATAPI ones.
	Known-buggy ZIP/Clik! drives get their transfer size restricted.
*/
static void
ide_sim_get_restrictions(ide_bus_info *bus, uchar target_id,
	bool *is_atapi, bool *no_autosense, uint32 *max_blocks)
{
	ide_device_info *device = bus->devices[target_id];

	FLOW("ide_sim_get_restrictions\n");

	// we declare even ATA devices as ATAPI so we have to emulate fewer
	// commands
	*is_atapi = true;

	// we emulate autosense for ATA devices
	*no_autosense = false;

	*max_blocks = 255;

	// fix: the original dereferenced `device' unconditionally below,
	// crashing when no device is attached at target_id — keep all
	// device-dependent checks inside this NULL-guarded branch
	if (device != NULL && device->is_atapi) {
		// we don't support native autosense for ATAPI devices
		*no_autosense = true;

		if (strncmp(device->infoblock.model_number, "IOMEGA ZIP 100 ATAPI",
				strlen("IOMEGA ZIP 100 ATAPI")) == 0
			|| strncmp( device->infoblock.model_number, "IOMEGA Clik!",
				strlen( "IOMEGA Clik!")) == 0) {
			SHOW_ERROR0(2, "Found buggy ZIP/Clik! drive - restricting transmission size");
			*max_blocks = 64;
		}
	}
}
/*! SIM entry point: bus-manager specific ioctls for one target.
	Supports IDE_GET_INFO_BLOCK (copy the cached identify data) and
	IDE_GET_STATUS (report DMA mode information).
	Returns B_BAD_VALUE for unknown ops or invalid/absent targets.
*/
static status_t
ide_sim_ioctl(ide_bus_info *bus, uint8 targetID, uint32 op, void *buffer, size_t length)
{
	ide_device_info *device;

	// fix: validate the target before dereferencing; the original
	// indexed bus->devices[] unchecked and crashed for an absent device
	if (targetID >= 2)
		return B_BAD_VALUE;

	device = bus->devices[targetID];
	if (device == NULL)
		return B_BAD_VALUE;

	// We currently only support IDE_GET_INFO_BLOCK
	switch (op) {
		case IDE_GET_INFO_BLOCK:
			// we already have the info block, just copy it
			memcpy(buffer, &device->infoblock,
				min(sizeof(device->infoblock), length));
			return B_OK;

		case IDE_GET_STATUS:
		{
			// TODO: have our own structure and fill it with some useful stuff
			ide_status status;
			if (device->DMA_enabled)
				status.dma_status = 1;
			else if (device->DMA_supported) {
				if (device->DMA_failures > 0)
					status.dma_status = 6;
				else if (device->bus->can_DMA)
					status.dma_status = 2;
				else
					status.dma_status = 4;
			} else
				status.dma_status = 2;
			status.pio_mode = 0;
			status.dma_mode = get_device_dma_mode(device);
			memcpy(buffer, &status, min(sizeof(status), length));
			return B_OK;
		}
	}
	return B_BAD_VALUE;
}
/*! Standard module hooks; both init and uninit are no-ops here. */
static status_t
std_ops(int32 op, ...)
{
	if (op == B_MODULE_INIT || op == B_MODULE_UNINIT)
		return B_OK;

	return B_ERROR;
}
// modules we depend on; the loader resolves them into the global
// `scsi' and `pnp' interface pointers before std_ops(B_MODULE_INIT)
module_dependency module_dependencies[] = {
	{ SCSI_FOR_SIM_MODULE_NAME, (module_info **)&scsi },
	{ B_DEVICE_MANAGER_MODULE_NAME, (module_info **)&pnp },
	{}
};
// SIM interface published to the SCSI bus manager; the casts adapt the
// scsi_sim_cookie-based prototypes to our ide_bus_info cookie
scsi_sim_interface ide_sim_module = {
	{
		{
			IDE_SIM_MODULE_NAME,
			0,
			std_ops,
		},

		NULL,	// supported devices
		NULL,	// register node
		(status_t (*)(device_node *, void **))	ide_sim_init_bus,
		(void (*)(void *))						ide_sim_uninit_bus,
		NULL,	// register child devices
		NULL,	// rescan
		(void (*)(void *))						ide_sim_bus_removed,
		NULL,	// suspend
		NULL	// resume
	},

	(void (*)(scsi_sim_cookie, scsi_bus))		sim_set_scsi_bus,
	(void (*)(scsi_sim_cookie, scsi_ccb *))		sim_scsi_io,
	(uchar (*)(scsi_sim_cookie, scsi_ccb *))	sim_abort,
	(uchar (*)(scsi_sim_cookie, uchar, uchar))	sim_reset_device,
	(uchar (*)(scsi_sim_cookie, scsi_ccb *))	sim_term_io,
	(uchar (*)(scsi_sim_cookie, scsi_path_inquiry *))sim_path_inquiry,
	(uchar (*)(scsi_sim_cookie))				sim_rescan_bus,
	(uchar (*)(scsi_sim_cookie))				sim_reset_bus,
	(void (*)(scsi_sim_cookie, uchar,
		bool*, bool *, uint32 *))				ide_sim_get_restrictions,
	(status_t (*)(scsi_sim_cookie, uint8, uint32, void *, size_t))ide_sim_ioctl,
};

View File

@ -1,21 +0,0 @@
/*
** Copyright 2002/03, Thomas Kurschel. All rights reserved.
** Distributed under the terms of the OpenBeOS License.
*/
/*
Part of Open IDE bus manager
Interface between ide and scsi bus manager
*/
#ifndef __IDE_SIM_H__
#define __IDE_SIM_H__

#include "scsi_cmds.h"

// globals shared by the SIM implementation (defined in ide_sim.c/ide.c)
extern scsi_for_sim_interface *scsi;
extern scsi_sim_interface ide_sim_module;

#endif	// __IDE_SIM_H__

View File

@ -1,370 +0,0 @@
/*
* Copyright 2004-2007, Haiku, Inc. All RightsReserved.
* Copyright 2002-2004, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
PIO data transmission
This file is more difficult then you might expect as the SCSI system
uses physical addresses everywhere which have to be mapped into
virtual address space during transmission. Additionally, during ATAPI
commands we may have to transmit more data then exist because the
data len specified by the command doesn't need to be the same as
of the data buffer provided.
The handling of S/G entries of odd size may look superfluous as the
SCSI bus manager can take care of that. In general, this would be possible
as most controllers need even alignment for DMA as well, but some can
handle _any_ S/G list and it wouldn't be sensitive to enforce stricter
alignement just for some rare PIO transmissions.
Little hint for the meaning of "transferred": this is the number of bytes
sent over the bus. For read-transmissions, this may be one more then copied
into the buffer (the extra byte read is stored in device->odd_byte), for
write-transmissions, this may be one less (the waiting byte is pending in
device->odd_byte).
In terms of error handling: we don't bother checking transmission of every
single byte via read/write_pio(). At least at the end of the request, when
the status bits are verified, we will see that something has gone wrong.
TBD: S/G entries may have odd start address. For non-Intel architecture
we either have to copy data to an aligned buffer or have to modify
PIO-handling in controller drivers.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include <thread.h>
#include <vm.h>
#include <string.h>
#define FLOW dprintf
// internal error code if scatter gather table is too short
#define ERR_TOO_BIG (B_ERRORS_END + 1)
/*! Prepare a PIO transfer: reset the device's scatter/gather cursor
	and the odd-byte carry, and mark the whole buffer as unread.
*/
void
prep_PIO_transfer(ide_device_info *device, ata_request *request)
{
	scsi_ccb *ccb = request->ccb;

	SHOW_FLOW0(4, "");

	device->left_sg_elem = ccb->sg_count;
	device->cur_sg_elem = ccb->sg_list;
	device->cur_sg_ofs = 0;
	device->has_odd_byte = false;

	ccb->data_resid = ccb->data_length;
}
/*! Transfer virtually continuous data.
	Moves `length' bytes at `virtualAddress' over the PIO data register.
	Because the bus always moves 16 bits at once, an odd trailing byte
	is carried between calls in device->odd_byte / has_odd_byte.
	`transferred' is incremented by the bytes actually put on the wire
	(on reads this may exceed `length' by one).
*/
static inline status_t
transfer_PIO_virtcont(ide_device_info *device, uint8 *virtualAddress,
	int length, bool write, int *transferred)
{
	ide_bus_info *bus = device->bus;
	ide_controller_interface *controller = bus->controller;
	void * channel_cookie = bus->channel_cookie;

	if (write) {
		// if there is a byte left from last chunk, transmit it together
		// with the first byte of the current chunk (IDE requires 16 bits
		// to be transmitted at once)
		if (device->has_odd_byte) {
			uint8 buffer[2];

			buffer[0] = device->odd_byte;
			buffer[1] = *virtualAddress++;

			controller->write_pio(channel_cookie, (uint16 *)buffer, 1, false);

			--length;
			*transferred += 2;
		}

		controller->write_pio(channel_cookie, (uint16 *)virtualAddress,
			length / 2, false);

		// take care if chunk size was odd, which means that 1 byte remains
		virtualAddress += length & ~1;
		*transferred += length & ~1;

		device->has_odd_byte = (length & 1) != 0;

		if (device->has_odd_byte)
			device->odd_byte = *virtualAddress;
	} else {
		// if we read one byte too much last time, push it into current chunk
		if (device->has_odd_byte) {
			*virtualAddress++ = device->odd_byte;
			--length;
		}

		SHOW_FLOW(4, "Reading PIO to %p, %d bytes", virtualAddress, length);

		controller->read_pio(channel_cookie, (uint16 *)virtualAddress,
			length / 2, false);

		// take care of odd chunk size;
		// in this case we read 1 byte to few!
		virtualAddress += length & ~1;
		*transferred += length & ~1;

		device->has_odd_byte = (length & 1) != 0;

		if (device->has_odd_byte) {
			uint8 buffer[2];

			// now read the missing byte; as we have to read 2 bytes at once,
			// we'll read one byte too much
			controller->read_pio(channel_cookie, (uint16 *)buffer, 1, false);

			*virtualAddress = buffer[0];
			device->odd_byte = buffer[1];

			*transferred += 2;
		}
	}

	return B_OK;
}
/*! Transmit physically continuous data.
	Maps the physical range page by page (only one page can be mapped
	at a time) and forwards each piece to transfer_PIO_virtcont().
	The thread is pinned to the current CPU while a mapping is held, as
	required by vm_get_physical_page_current_cpu().
*/
static inline status_t
transfer_PIO_physcont(ide_device_info *device, addr_t physicalAddress,
	int length, bool write, int *transferred)
{
	// we must split up chunk into B_PAGE_SIZE blocks as we can map only
	// one page into address space at once
	while (length > 0) {
		addr_t virtualAddress;
		void* handle;
		int page_left, cur_len;
		status_t err;
		struct thread* thread = thread_get_current_thread();

		SHOW_FLOW(4, "Transmitting to/from physical address %lx, %d bytes left",
			physicalAddress, length);

		thread_pin_to_current_cpu(thread);
		if (vm_get_physical_page_current_cpu(physicalAddress, &virtualAddress,
				&handle) != B_OK) {
			thread_unpin_from_current_cpu(thread);
			// ouch: this should never ever happen
			//xxx fix this set_sense(device, SCSIS_KEY_HARDWARE_ERROR, SCSIS_ASC_INTERNAL_FAILURE);
			return B_ERROR;
		}

		ASSERT(physicalAddress % B_PAGE_SIZE == virtualAddress % B_PAGE_SIZE);

		// if chunks starts in the middle of a page, we have even less then
		// a page left
		page_left = B_PAGE_SIZE - physicalAddress % B_PAGE_SIZE;

		SHOW_FLOW(4, "page_left=%d", page_left);

		cur_len = min(page_left, length);

		SHOW_FLOW(4, "cur_len=%d", cur_len);

		err = transfer_PIO_virtcont(device, (char *)virtualAddress,
			cur_len, write, transferred);

		vm_put_physical_page_current_cpu(virtualAddress, handle);
		thread_unpin_from_current_cpu(thread);

		if (err != B_OK)
			return err;

		length -= cur_len;
		physicalAddress += cur_len;
	}

	return B_OK;
}
/*!	Transfer one PIO block that may span several scatter/gather elements.
	Walks the device's current s/g position (cur_sg_elem/cur_sg_ofs) and
	forwards each physically contiguous piece to transfer_PIO_physcont().
	\return B_OK on success, ERR_TOO_BIG if the s/g list ran out of space
	        before \a length bytes were moved (legal for ATAPI).
*/
static inline status_t
transfer_PIO_block(ide_device_info *device, int length, bool write, int *transferred)
{
	while (length > 0) {
		int elementBytes;
		int chunkLength;
		status_t status;

		if (device->left_sg_elem == 0) {
			// buffer exhausted - acceptable for ATAPI data, caller decides
			return ERR_TOO_BIG;
		}

		// part of the current s/g element may have been transmitted already
		elementBytes = device->cur_sg_elem->size - device->cur_sg_ofs;
		chunkLength = min(elementBytes, length);

		status = transfer_PIO_physcont(device,
			(addr_t)device->cur_sg_elem->address + device->cur_sg_ofs,
			chunkLength, write, transferred);
		if (status != B_OK)
			return status;

		if (elementBytes <= length) {
			// this s/g element is fully consumed - advance to the next one
			device->cur_sg_ofs = 0;
			++device->cur_sg_elem;
			--device->left_sg_elem;
		} else {
			// still within the same element
			device->cur_sg_ofs += chunkLength;
		}

		length -= chunkLength;
	}

	return B_OK;
}
/*!	Feed the device zero bytes (needed for ATAPI when the caller's buffer
	ran out but the device still wants data).
	A small static zero buffer is sent repeatedly; inefficient, but this is
	an error path anyway.
*/
static void
write_discard_PIO(ide_device_info *device, int length)
{
	static const uint8 buffer[32] = {};
	ide_bus_info *bus = device->bus;

	while (length > 0) {
		// round an odd remainder up to the next 16-bit word
		// (that is what the "length + 1" is for)
		int wordCount = min(length + 1, (int)(sizeof(buffer))) / 2;

		bus->controller->write_pio(bus->channel_cookie, (uint16 *)buffer,
			wordCount, false);
		length -= wordCount * 2;
	}
}
/*!	Read PIO data from the device and throw it away (needed for ATAPI when
	the caller's buffer was too small). Drains in 32-byte chunks, mirroring
	write_discard_PIO().
*/
static void
read_discard_PIO(ide_device_info *device, int length)
{
	ide_bus_info *bus = device->bus;
	uint8 scratch[32];

	while (length > 0) {
		// read one extra byte if the remainder is odd ("length + 1")
		int wordCount = min(length + 1, (int)sizeof(scratch)) / 2;

		bus->controller->read_pio(bus->channel_cookie, (uint16 *)scratch,
			wordCount, false);
		length -= wordCount * 2;
	}
}
/*!	Write one PIO data block to the device.
	return: there are 3 possible results
	NO_ERROR - everything's nice and groovy
	ERR_TOO_BIG - data buffer was too short, remaining data got discarded
	B_ERROR - something serious went wrong, sense data was set
*/
status_t
write_PIO_block(ata_request *request, int length)
{
	ide_device_info *device = request->device;
	int transferred;
	status_t err;

	transferred = 0;
	err = transfer_PIO_block(device, length, true, &transferred);

	// account for what actually left the buffer
	request->ccb->data_resid -= transferred;

	if (err != ERR_TOO_BIG)
		return err;

	// there may be a pending odd byte - transmit that now
	// (padded with a zero byte to fill the 16-bit word)
	if (request->device->has_odd_byte) {
		uint8 buffer[2];

		buffer[0] = device->odd_byte;
		buffer[1] = 0;

		device->has_odd_byte = false;

		request->ccb->data_resid -= 1;
		transferred += 2;

		device->bus->controller->write_pio(device->bus->channel_cookie, (uint16 *)buffer, 1, false);
	}

	// "transferred" may actually be larger then length because the last odd-byte
	// is sent together with an extra zero-byte
	if (transferred >= length)
		return err;

	// Ouch! the device asks for data but we haven't got any left.
	// Sadly, this behaviour is OK for ATAPI packets, but there is no
	// way to tell the device that we don't have any data left;
	// only solution is to send zero bytes, though it's BAD BAD BAD
	write_discard_PIO(request->device, length - transferred);
	return ERR_TOO_BIG;
}
/*!	Read one PIO data block from the device.
	return: see write_PIO_block
*/
status_t
read_PIO_block(ata_request *request, int length)
{
	ide_device_info *device = request->device;
	int transferred;
	status_t err;

	transferred = 0;
	err = transfer_PIO_block(request->device, length, false, &transferred);

	request->ccb->data_resid -= transferred;

	// if length was odd, there's an extra byte waiting in device->odd_byte
	if (device->has_odd_byte) {
		// discard byte
		device->has_odd_byte = false;
		// adjust res_id as the extra byte didn't reach the buffer
		++request->ccb->data_resid;
	}

	if (err != ERR_TOO_BIG)
		return err;

	// the device returns more data than the buffer can store;
	// for ATAPI this is OK - we just discard remaining bytes (there
	// is no way to tell ATAPI about that, but we "only" waste time)

	// perhaps discarding the extra odd-byte was sufficient
	if (transferred >= length)
		return err;

	SHOW_FLOW(3, "discarding after %d bytes", transferred);

	read_discard_PIO(request->device, length - transferred);
	return ERR_TOO_BIG;
}

View File

@ -1,575 +0,0 @@
/*
* Copyright 2004-2008, Haiku, Inc. All RightsReserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
Converts SCSI commands to ATA commands.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include "ide_cmds.h"
#include <string.h>
#define TRACE dprintf_no_syslog
#define FLOW dprintf_no_syslog
/*!	Fill a SCSI sense block from the sense information stored in an
	ata_request (current-error format, no sense-key-specific data).
*/
void
scsi_set_sense(scsi_sense *sense, const ata_request *request)
{
	memset(sense, 0, sizeof(*sense));

	sense->error_code = SCSIS_CURR_ERROR;
	sense->add_sense_length = sizeof(*sense) - 7;

	// copy over the key/ASC/ASCQ triple recorded for the request
	sense->sense_key = request->senseKey;
	sense->asc = request->senseAsc;
	sense->ascq = request->senseAscq;

	// no additional sense-key specific information available
	sense->sense_key_spec.raw.SKSV = 0;
}
/*!	Emulate the SCSI MODE SENSE (10) command for an ATA disk.
	Builds a parameter header, one block descriptor (512-byte blocks) and
	the control mode page directly into the request's s/g buffer. Only the
	control page (and "all pages", which is equivalent here) is supported.
*/
static void
scsi_mode_sense_10(ide_device_info *device, ata_request *request)
{
	scsi_ccb *ccb = request->ccb;
	scsi_cmd_mode_sense_10 *cmd = (scsi_cmd_mode_sense_10 *)ccb->cdb;
	scsi_mode_param_header_10 param_header;
	scsi_modepage_control control;
	scsi_mode_param_block_desc block_desc;
	size_t totalLength = sizeof(scsi_mode_param_header_10)
		+ sizeof(scsi_mode_param_block_desc)
		+ sizeof(scsi_modepage_control);
	scsi_mode_param_dev_spec_da devspec = {
		_res0_0 : 0,
		dpo_fua : 0,
		_res0_6 : 0,
		write_protected : 0
	};
	uint32 allocationLength;

	SHOW_FLOW0(1, "Hi!");

	allocationLength = B_BENDIAN_TO_HOST_INT16(cmd->allocation_length);

	// we answer control page requests and "all pages" requests
	// (as the latter are the same as the first)
	if ((cmd->page_code != SCSI_MODEPAGE_CONTROL && cmd->page_code != SCSI_MODEPAGE_ALL)
		|| (cmd->page_control != SCSI_MODE_SENSE_PC_CURRENT
			&& cmd->page_control != SCSI_MODE_SENSE_PC_SAVED)) {
		ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
		return;
	}

	//param_header = (scsi_mode_param_header_10 *)ccb->data;
	param_header.mode_data_length = B_HOST_TO_BENDIAN_INT16(totalLength - 1);
	param_header.medium_type = 0; 		// XXX standard is a bit vague here
	param_header.dev_spec_parameter = *(uint8 *)&devspec;
	param_header.block_desc_length
		= B_HOST_TO_BENDIAN_INT16(sizeof(scsi_mode_param_block_desc));

	copy_sg_data(ccb, 0, allocationLength, &param_header,
		sizeof(param_header), false);

	/*block_desc = (scsi_mode_param_block_desc *)(ccb->data
		+ sizeof(*param_header));*/
	memset(&block_desc, 0, sizeof(block_desc));
	// density is reserved (0), descriptor apply to entire medium (num_blocks=0)
	// remains the blocklen to be set
	block_desc.high_blocklen = 0;
	block_desc.med_blocklen = 512 >> 8;
	block_desc.low_blocklen = 512 & 0xff;

	copy_sg_data(ccb, sizeof(param_header), allocationLength,
		&block_desc, sizeof(block_desc), false);

	/*contr = (scsi_modepage_contr *)(ccb->data
		+ sizeof(*param_header)
		+ ((uint16)param_header->high_block_desc_len << 8)
		+ param_header->low_block_desc_len);*/
	memset(&control, 0, sizeof(control));
	control.RLEC = false;
	control.DQue = 1;//!device->CQ_enabled;
	control.QErr = false;
		// when a command fails we requeue all
		// lost commands automagically
	control.QAM = SCSI_QAM_UNRESTRICTED;

	copy_sg_data(ccb, sizeof(param_header)
		+ B_BENDIAN_TO_HOST_INT16(param_header.block_desc_length),
		allocationLength, &control, sizeof(control), false);

	// the number of bytes that were transferred to buffer is
	// restricted by allocation length and by ccb data buffer size
	totalLength = min(totalLength, allocationLength);
	totalLength = min(totalLength, ccb->data_length);

	ccb->data_resid = ccb->data_length - totalLength;
}
/*!	Emulate modifying the SCSI control mode page.
	Only validates the page length; command queuing (the one thing this page
	could toggle) is not supported, so nothing is actually changed.
	\return false (with sense set) on a malformed page, true otherwise.
*/
static bool
ata_mode_select_control_page(ide_device_info *device, ata_request *request,
	scsi_modepage_control *page)
{
	size_t expectedLength = sizeof(*page) - sizeof(page->header);

	if (page->header.page_length != expectedLength) {
		ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST,
			SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
		return false;
	}

	// we only support enabling/disabling command queuing
	// enable_CQ(device, !page->DQue);
	return true;
}
/*!	Emulate the SCSI MODE SELECT (10) command.
	Parses the parameter list from the request's s/g buffer: header first,
	then each mode page in turn; the block descriptor is skipped. Only the
	control page is accepted. Any length inconsistency ends at the err
	label with a PARAM_LIST_LENGTH_ERR sense.
*/
static void
scsi_mode_select_10(ide_device_info *device, ata_request *request)
{
	scsi_ccb *ccb = request->ccb;
	scsi_cmd_mode_select_10 *cmd = (scsi_cmd_mode_select_10 *)ccb->cdb;
	scsi_mode_param_header_10 param_header;
	scsi_modepage_header page_header;
	uint32 totalLength;
	uint32 modepageOffset;
	char modepage_buffer[64]; // !!! enlarge this to support longer mode pages

	// saving pages is not supported; the page format (PF) bit must be set
	if (cmd->save_pages || cmd->pf != 1) {
		ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
		return;
	}

	totalLength = min(ccb->data_length,
		B_BENDIAN_TO_HOST_INT16(cmd->param_list_length));

	// first, retrieve page header to get size of different chunks
	//param_header = (scsi_mode_param_header_10 *)ccb->data;
	if (!copy_sg_data(ccb, 0, totalLength, &param_header, sizeof(param_header), true))
		goto err;

	totalLength = min(totalLength,
		B_BENDIAN_TO_HOST_INT16(param_header.mode_data_length) + 1UL);

	// this is the start of the first mode page;
	// we ignore the block descriptor silently
	modepageOffset = sizeof(param_header)
		+ B_BENDIAN_TO_HOST_INT16(param_header.block_desc_length);

	// go through list of pages
	while (modepageOffset < totalLength) {
		uint32 pageLength;

		// get header to know how long page is
		if (!copy_sg_data(ccb, modepageOffset, totalLength,
				&page_header, sizeof(page_header), true))
			goto err;

		// get size of one page and copy it to buffer
		pageLength = page_header.page_length + sizeof(scsi_modepage_header);

		// the buffer has a maximum size - this is really standard compliant but
		// sufficient for our needs
		if (pageLength > sizeof(modepage_buffer))
			goto err;

		if (!copy_sg_data(ccb, modepageOffset, totalLength,
				&modepage_buffer, min(pageLength, sizeof(modepage_buffer)), true))
			goto err;

		// modify page;
		// currently, we only support the control mode page
		switch (page_header.page_code) {
			case SCSI_MODEPAGE_CONTROL:
				if (!ata_mode_select_control_page(device, request,
						(scsi_modepage_control *)modepage_buffer))
					return;
				break;

			default:
				ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_PARAM_LIST_FIELD);
				return;
		}

		modepageOffset += pageLength;
	}

	if (modepageOffset != totalLength)
		goto err;

	ccb->data_resid = ccb->data_length - totalLength;
	return;

	// if we arrive here, data length was incorrect
err:
	ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_PARAM_LIST_LENGTH_ERR);
}
/*!	Emulate the SCSI TEST UNIT READY command.
	For devices that support removable-media status notification, the ATA
	GET MEDIA STATUS command is issued to pick up media-change reports;
	otherwise the command succeeds trivially.
*/
static void
scsi_test_unit_ready(ide_device_info *device, ata_request *request)
{
	TRACE("scsi_test_unit_ready\n");

	// without RMSN support there is nothing to ask the device
	if (device->infoblock.RMSN_supported == 0
		|| device->infoblock._127_RMSN_support != 1)
		return;

	// ask device about status
	device->tf_param_mask = 0;
	device->tf.write.command = IDE_CMD_GET_MEDIA_STATUS;

	if (ata_send_command(device, request, ATA_DRDY_REQUIRED, 15000000) != B_OK) {
		ata_request_set_status(request, SCSI_SEQUENCE_FAIL);
		return;
	}

	// bits ide_error_mcr | ide_error_mc | ide_error_wp are also valid
	// but not requested by TUR; ide_error_wp can safely be ignored, but
	// we don't want to loose media change (request) reports
	ata_finish_command(device, request, ATA_WAIT_FINISH | ATA_DRDY_REQUIRED,
		ide_error_nm | ide_error_abrt | ide_error_mcr | ide_error_mc);

	// SCSI spec is unclear here: we shouldn't report "media change (request)"
	// but what to do if there is one? anyway - we report them
}
/*!	Flush the device's internal write cache.
	\return true on success (or when the device has no write cache),
	        false when the flush command failed.
*/
static bool
scsi_synchronize_cache(ide_device_info *device, ata_request *request)
{
	TRACE("scsi_synchronize_cache\n");

	// FLUSH CACHE support should be queried too, but virtually every drive
	// denies it while still implementing the command (apparently for some
	// benchmark advantage), so only the write-cache capability is checked
	if (!device->infoblock.write_cache_supported)
		return true;

	device->tf_param_mask = 0;
	device->tf.lba.command = device->use_48bits
		? IDE_CMD_FLUSH_CACHE_EXT : IDE_CMD_FLUSH_CACHE;

	// the spec allows the flush to take more than 30 seconds
	if (ata_send_command(device, request, ATA_DRDY_REQUIRED, 60000000) != B_OK)
		return false;

	return ata_finish_command(device, request,
		ATA_WAIT_FINISH | ATA_DRDY_REQUIRED, ide_error_abrt) == B_OK;
}
/*!	Load or eject the medium.
	\param load  true to load the medium - which ATA cannot do, so this
	             always fails with a sense code; false to eject.
	\return true if the eject completed successfully.
*/
static bool
scsi_load_eject(ide_device_info *device, ata_request *request, bool load)
{
	TRACE("scsi_load_eject\n");

	if (load) {
		// ATA provides no way to load a medium
		ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST,
			SCSIS_ASC_PARAM_NOT_SUPPORTED);
		return false;
	}

	device->tf_param_mask = 0;
	device->tf.lba.command = IDE_CMD_MEDIA_EJECT;

	if (ata_send_command(device, request, ATA_DRDY_REQUIRED, 15000000) != B_OK)
		return false;

	return ata_finish_command(device, request,
		ATA_WAIT_FINISH | ATA_DRDY_REQUIRED,
		ide_error_abrt | ide_error_nm) == B_OK;
}
/*!	PREVENT ALLOW MEDIUM REMOVAL - not supported on ATA devices; always
	answers with an "illegal function" sense.
*/
static void
scsi_prevent_allow(ide_device_info *device, ata_request *request, bool prevent)
{
	(void)device;
	(void)prevent;

	ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST,
		SCSIS_ASC_ILL_FUNCTION);
}
/*!	Emulate the SCSI INQUIRY command using the ATA identify data.
	Only the standard inquiry (no EVPD, no page code) is supported; vendor,
	product and revision fields are filled from the ATA model number.
*/
static void
scsi_inquiry(ide_device_info *device, ata_request *request)
{
	scsi_ccb *ccb = request->ccb;
	scsi_res_inquiry data;
	scsi_cmd_inquiry *cmd = (scsi_cmd_inquiry *)ccb->cdb;
	uint32 allocation_length = cmd->allocation_length;
	uint32 transfer_size;

	// vital product data pages are not implemented
	if (cmd->evpd || cmd->page_code) {
		ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
		return;
	}

	memset(&data, 0, sizeof(data));

	data.device_type = scsi_dev_direct_access;
	data.device_qualifier = scsi_periph_qual_connected;

	data.device_type_modifier = 0;
	data.removable_medium = false;

	data.ansi_version = 2;
	data.ecma_version = 0;
	data.iso_version = 0;

	data.response_data_format = 2;
	data.term_iop = false;
		// to be changed if we support TERM I/O

	data.additional_length = sizeof(scsi_res_inquiry) - 4;

	data.soft_reset = false;
	data.cmd_queue = 0;//device->queue_depth > 1;
	data.linked = false;

	// these values are free-style
	data.sync = false;
	data.write_bus16 = true;
	data.write_bus32 = false;

	data.relative_address = false;

	// the following fields are *much* to small, sigh...
	memcpy(data.vendor_ident, device->infoblock.model_number,
		sizeof(data.vendor_ident));
	memcpy(data.product_ident, device->infoblock.model_number + 8,
		sizeof(data.product_ident));
	memcpy(data.product_rev, "    ", sizeof(data.product_rev));

	copy_sg_data(ccb, 0, allocation_length, &data, sizeof(data), false);

	// residual is limited by both the allocation length and the buffer size
	transfer_size = min(sizeof(data), allocation_length);
	transfer_size = min(transfer_size, ccb->data_length);

	ccb->data_resid = ccb->data_length - transfer_size;
}
/*!	Emulate the SCSI READ CAPACITY (10) command.
	Reports a fixed 512-byte block size and the LBA of the last block.
*/
static void
scsi_read_capacity(ide_device_info *device, ata_request *request)
{
	scsi_ccb *ccb = request->ccb;
	scsi_res_read_capacity data;
	scsi_cmd_read_capacity *cmd = (scsi_cmd_read_capacity *)ccb->cdb;
	uint32 lastBlock;

	TRACE("scsi_read_capacity\n");

	// partial medium indicator / explicit LBA are not supported
	if (cmd->pmi || cmd->lba) {
		ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_CDB_FIELD);
		return;
	}

	// TODO: 512 bytes fixed block size?
	data.block_size = B_HOST_TO_BENDIAN_INT32(512);

	lastBlock = device->total_sectors - 1;
	data.lba = B_HOST_TO_BENDIAN_INT32(lastBlock);

	copy_sg_data(ccb, 0, ccb->data_length, &data, sizeof(data), false);

	// data_length is unsigned, so the former
	// "max(ccb->data_length - sizeof(data), 0)" underflowed to a huge
	// value whenever the buffer was smaller than the response; compute
	// the residual with an explicit comparison instead
	if (ccb->data_length > sizeof(data))
		ccb->data_resid = ccb->data_length - sizeof(data);
	else
		ccb->data_resid = 0;
}
/*!	Emulate the SCSI REQUEST SENSE command.
	Returns the sense data recorded for the previous command (still stored
	in \a request, see ata_exec_io) and then clears it, as the SCSI spec
	requires sense data to be reported only once.
*/
void
scsi_request_sense(ide_device_info *device, ata_request *request)
{
	scsi_ccb *ccb = request->ccb;
	scsi_cmd_request_sense *cmd = (scsi_cmd_request_sense *)ccb->cdb;
	scsi_sense sense;
	uint32 transferSize;

	TRACE("scsi_request_sense\n");

	// Copy sense data from last request into data buffer of current request.
	// The sense data of last request is still present in the current request,
	// as is isn't been cleared by ata_exec_io for SCSI_OP_REQUEST_SENSE.
	if (request->senseKey != 0)
		scsi_set_sense(&sense, request);
	else
		memset(&sense, 0, sizeof(sense));

	copy_sg_data(ccb, 0, cmd->allocation_length, &sense, sizeof(sense), false);

	// residual is limited by both the allocation length and the buffer size
	transferSize = min(sizeof(sense), cmd->allocation_length);
	transferSize = min(transferSize, ccb->data_length);

	ccb->data_resid = ccb->data_length - transferSize;

	// reset sense information on read
	ata_request_clear_sense(request);
}
/*!	Execute a SCSI command on an ATA device by translating or emulating it.
	Commands that map to an ATA transfer (READ/WRITE) are handed off to
	ata_exec_read_write() and return early; everything else is emulated
	synchronously and finished here via ata_request_finish().
*/
void
ata_exec_io(ide_device_info *device, ata_request *request)
{
	scsi_ccb *ccb = request->ccb;

	//FLOW("ata_exec_io: scsi command 0x%02x\n", ccb->cdb[0]);

	// ATA devices have one LUN only
	if (ccb->target_lun != 0) {
		FLOW("ata_exec_io: wrong target lun\n");
		ata_request_set_status(request, SCSI_SEL_TIMEOUT);
		ata_request_finish(request, false /* no resubmit */);
		return;
	}

	if (ccb->cdb[0] == SCSI_OP_REQUEST_SENSE) {
		// No initial clear sense, as this request is used
		// by the scsi stack to request the sense data of
		// the previous command.
		scsi_request_sense(device, request);
		ata_request_finish(request, false /* no resubmit */);
		return;
	}

	ata_request_clear_sense(request);

	switch (ccb->cdb[0]) {
		case SCSI_OP_TEST_UNIT_READY:
			scsi_test_unit_ready(device, request);
			break;

		case SCSI_OP_FORMAT: /* FORMAT UNIT */
			// we could forward ccb to disk, but modern disks cannot
			// be formatted anyway, so we just refuse ccb
			// (exceptions are removable media devices, but to my knowledge
			// they don't have to be formatted as well)
			ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;

		case SCSI_OP_INQUIRY:
			scsi_inquiry(device, request);
			break;

		case SCSI_OP_MODE_SELECT_10:
			scsi_mode_select_10(device, request);
			break;

		case SCSI_OP_MODE_SENSE_10:
			scsi_mode_sense_10(device, request);
			break;

		case SCSI_OP_MODE_SELECT_6:
		case SCSI_OP_MODE_SENSE_6:
			// we've told SCSI bus manager to emulate these commands
			ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;

		case SCSI_OP_RESERVE:
		case SCSI_OP_RELEASE:
			// though mandatory, this doesn't make much sense in a
			// single initiator environment; so what
			ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;

		case SCSI_OP_START_STOP: {
			scsi_cmd_ssu *cmd = (scsi_cmd_ssu *)ccb->cdb;

			// with no LoEj bit set, we should only allow/deny further access
			// we ignore that (unsupported for ATA)
			// with LoEj bit set, we should additionally either load or eject
			// the medium (start = 0 - eject; start = 1 - load)

			if (!cmd->start)
				// we must always flush cache if start = 0
				scsi_synchronize_cache(device, request);

			if (cmd->load_eject)
				scsi_load_eject(device, request, cmd->start);
			break;
		}

		case SCSI_OP_PREVENT_ALLOW:
		{
			scsi_cmd_prevent_allow *cmd = (scsi_cmd_prevent_allow *)ccb->cdb;

			scsi_prevent_allow(device, request, cmd->prevent);
			break;
		}

		case SCSI_OP_READ_CAPACITY:
			scsi_read_capacity(device, request);
			break;

		case SCSI_OP_VERIFY:
			// does anyone use this function?
			// effectively, it does a read-and-compare, which IDE doesn't support
			ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;

		case SCSI_OP_SYNCHRONIZE_CACHE:
			// we ignore range and immediate bit, we always immediately flush everything
			scsi_synchronize_cache(device, request);
			break;

		// sadly, there are two possible read/write operation codes;
		// at least, the third one, read/write(12), is not valid for DAS
		case SCSI_OP_READ_6:
		case SCSI_OP_WRITE_6:
		{
			scsi_cmd_rw_6 *cmd = (scsi_cmd_rw_6 *)ccb->cdb;
			uint32 pos;
			size_t length;

			pos = ((uint32)cmd->high_lba << 16) | ((uint32)cmd->mid_lba << 8)
				| (uint32)cmd->low_lba;

			// a length of 0 means 256 blocks for READ(6)/WRITE(6)
			length = cmd->length != 0 ? cmd->length : 256;

			//FLOW("READ6/WRITE6 pos=%lx, length=%lx\n", pos, length);

			ata_exec_read_write(device, request, pos, length, cmd->opcode == SCSI_OP_WRITE_6);
			return;
		}

		case SCSI_OP_READ_10:
		case SCSI_OP_WRITE_10:
		{
			scsi_cmd_rw_10 *cmd = (scsi_cmd_rw_10 *)ccb->cdb;
			uint32 pos;
			size_t length;

			pos = B_BENDIAN_TO_HOST_INT32(cmd->lba);
			length = B_BENDIAN_TO_HOST_INT16(cmd->length);

			//FLOW("READ10/WRITE10 pos=%lx, length=%lx\n", pos, length);

			if (length != 0) {
				ata_exec_read_write(device, request, pos, length, cmd->opcode == SCSI_OP_WRITE_10);
				return;
			} else {
				// we cannot transfer zero blocks (apart from LBA48)
				ata_request_set_status(request, SCSI_REQ_CMP);
			}

			// fix: this case previously fell through into "default",
			// which overwrote the completed status with an
			// "invalid opcode" sense for zero-length transfers
			break;
		}

		default:
			FLOW("command not implemented\n");
			ata_request_set_sense(request, SCSIS_KEY_ILLEGAL_REQUEST, SCSIS_ASC_INV_OPCODE);
			break;
	}

	ata_request_finish(request, false /* no resubmit */);
}

View File

@ -1,346 +0,0 @@
/*
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
* Distributed under the terms of the MIT License.
*/
/*
Part of Open IDE bus manager
Handling of passive waiting and synchronized procedure calls.
The latter are calls that get delayed until the bus is idle.
*/
#include "ide_internal.h"
#include "ide_sim.h"
#include <string.h>
//#define TRACE_SYNC
#ifdef TRACE_SYNC
# define TRACE(x) { dprintf("%s(): ", __FUNCTION__); dprintf x ; }
#else
# define TRACE(x) ;
#endif
/** DPC handler for IRQs.
 *  NOTE(review): the entire body is compiled out (#if 0); this function is
 *  currently a no-op kept for reference while the driver is reworked.
 */
void
ide_dpc(void *arg)
{
#if 0
	ide_bus_info *bus = (ide_bus_info *)arg;
	ata_request *request;
	ide_device_info *device;

	TRACE(("\n"));

	//snooze(500000);

	// IRQ handler doesn't tell us whether this bus was in async_wait or
	// in idle state, so we just check whether there is an active request,
	// which means that we were async_waiting
	if (bus->active_qrequest != NULL) {
		TRACE(("continue command\n"));

		// cancel timeout
		cancel_timer(&bus->timer.te);

		request = bus->active_qrequest;
		device = request->device;

		// not perfect but simple: we simply know who is waiting why
		if (device->is_atapi)
			packet_dpc(request);
		else {
			if (request->uses_dma)
				ata_dpc_DMA(request);
			else
				ata_dpc_PIO(request);
		}
	} else {
		// no request active, so this must be a service request or
		// a spurious IRQ; access_finished will take care of testing
		// for service requests
		TRACE(("irq in idle mode - possible service request\n"));

		device = get_current_device(bus);
		if (device == NULL) {
			// got an interrupt from a non-existing device
			// either this is a spurious interrupt or there *is* a device
			// but we haven't detected it - we better ignore it silently
			access_finished(bus, bus->first_device);
		} else {
			// access_finished always checks the other device first, but as
			// we do have a service request, we negate the negation
			access_finished(bus, device->other_device);
		}

		// let XPT resend commands that got blocked
		scsi->cont_send_bus(bus->scsi_cookie);
	}

	return;

/*err:
	xpt->cont_send( bus->xpt_cookie );*/
#endif
}
/** Handler for IDE IRQs.
 *  NOTE(review): currently disabled - it unconditionally reports the
 *  interrupt as unhandled. The original state-machine dispatch below is
 *  kept as a comment for reference (the comment contained some paste
 *  corruption, cleaned up here).
 */
status_t
ide_irq_handler(ide_bus_info *bus, uint8 status)
{
	return B_UNHANDLED_INTERRUPT;

/*
	ide_device_info *device;

	// the first IRQ
	IDE_LOCK(bus);

	device = bus->active_device;
	if (device == NULL) {
		IDE_UNLOCK(bus);
		TRACE(("IRQ though there is no active device\n"));
		return B_UNHANDLED_INTERRUPT;
	}

	if ((status & ide_status_bsy) != 0) {
		// the IRQ seems to be fired before the last command was sent,
		// i.e. it's not the one that signals finishing of command
		IDE_UNLOCK(bus);
		TRACE(("IRQ though device is busy\n"));
		return B_UNHANDLED_INTERRUPT;
	}

	switch (bus->state) {
		case ide_state_async_waiting:
			TRACE(("state: async waiting\n"));
			bus->state = ide_state_accessing;
			IDE_UNLOCK(bus);
			scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
			return B_INVOKE_SCHEDULER;

		case ide_state_idle:
			TRACE(("state: idle, num_running_reqs %d\n", bus->num_running_reqs));
			// this must be a service request;
			// if no request is pending, the IRQ was fired wrongly
			if (bus->num_running_reqs == 0) {
				IDE_UNLOCK(bus);
				return B_UNHANDLED_INTERRUPT;
			}
			// answer the service request by (re)sending the command
			if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_command) != B_OK)
				goto err_clearint;
			bus->state = ide_state_accessing;
			IDE_UNLOCK(bus);
			scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_dpc, bus);
			return B_INVOKE_SCHEDULER;

		case ide_state_sync_waiting:
			TRACE(("state: sync waiting\n"));
			bus->state = ide_state_accessing;
			bus->sync_wait_timeout = false;
			IDE_UNLOCK(bus);
			if (bus->controller->write_command_block_regs(bus->channel_cookie, &device->tf, ide_mask_command) != B_OK)
				goto err_clearint;
			release_sem_etc(bus->sync_wait_sem, 1, B_DO_NOT_RESCHEDULE);
			return B_INVOKE_SCHEDULER;

		case ide_state_accessing:
			TRACE(("state: spurious IRQ - there is a command being executed\n"));
			IDE_UNLOCK(bus);
			return B_UNHANDLED_INTERRUPT;

		default:
			dprintf("BUG: unknown state (%d)\n", bus->state);
			IDE_UNLOCK(bus);
			return B_UNHANDLED_INTERRUPT;
	}
*/
}
/** cancel IRQ timeout
* it doesn't matter whether there really was a timout running;
* on return, bus state is set to _accessing_
*/
/*
void
cancel_irq_timeout(ide_bus_info *bus)
{
IDE_LOCK(bus);
bus->state = ide_state_accessing;
IDE_UNLOCK(bus);
cancel_timer(&bus->timer.te);
}
// start waiting for IRQ with bus lock hold
// new_state must be either sync_wait or async_wait
void
start_waiting(ide_bus_info *bus, uint32 timeout, int new_state)
{
int res;
TRACE(("timeout = %u\n", (uint)timeout));
bus->state = new_state;
res = add_timer(&bus->timer.te, ide_timeout,
(bigtime_t)timeout * 1000000, B_ONE_SHOT_RELATIVE_TIMER);
if (res != B_OK)
panic("Error setting timeout (%s)", strerror(res));
IDE_UNLOCK(bus);
}
// start waiting for IRQ with bus lock not hold
void
start_waiting_nolock(ide_bus_info *bus, uint32 timeout, int new_state)
{
IDE_LOCK(bus);
start_waiting(bus, timeout, new_state);
}
// wait for sync IRQ
void
wait_for_sync(ide_bus_info *bus)
{
acquire_sem(bus->sync_wait_sem);
cancel_timer(&bus->timer.te);
}
*/
// Timeout DPC handler.
// NOTE(review): the body is entirely commented out, so this is a no-op.
// The commented code also reads `request` before it is ever assigned -
// it must be fixed (e.g. taken from the bus) before being re-enabled.
static void
ide_timeout_dpc(void *arg)
{
/*
	ide_bus_info *bus = (ide_bus_info *)arg;
	ata_request *request;
	ide_device_info *device;

	device = request->device;

	dprintf("ide: ide_timeout_dpc() bus %p, device %p\n", bus, device);

	// this also resets overlapped commands
//	reset_device(device, request);

	device->subsys_status = SCSI_CMD_TIMEOUT;

	if (request->uses_dma) {
		if (++device->DMA_failures >= MAX_DMA_FAILURES) {
			dprintf("Disabling DMA because of too many errors\n");
			device->DMA_enabled = false;
		}
	}

	// let upper layer do the retry
	finish_checksense(request);
*/
}
// Timeout handler, called by the system timer.
// NOTE(review): the state-machine handling is commented out; currently this
// only logs the timeout and never reschedules.
status_t
ide_timeout(timer *arg)
{
	ide_bus_info *bus = ((ide_bus_timer_info *)arg)->bus;

	dprintf("ide: ide_timeout() bus %p\n", bus);

/*
	// we need to lock bus to have a solid bus state
	// (side effect: we lock out the IRQ handler)
	IDE_LOCK(bus);

	switch (bus->state) {
		case ide_state_async_waiting:
			TRACE(("async waiting\n"));
			bus->state = ide_state_accessing;
			IDE_UNLOCK(bus);
			scsi->schedule_dpc(bus->scsi_cookie, bus->irq_dpc, ide_timeout_dpc, bus);
			return B_INVOKE_SCHEDULER;

		case ide_state_sync_waiting:
			TRACE(("sync waiting\n"));
			bus->state = ide_state_accessing;
			bus->sync_wait_timeout = true;
			IDE_UNLOCK(bus);
			release_sem_etc(bus->sync_wait_sem, 1, B_DO_NOT_RESCHEDULE);
			return B_INVOKE_SCHEDULER;

		case ide_state_accessing:
			TRACE(("came too late - IRQ occured already\n"));
			IDE_UNLOCK(bus);
			return B_DO_NOT_RESCHEDULE;

		default:
			// this case also happens if a timeout fires too late;
			// unless there is a bug, the timeout should always be canceled
			// before declaring bus as being idle
			dprintf("BUG: unknown state (%d)\n", (int)bus->state);
			IDE_UNLOCK(bus);
			return B_DO_NOT_RESCHEDULE;
	}
*/
	return B_DO_NOT_RESCHEDULE;
}
/** Finish bus access;
 *  check whether any device wants to service pending commands and execute
 *  synced procedure calls.
 *  NOTE(review): currently a stub that only traces - the synced-PC
 *  dispatching has not been (re)implemented.
 */
void
access_finished(ide_bus_info *bus, ide_device_info *device)
{
	TRACE(("bus = %p, device = %p\n", bus, device));

	// this would be the correct place to call synced PCs
}

View File

@ -1,121 +0,0 @@
/*
* Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
* Copyright 2002/03, Thomas Kurschel. All rights reserved.
*
* Distributed under the terms of the MIT License.
*/
/*
VM helper functions.
Important assumption: get_memory_map must combine adjacent
physical pages, so contignous memory always leads to a S/G
list of length one.
*/
#include "KernelExport_ext.h"
#include "wrapper.h"
#include <string.h>
/** Build a physical scatter/gather list for (part of) an iovec array.
 *  \param vec          iovec array to analyze
 *  \param vec_count    number of entries in \a vec
 *  \param vec_offset   bytes to skip at the beginning of \a vec
 *  \param len          number of bytes to map
 *  \param map          output array of physical entries
 *  \param max_entries  capacity of \a map
 *  \param num_entries  set to the number of entries actually used
 *  \param mapped_len   set to the number of bytes covered by \a map
 *  TBD: this should be moved to somewhere in kernel
 */
status_t
get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
	physical_entry *map, size_t max_entries, size_t *num_entries, size_t *mapped_len)
{
	size_t cur_idx;
	size_t left_len;

	SHOW_FLOW(3, "vec_count=%lu, vec_offset=%lu, len=%lu, max_entries=%lu",
		vec_count, vec_offset, len, max_entries);

	// skip iovec blocks if needed
	while (vec_count > 0 && vec_offset > vec->iov_len) {
		vec_offset -= vec->iov_len;
		--vec_count;
		++vec;
	}

	for (left_len = len, cur_idx = 0; left_len > 0 && vec_count > 0 && cur_idx < max_entries;) {
		char *range_start;
		size_t range_len;
		status_t res;
		size_t cur_num_entries, cur_mapped_len;
		uint32 tmp_idx;

		SHOW_FLOW( 3, "left_len=%d, vec_count=%d, cur_idx=%d",
			(int)left_len, (int)vec_count, (int)cur_idx );

		// map one iovec
		range_start = (char *)vec->iov_base + vec_offset;
		range_len = min( vec->iov_len - vec_offset, left_len );

		SHOW_FLOW( 3, "range_start=%x, range_len=%x",
			(int)range_start, (int)range_len );

		vec_offset = 0;

		if ((res = get_memory_map(range_start, range_len, &map[cur_idx],
				max_entries - cur_idx)) != B_OK) {
			// according to docu, no error is ever reported - argh!
			SHOW_ERROR(1, "invalid io_vec passed (%s)", strerror(res));
			return res;
		}

		// stupid: get_memory_map does neither tell how many sg blocks
		// are used nor whether there were enough sg blocks at all;
		// -> determine that manually
		cur_mapped_len = 0;
		cur_num_entries = 0;

		for (tmp_idx = cur_idx; tmp_idx < max_entries; ++tmp_idx) {
			if (map[tmp_idx].size == 0)
				break;

			cur_mapped_len += map[tmp_idx].size;
			++cur_num_entries;
		}

		if (cur_mapped_len == 0) {
			panic("get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
				(int)left_len, (int)cur_idx, (int)max_entries);
			SHOW_ERROR(2, "get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
				(int)left_len, (int)cur_idx, (int)max_entries);
			return B_ERROR;
		}

		SHOW_FLOW( 3, "cur_num_entries=%d, cur_mapped_len=%x",
			(int)cur_num_entries, (int)cur_mapped_len );

		// try to combine with previous sg block
		if (cur_num_entries > 0 && cur_idx > 0
			&& map[cur_idx].address == (char *)map[cur_idx - 1].address + map[cur_idx - 1].size) {
			SHOW_FLOW0( 3, "combine with previous chunk" );
			map[cur_idx - 1].size += map[cur_idx].size;
			// fix: source and destination overlap when more than two
			// entries are shifted down, so memmove must be used here
			// (memcpy on overlapping regions is undefined behavior)
			memmove(&map[cur_idx], &map[cur_idx + 1], (cur_num_entries - 1) * sizeof(map[0]));
			--cur_num_entries;
		}

		cur_idx += cur_num_entries;
		left_len -= cur_mapped_len;

		// advance iovec if current one is described completely
		if (cur_mapped_len == range_len) {
			++vec;
			--vec_count;
		}
	}

	*num_entries = cur_idx;
	*mapped_len = len - left_len;

	SHOW_FLOW( 3, "num_entries=%d, mapped_len=%x",
		(int)*num_entries, (int)*mapped_len );

	return B_OK;
}

View File

@ -1,90 +0,0 @@
#ifndef _WRAPPER_H
#define _WRAPPER_H

/*
 * Convenience wrappers used by the (legacy) IDE bus_manager:
 *  - benaphore-style locking macros, mapped onto kernel mutexes, and
 *  - leveled debug-output macros (flow / info / error) built on dprintf().
 */

#include <KernelExport.h>
#include <lock.h>

// benaphores
// Historically real benaphores; nowadays thin wrappers around kernel mutexes.
// INIT_BEN evaluates to B_OK via the comma operator so callers can keep
// checking the "creation" result.
#define INIT_BEN(x, prefix)	(mutex_init_etc(x, prefix, MUTEX_FLAG_CLONE_NAME), \
								B_OK)
#define DELETE_BEN(x)		mutex_destroy(x)
#define ACQUIRE_BEN(x)		mutex_lock(x)
#define RELEASE_BEN(x)		mutex_unlock(x)

// debug output
// Optional pauses after a message was printed, so output can be read on
// screen before the system continues (useful during early bring-up).
#ifdef DEBUG_WAIT_ON_MSG
#	define DEBUG_WAIT snooze( DEBUG_WAIT_ON_MSG );
#else
#	define DEBUG_WAIT
#endif

#ifdef DEBUG_WAIT_ON_ERROR
#	define DEBUG_WAIT_ERROR snooze( DEBUG_WAIT_ON_ERROR );
#else
#	define DEBUG_WAIT_ERROR
#endif

// Compile-time ceilings per category; messages with a higher seriousness
// value than these are compiled out entirely.
#ifndef DEBUG_MAX_LEVEL_FLOW
#	define DEBUG_MAX_LEVEL_FLOW 4
#endif

#ifndef DEBUG_MAX_LEVEL_INFO
#	define DEBUG_MAX_LEVEL_INFO 4
#endif

#ifndef DEBUG_MAX_LEVEL_ERROR
#	define DEBUG_MAX_LEVEL_ERROR 4
#endif

// Prefix prepended to every message; define before including this header.
#ifndef DEBUG_MSG_PREFIX
#	define DEBUG_MSG_PREFIX ""
#endif

// Run-time thresholds per category (may be variables to allow tuning).
#ifndef debug_level_flow
#	define debug_level_flow 4
#endif

#ifndef debug_level_info
#	define debug_level_info 4
#endif

#ifndef debug_level_error
#	define debug_level_error 4
#endif

// Expands to two printf arguments: the module prefix and the function name.
#define FUNC_NAME DEBUG_MSG_PREFIX, __FUNCTION__

// SHOW_*(seriousness, format, args...) print when seriousness passes both
// the run-time level and the compile-time ceiling; the *0 variants take no
// variadic arguments. "format" must be a string literal (it is concatenated
// with "\n").
#define SHOW_FLOW(seriousness, format, param...) \
	do { if( seriousness <= debug_level_flow && seriousness <= DEBUG_MAX_LEVEL_FLOW ) { \
		dprintf( "%s%s: "format"\n", FUNC_NAME, param ); DEBUG_WAIT \
	}} while( 0 )

#define SHOW_FLOW0(seriousness, format) \
	do { if( seriousness <= debug_level_flow && seriousness <= DEBUG_MAX_LEVEL_FLOW ) { \
		dprintf( "%s%s: "format"\n", FUNC_NAME); DEBUG_WAIT \
	}} while( 0 )

#define SHOW_INFO(seriousness, format, param...) \
	do { if( seriousness <= debug_level_info && seriousness <= DEBUG_MAX_LEVEL_INFO ) { \
		dprintf( "%s%s: "format"\n", FUNC_NAME, param ); DEBUG_WAIT \
	}} while( 0 )

#define SHOW_INFO0(seriousness, format) \
	do { if( seriousness <= debug_level_info && seriousness <= DEBUG_MAX_LEVEL_INFO ) { \
		dprintf( "%s%s: "format"\n", FUNC_NAME); DEBUG_WAIT \
	}} while( 0 )

#define SHOW_ERROR(seriousness, format, param...) \
	do { if( seriousness <= debug_level_error && seriousness <= DEBUG_MAX_LEVEL_ERROR ) { \
		dprintf( "%s%s: "format"\n", FUNC_NAME, param ); DEBUG_WAIT_ERROR \
	}} while( 0 )

#define SHOW_ERROR0(seriousness, format) \
	do { if( seriousness <= debug_level_error && seriousness <= DEBUG_MAX_LEVEL_ERROR ) { \
		dprintf( "%s%s: "format"\n", FUNC_NAME); DEBUG_WAIT_ERROR \
	}} while( 0 )

#endif /* _WRAPPER_H */