/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "migration/vmstate.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/ide/pci.h"
#include "trace.h"

#define BMDMA_PAGE_SIZE 4096

#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
         IDE_RETRY_READ | IDE_RETRY_FLUSH)
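
/*
 * The PCI-native control block BAR is four bytes wide; only a one-byte
 * access at offset 2 (Alternate Status on read, Device Control on write)
 * is valid.  Other reads return all-ones and other writes are ignored.
 */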
static uint64_t pci_ide_cmd_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return ((uint64_t)1 << (size * 8)) - 1;
    }
    return ide_status_read(bus, addr + 2);
}

static void pci_ide_cmd_write(void *opaque, hwaddr addr,
                              uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return;
    }
    ide_cmd_write(bus, addr + 2, data);
}

const MemoryRegionOps pci_ide_cmd_le_ops = {
    .read = pci_ide_cmd_read,
    .write = pci_ide_cmd_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
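
/*
 * The command block BAR: one-byte accesses reach the task-file registers
 * through ide_ioport_read/write, while 16- and 32-bit accesses at offset
 * 0 go to the data port.  Anything else reads as all-ones and is
 * discarded on write.
 */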
static uint64_t pci_ide_data_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        return ide_ioport_read(bus, addr);
    } else if (addr == 0) {
        if (size == 2) {
            return ide_data_readw(bus, addr);
        } else {
            return ide_data_readl(bus, addr);
        }
    }
    return ((uint64_t)1 << (size * 8)) - 1;
}

static void pci_ide_data_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        ide_ioport_write(bus, addr, data);
    } else if (addr == 0) {
        if (size == 2) {
            ide_data_writew(bus, addr, data);
        } else {
            ide_data_writel(bus, addr, data);
        }
    }
}

const MemoryRegionOps pci_ide_data_le_ops = {
    .read = pci_ide_data_read,
    .write = pci_ide_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
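
/*
 * Latch the completion callback and reset the PRD cursor.  The callback
 * only fires immediately if the guest has already set the SSBM (start)
 * bit; otherwise bmdma_cmd_writeb() invokes it once the engine starts.
 */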
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}
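
/*
 * Each PRD (Physical Region Descriptor) is two little-endian 32-bit
 * words: a physical base address, and a byte count in the low 16 bits
 * (0 means 64 KiB) with bit 31 flagging the final entry of the table.
 */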

/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for (;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            /* Note: We limit the max transfer to be 2GiB.
             * This should accommodate the largest ATA transaction
             * for LBA48 (65,536 sectors) and 32K sector sizes. */
            if (s->sg.size > INT32_MAX) {
                error_report("IDE: sglist describes more than 2GiB.");
                break;
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    qemu_sglist_destroy(&s->sg);
    s->io_buffer_size = 0;
    return -1;
}

/*
 * Copy between s->io_buffer and the guest memory described by the PRD
 * table, in the direction given by @is_write.  Returns 0 once the PRD
 * table is exhausted (the bus-master transfer is complete), and 1 once
 * the whole io_buffer range has been copied.
 */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for (;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0) {
            break;
        }
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return 0;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len) {
            l = bm->cur_prd_len;
        }
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}
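
/*
 * Drop the DMA callback and update BM_STATUS_DMAING: @more keeps the
 * bit set because the overall transfer is not finished yet.
 */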
static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}
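
/* Rewind the PRD pointer to the start of the table so that a retried
 * transfer walks the descriptors from the beginning. */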
static void bmdma_restart_dma(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    trace_bmdma_reset();
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}
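
/*
 * Interposed on the IDE bus IRQ line (see bmdma_init): a rising edge
 * latches BM_STATUS_INT before the level is forwarded to the original
 * interrupt.
 */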
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}
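
/*
 * Only the SSBM (start/stop, bit 0) and read/write direction (bit 3)
 * command bits are stored, hence the final "val & 0x09".  Clearing SSBM
 * cancels any in-flight request synchronously; setting it rewinds the
 * PRD pointer and kicks a pending DMA callback.
 */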
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
    trace_bmdma_cmd_writeb(val);

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            ide_cancel_dma_sync(idebus_active_if(bm->bus));
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb) {
                    bm->dma_cb(bmdma_active_if(bm), 0);
                }
            }
        }
    }

    bm->cmd = val & 0x09;
}
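
/*
 * The 32-bit PRD table base register is byte-lane addressable; each
 * access reads or rewrites only the lanes selected by @addr and @width.
 * The low two bits are forced to zero, keeping the table dword-aligned.
 */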
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
    trace_bmdma_addr_read(data);
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

    trace_bmdma_addr_write(data);
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};
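
/*
 * Migration subsection predicates: a subsection is emitted only when
 * its .needed callback returns true on the source side.
 */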
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}
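
/*
 * Fold the bus's retry state into fields of BMDMAState so it travels
 * with the existing migration format; a callback pending on a stopped
 * engine is recorded as a command to retry on the destination.
 */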
static int ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) {
        bm->bus->error_status =
            ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd);
    }
    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);

    return 0;
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for (i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions stored
           bigger values. We only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};
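
/* Attach up to four drives from @hd_table: two IDE buses with a master
 * and a slave unit each. */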
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = PCI_IDE(dev);
    static const int bus[4] = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL) {
            continue;
        }
        ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};
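
/*
 * Wire the BMDMA engine into @bus and interpose bmdma_irq on the bus IRQ
 * so DMA interrupt status can be latched; a second call for the same bus
 * is a no-op.
 */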
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->pci_dev = d;
}

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)