ide: add support for IDEBufferedRequest

This patch adds a new AIO readv compatible function which copies
all data through a bounce buffer. These buffered requests can be
flagged as orphaned, meaning that their original callback has
already been invoked but the request has not yet been completed
by the backend storage. The bounce buffer guarantees that guest
memory corruption is avoided when such an orphaned request is
completed by the backend at a later stage.

This trick only works for read requests, because a write request
completed at a later stage might corrupt data: there is no way to
control whether, and which, data has already been written to the
storage.
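
To illustrate how the orphaned flag is meant to be consumed, here is a
minimal sketch of a cancel path (illustrative only, not part of this
patch; a follow-up patch in this series wires the real logic into the
DMA cancel path). The guest-visible callback is completed immediately,
and the bounce buffer is left to be reaped by ide_buffered_readv_cb()
whenever the backend finally finishes:

/* Illustrative sketch: orphan all in-flight buffered reads. The
 * original callback is invoked right away with -ECANCELED; the
 * backend later completes into the bounce buffer, which
 * ide_buffered_readv_cb() then frees without touching guest memory. */
static void ide_orphan_buffered_requests(IDEState *s)
{
    IDEBufferedRequest *req;

    QLIST_FOREACH(req, &s->buffered_requests, list) {
        if (!req->orphaned) {
            req->original_cb(req->original_opaque, -ECANCELED);
            req->orphaned = true;
        }
    }
}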

Signed-off-by: Peter Lieven <pl@kamp.de>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 1447345846-15624-4-git-send-email-pl@kamp.de
Signed-off-by: John Snow <jsnow@redhat.com>
Peter Lieven, 2015-11-17 15:06:25 -05:00, committed by John Snow
parent ca78ecfa72
commit 1d8c11d631
2 changed files with 61 additions and 0 deletions

hw/ide/core.c

@@ -561,6 +561,53 @@ static bool ide_sect_range_ok(IDEState *s,
     return true;
 }
 
+static void ide_buffered_readv_cb(void *opaque, int ret)
+{
+    IDEBufferedRequest *req = opaque;
+    if (!req->orphaned) {
+        if (!ret) {
+            qemu_iovec_from_buf(req->original_qiov, 0, req->iov.iov_base,
+                                req->original_qiov->size);
+        }
+        req->original_cb(req->original_opaque, ret);
+    }
+    QLIST_REMOVE(req, list);
+    qemu_vfree(req->iov.iov_base);
+    g_free(req);
+}
+
+#define MAX_BUFFERED_REQS 16
+
+BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
+                               QEMUIOVector *iov, int nb_sectors,
+                               BlockCompletionFunc *cb, void *opaque)
+{
+    BlockAIOCB *aioreq;
+    IDEBufferedRequest *req;
+    int c = 0;
+
+    QLIST_FOREACH(req, &s->buffered_requests, list) {
+        c++;
+    }
+    if (c > MAX_BUFFERED_REQS) {
+        return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
+    }
+
+    req = g_new0(IDEBufferedRequest, 1);
+    req->original_qiov = iov;
+    req->original_cb = cb;
+    req->original_opaque = opaque;
+    req->iov.iov_base = qemu_blockalign(blk_bs(s->blk), iov->size);
+    req->iov.iov_len = iov->size;
+    qemu_iovec_init_external(&req->qiov, &req->iov, 1);
+
+    aioreq = blk_aio_readv(s->blk, sector_num, &req->qiov, nb_sectors,
+                           ide_buffered_readv_cb, req);
+
+    QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
+
+    return aioreq;
+}
+
 static void ide_sector_read(IDEState *s);
 static void ide_sector_read_cb(void *opaque, int ret)
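
Usage note: a caller simply switches from blk_aio_readv() to
ide_buffered_readv() with the same arguments plus the IDEState; a
follow-up patch in this series does this for PIO sector reads. A
hypothetical call site, assuming the usual ide_sector_read() locals:

/* Hypothetical call site: route a PIO sector read through the bounce
 * buffer. s->qiov describes guest memory; data is copied into it only
 * if the request has not been orphaned by the time it completes. */
s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
                                  ide_sector_read_cb, s);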

hw/ide/internal.h

@@ -343,6 +343,16 @@ enum ide_dma_cmd {
 #define ide_cmd_is_read(s) \
     ((s)->dma_cmd == IDE_DMA_READ)
 
+typedef struct IDEBufferedRequest {
+    QLIST_ENTRY(IDEBufferedRequest) list;
+    struct iovec iov;
+    QEMUIOVector qiov;
+    QEMUIOVector *original_qiov;
+    BlockCompletionFunc *original_cb;
+    void *original_opaque;
+    bool orphaned;
+} IDEBufferedRequest;
+
 /* NOTE: IDEState represents in fact one drive */
 struct IDEState {
     IDEBus *bus;
@@ -396,6 +406,7 @@ struct IDEState {
     BlockAIOCB *pio_aiocb;
     struct iovec iov;
     QEMUIOVector qiov;
+    QLIST_HEAD(, IDEBufferedRequest) buffered_requests;
     /* ATA DMA state */
     uint64_t io_buffer_offset;
     int32_t io_buffer_size;
@@ -572,6 +583,9 @@ void ide_set_inactive(IDEState *s, bool more);
 BlockAIOCB *ide_issue_trim(BlockBackend *blk,
         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
         BlockCompletionFunc *cb, void *opaque);
+BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
+                               QEMUIOVector *iov, int nb_sectors,
+                               BlockCompletionFunc *cb, void *opaque);
 
 /* hw/ide/atapi.c */
 void ide_atapi_cmd(IDEState *s);