macio: switch over to new byte-aligned DMA helpers

Now that the DMA helpers are byte-aligned, they can be called directly from
the macio routines rather than emulating byte-aligned accesses via multiple
block-level accesses.

Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk>
Reviewed-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Message-Id: 1476445266-27503-3-git-send-email-mark.cave-ayland@ilande.co.uk
Signed-off-by: John Snow <jsnow@redhat.com>
Mark Cave-Ayland, 2016-10-27 16:29:13 -04:00; committed by John Snow
commit be1e343995 (parent 99868af3d0)

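Before the diff itself, a minimal sketch of the pattern the commit switches to (illustrative only: issue_dma_read() is an invented wrapper name, while the types and helpers are the real QEMU internals used in hw/ide/macio.c): the transfer callbacks now describe the guest buffer with a QEMUSGList and hand it, with one-byte alignment, straight to dma_blk_read()/dma_blk_write(), instead of hand-stitching pre-read head/tail sector fragments into a QEMUIOVector:

/*
 * Sketch only -- not part of this commit. issue_dma_read() is a made-up
 * wrapper to show the call shape; IDEState, DBDMA_io, qemu_sglist_*() and
 * dma_blk_read() are the real internals used in the diff below.
 */
#include "qemu/osdep.h"
#include "hw/ppc/mac.h"
#include "sysemu/dma.h"

static void issue_dma_read(MACIOIDEState *m, DBDMA_io *io, int64_t offset,
                           void (*cb)(void *opaque, int ret))
{
    IDEState *s = idebus_active_if(&m->bus);

    /* Describe the guest-physical buffer as a scatter/gather list; the
     * DMA helper performs the dma_memory_map()/unmap dance internally. */
    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);

    /* align == 0x1 (one byte): the helper copes with sub-sector offsets
     * and lengths, so no head/tail sector emulation is needed here. */
    s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1, cb, io);
}

The 0x1 align argument is what makes the removal below possible; with the previous sector-sized alignment requirement, the macio code had to emulate unaligned access itself.
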
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -52,187 +52,6 @@ static const int debug_macio = 0;
 
 #define MACIO_PAGE_SIZE 4096
 
-/*
- * Unaligned DMA read/write access functions required for OS X/Darwin which
- * don't perform DMA transactions on sector boundaries. These functions are
- * modelled on bdrv_co_preadv()/bdrv_co_pwritev() and so should be easy to
- * remove if the unaligned block APIs are ever exposed.
- */
-
-static void pmac_dma_read(BlockBackend *blk,
-                          int64_t offset, unsigned int bytes,
-                          void (*cb)(void *opaque, int ret), void *opaque)
-{
-    DBDMA_io *io = opaque;
-    MACIOIDEState *m = io->opaque;
-    IDEState *s = idebus_active_if(&m->bus);
-    dma_addr_t dma_addr;
-    int64_t sector_num;
-    int nsector;
-    uint64_t align = BDRV_SECTOR_SIZE;
-    size_t head_bytes, tail_bytes;
-
-    qemu_iovec_destroy(&io->iov);
-    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
-
-    sector_num = (offset >> 9);
-    nsector = (io->len >> 9);
-
-    MACIO_DPRINTF("--- DMA read transfer (0x%" HWADDR_PRIx ",0x%x): "
-                  "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
-                  sector_num, nsector);
-
-    dma_addr = io->addr;
-    io->dir = DMA_DIRECTION_FROM_DEVICE;
-    io->dma_len = io->len;
-    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
-                                 io->dir);
-
-    if (offset & (align - 1)) {
-        head_bytes = offset & (align - 1);
-
-        MACIO_DPRINTF("--- DMA unaligned head: sector %" PRId64 ", "
-                      "discarding %zu bytes\n", sector_num, head_bytes);
-
-        qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
-
-        bytes += offset & (align - 1);
-        offset = offset & ~(align - 1);
-    }
-
-    qemu_iovec_add(&io->iov, io->dma_mem, io->len);
-
-    if ((offset + bytes) & (align - 1)) {
-        tail_bytes = (offset + bytes) & (align - 1);
-
-        MACIO_DPRINTF("--- DMA unaligned tail: sector %" PRId64 ", "
-                      "discarding bytes %zu\n", sector_num, tail_bytes);
-
-        qemu_iovec_add(&io->iov, &io->tail_remainder, align - tail_bytes);
-
-        bytes = ROUND_UP(bytes, align);
-    }
-
-    s->io_buffer_size -= io->len;
-    s->io_buffer_index += io->len;
-
-    io->len = 0;
-
-    MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
-                  "nsector: %x\n", (offset >> 9), (bytes >> 9));
-
-    s->bus->dma->aiocb = blk_aio_preadv(blk, offset, &io->iov, 0, cb, io);
-}
-
-static void pmac_dma_write(BlockBackend *blk,
-                           int64_t offset, int bytes,
-                           void (*cb)(void *opaque, int ret), void *opaque)
-{
-    DBDMA_io *io = opaque;
-    MACIOIDEState *m = io->opaque;
-    IDEState *s = idebus_active_if(&m->bus);
-    dma_addr_t dma_addr;
-    int64_t sector_num;
-    int nsector;
-    uint64_t align = BDRV_SECTOR_SIZE;
-    size_t head_bytes, tail_bytes;
-    bool unaligned_head = false, unaligned_tail = false;
-
-    qemu_iovec_destroy(&io->iov);
-    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
-
-    sector_num = (offset >> 9);
-    nsector = (io->len >> 9);
-
-    MACIO_DPRINTF("--- DMA write transfer (0x%" HWADDR_PRIx ",0x%x): "
-                  "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
-                  sector_num, nsector);
-
-    dma_addr = io->addr;
-    io->dir = DMA_DIRECTION_TO_DEVICE;
-    io->dma_len = io->len;
-    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
-                                 io->dir);
-
-    if (offset & (align - 1)) {
-        head_bytes = offset & (align - 1);
-        sector_num = ((offset & ~(align - 1)) >> 9);
-
-        MACIO_DPRINTF("--- DMA unaligned head: pre-reading head sector %"
-                      PRId64 "\n", sector_num);
-
-        blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);
-
-        qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
-        qemu_iovec_add(&io->iov, io->dma_mem, io->len);
-
-        bytes += offset & (align - 1);
-        offset = offset & ~(align - 1);
-
-        unaligned_head = true;
-    }
-
-    if ((offset + bytes) & (align - 1)) {
-        tail_bytes = (offset + bytes) & (align - 1);
-        sector_num = (((offset + bytes) & ~(align - 1)) >> 9);
-
-        MACIO_DPRINTF("--- DMA unaligned tail: pre-reading tail sector %"
-                      PRId64 "\n", sector_num);
-
-        blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);
-
-        if (!unaligned_head) {
-            qemu_iovec_add(&io->iov, io->dma_mem, io->len);
-        }
-
-        qemu_iovec_add(&io->iov, &io->tail_remainder + tail_bytes,
-                       align - tail_bytes);
-
-        bytes = ROUND_UP(bytes, align);
-
-        unaligned_tail = true;
-    }
-
-    if (!unaligned_head && !unaligned_tail) {
-        qemu_iovec_add(&io->iov, io->dma_mem, io->len);
-    }
-
-    s->io_buffer_size -= io->len;
-    s->io_buffer_index += io->len;
-
-    io->len = 0;
-
-    MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
-                  "nsector: %x\n", (offset >> 9), (bytes >> 9));
-
-    s->bus->dma->aiocb = blk_aio_pwritev(blk, offset, &io->iov, 0, cb, io);
-}
-
-static void pmac_dma_trim(BlockBackend *blk,
-                          int64_t offset, int bytes,
-                          void (*cb)(void *opaque, int ret), void *opaque)
-{
-    DBDMA_io *io = opaque;
-    MACIOIDEState *m = io->opaque;
-    IDEState *s = idebus_active_if(&m->bus);
-    dma_addr_t dma_addr;
-
-    qemu_iovec_destroy(&io->iov);
-    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
-
-    dma_addr = io->addr;
-    io->dir = DMA_DIRECTION_TO_DEVICE;
-    io->dma_len = io->len;
-    io->dma_mem = dma_memory_map(&address_space_memory, dma_addr, &io->dma_len,
-                                 io->dir);
-
-    qemu_iovec_add(&io->iov, io->dma_mem, io->len);
-
-    s->io_buffer_size -= io->len;
-    s->io_buffer_index += io->len;
-    io->len = 0;
-
-    s->bus->dma->aiocb = ide_issue_trim(offset, &io->iov, cb, io, blk);
-}
-
 static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
 {
     DBDMA_io *io = opaque;
@@ -244,6 +63,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
 
     if (ret < 0) {
         MACIO_DPRINTF("DMA error: %d\n", ret);
+        qemu_sglist_destroy(&s->sg);
         ide_atapi_io_error(s, ret);
         goto done;
     }
@@ -258,6 +78,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
 
     if (s->io_buffer_size <= 0) {
         MACIO_DPRINTF("End of IDE transfer\n");
+        qemu_sglist_destroy(&s->sg);
         ide_atapi_cmd_ok(s);
         m->dma_active = false;
         goto done;
@@ -282,7 +103,15 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
     /* Calculate current offset */
     offset = ((int64_t)s->lba << 11) + s->io_buffer_index;
 
-    pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io);
+    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
+                     &address_space_memory);
+    qemu_sglist_add(&s->sg, io->addr, io->len);
+    s->io_buffer_size -= io->len;
+    s->io_buffer_index += io->len;
+    io->len = 0;
+
+    s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1,
+                                      pmac_ide_atapi_transfer_cb, io);
     return;
 
 done:
@@ -310,6 +139,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
 
     if (ret < 0) {
         MACIO_DPRINTF("DMA error: %d\n", ret);
+        qemu_sglist_destroy(&s->sg);
         ide_dma_error(s);
         goto done;
     }
@@ -324,6 +154,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
 
     if (s->io_buffer_size <= 0) {
         MACIO_DPRINTF("End of IDE transfer\n");
+        qemu_sglist_destroy(&s->sg);
         s->status = READY_STAT | SEEK_STAT;
         ide_set_irq(s->bus);
         m->dma_active = false;
@@ -338,15 +169,27 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
     /* Calculate number of sectors */
     offset = (ide_get_sector(s) << 9) + s->io_buffer_index;
 
+    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
+                     &address_space_memory);
+    qemu_sglist_add(&s->sg, io->addr, io->len);
+    s->io_buffer_size -= io->len;
+    s->io_buffer_index += io->len;
+    io->len = 0;
+
     switch (s->dma_cmd) {
     case IDE_DMA_READ:
-        pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
+        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset, 0x1,
+                                          pmac_ide_transfer_cb, io);
         break;
     case IDE_DMA_WRITE:
-        pmac_dma_write(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
+        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset, 0x1,
+                                           pmac_ide_transfer_cb, io);
         break;
     case IDE_DMA_TRIM:
-        pmac_dma_trim(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
+        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk), &s->sg,
+                                        offset, 0x1, ide_issue_trim, s->blk,
+                                        pmac_ide_transfer_cb, io,
+                                        DMA_DIRECTION_TO_DEVICE);
         break;
     default:
         abort();