Block layer patches

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2.0.22 (GNU/Linux)
 
 iQIcBAABAgAGBQJXe9ZGAAoJEH8JsnLIjy/WmIkQAKpszWb2lH1/qgnWOzOEoz9s
 FUiZlf9f2LLIXMFnBFPGVs12SkTJHedsEKo0VG/tfVQdq+xJd8gEHSUZQbH1/vgg
 ArCQ76Rwc9giCrl4AwZhETbjTtpKlapxuWwDHCNjMYY9F4jndBRC+AZKUkIGAHHJ
 8BWrmPgv8ueDZKWpQBfVmm0ItYk0g09UCgCnsiVNkEC9+nN6YQ2OCgDkzQbMj6OQ
 GxvccTelueIgmC4fQdaAEnsbqK/LyVrnZfz1Y1ilErI8ny7gmiiT5TInQXjsrzAr
 UVfycyAlVDNDVaUabi3F3b6Rpj9aBOMYsNVySmJKVeWUf2LKNy7v6T+JteFFZ3Qw
 ULddAnZIlpplL3wrzIklElw+2xz2aJ5n9SoUdzq7xKLLt8asQE6nMNsszjiow+fC
 LzbyOVP+aLVgOaUBHgt0aFShLHyGIuLAuvtaXk/wxYPDQe0Cyn4qsI41gF/VgnET
 7ueIUwgi+AWyihz1uNsqO5RgakNQqN0Ms+LAYEBu7K9t88llv8yoSl0z8Ad+ZzN2
 fdjOssU+Q26N/OAInO73W8LjWGR0N0bk0EQa4xlA0zhJKOB52RE7nDnKEchs7Xgp
 tChW084u9ixlct7u8V/cBWTEVQnW/p9fmD4RZhSE0IrkPfoTY9dmND2adTgyVc1v
 7ONXnUCGoHcHVPNMVGG0
 =jKrb
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging

Block layer patches

# gpg: Signature made Tue 05 Jul 2016 16:46:14 BST
# gpg:                using RSA key 0x7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>"
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74  56FE 7F09 B272 C88F 2FD6

* remotes/kevin/tags/for-upstream: (43 commits)
  block/qcow2: Don't use cpu_to_*w()
  block: Convert bdrv_co_preadv/pwritev to BdrvChild
  block: Convert bdrv_prwv_co() to BdrvChild
  block: Convert bdrv_pwrite_zeroes() to BdrvChild
  block: Convert bdrv_pwrite(v/_sync) to BdrvChild
  block: Convert bdrv_pread(v) to BdrvChild
  block: Convert bdrv_write() to BdrvChild
  block: Convert bdrv_read() to BdrvChild
  block: Use BlockBackend for I/O in bdrv_commit()
  block: Move bdrv_commit() to block/commit.c
  block: Convert bdrv_co_do_readv/writev to BdrvChild
  block: Convert bdrv_aio_writev() to BdrvChild
  block: Convert bdrv_aio_readv() to BdrvChild
  block: Convert bdrv_co_writev() to BdrvChild
  block: Convert bdrv_co_readv() to BdrvChild
  vhdx: Some more BlockBackend use in vhdx_create()
  blkreplay: Convert to byte-based I/O
  vvfat: Use BdrvChild for s->qcow
  block/qdev: Fix NULL access when using BB twice
  block: fix return code for partial write for Linux AIO
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Peter Maydell, 2016-07-05 17:53:02 +01:00, commit 07bee7f4f4
44 changed files with 842 additions and 651 deletions
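
Most of the hunks that follow share one shape: bdrv_pread(), bdrv_co_preadv(), bdrv_aio_readv() and their write counterparts now take a BdrvChild instead of a BlockDriverState, so callers pass the child pointer bs->file (or blk->root) rather than dereferencing it to bs->file->bs (or blk_bs(blk)). A minimal sketch of that call-site change, using simplified stand-in types rather than the real QEMU structs:

#include <stdio.h>

typedef struct BlockDriverState BlockDriverState;

typedef struct BdrvChild {
    BlockDriverState *bs;           /* the child node this edge points to */
} BdrvChild;

struct BlockDriverState {
    const char *node_name;
    BdrvChild *file;                /* protocol/file child, if any */
};

/* old-style helper: addressed the child node directly */
static int bdrv_pread_old(BlockDriverState *bs, long offset, void *buf, int bytes)
{
    printf("read %d bytes at %ld from node %s\n", bytes, offset, bs->node_name);
    return bytes;
}

/* new-style helper: addressed through the parent's child edge */
static int bdrv_pread_new(BdrvChild *child, long offset, void *buf, int bytes)
{
    printf("read %d bytes at %ld via child edge to %s\n", bytes, offset,
           child->bs->node_name);
    return bytes;
}

int main(void)
{
    char buf[512];
    BlockDriverState file = { .node_name = "file-node", .file = NULL };
    BdrvChild child = { .bs = &file };
    BlockDriverState fmt = { .node_name = "format-node", .file = &child };

    bdrv_pread_old(fmt.file->bs, 0, buf, sizeof buf);   /* before this series */
    bdrv_pread_new(fmt.file, 0, buf, sizeof buf);       /* after this series  */
    return 0;
}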

block.c

@ -536,9 +536,10 @@ BlockDriver *bdrv_probe_all(const uint8_t *buf, int buf_size,
return drv;
}
static int find_image_format(BlockDriverState *bs, const char *filename,
static int find_image_format(BdrvChild *file, const char *filename,
BlockDriver **pdrv, Error **errp)
{
BlockDriverState *bs = file->bs;
BlockDriver *drv;
uint8_t buf[BLOCK_PROBE_BUF_SIZE];
int ret = 0;
@ -549,7 +550,7 @@ static int find_image_format(BlockDriverState *bs, const char *filename,
return ret;
}
ret = bdrv_pread(bs, 0, buf, sizeof(buf));
ret = bdrv_pread(file, 0, buf, sizeof(buf));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read image for determining its "
"format");
@ -937,7 +938,6 @@ static int bdrv_open_common(BlockDriverState *bs, BdrvChild *file,
goto fail_opts;
}
bs->request_alignment = drv->bdrv_co_preadv ? 1 : 512;
bs->read_only = !(bs->open_flags & BDRV_O_RDWR);
if (use_bdrv_whitelist && !bdrv_is_whitelisted(drv, bs->read_only)) {
@ -1017,7 +1017,7 @@ static int bdrv_open_common(BlockDriverState *bs, BdrvChild *file,
assert(bdrv_opt_mem_align(bs) != 0);
assert(bdrv_min_mem_align(bs) != 0);
assert(is_power_of_2(bs->request_alignment) || bdrv_is_sg(bs));
assert(is_power_of_2(bs->bl.request_alignment));
qemu_opts_del(opts);
return 0;
@ -1653,7 +1653,7 @@ static BlockDriverState *bdrv_open_inherit(const char *filename,
/* Image format probing */
bs->probed = !drv;
if (!drv && file) {
ret = find_image_format(file->bs, filename, &drv, &local_err);
ret = find_image_format(file, filename, &drv, &local_err);
if (ret < 0) {
goto fail;
}
@ -2184,9 +2184,9 @@ static void bdrv_close(BlockDriverState *bs)
bs->backing_file[0] = '\0';
bs->backing_format[0] = '\0';
bs->total_sectors = 0;
bs->encrypted = 0;
bs->valid_key = 0;
bs->sg = 0;
bs->encrypted = false;
bs->valid_key = false;
bs->sg = false;
QDECREF(bs->options);
QDECREF(bs->explicit_options);
bs->options = NULL;
@ -2323,116 +2323,6 @@ int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix)
return bs->drv->bdrv_check(bs, res, fix);
}
#define COMMIT_BUF_SECTORS 2048
/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
BlockDriver *drv = bs->drv;
int64_t sector, total_sectors, length, backing_length;
int n, ro, open_flags;
int ret = 0;
uint8_t *buf = NULL;
if (!drv)
return -ENOMEDIUM;
if (!bs->backing) {
return -ENOTSUP;
}
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
bdrv_op_is_blocked(bs->backing->bs, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) {
return -EBUSY;
}
ro = bs->backing->bs->read_only;
open_flags = bs->backing->bs->open_flags;
if (ro) {
if (bdrv_reopen(bs->backing->bs, open_flags | BDRV_O_RDWR, NULL)) {
return -EACCES;
}
}
length = bdrv_getlength(bs);
if (length < 0) {
ret = length;
goto ro_cleanup;
}
backing_length = bdrv_getlength(bs->backing->bs);
if (backing_length < 0) {
ret = backing_length;
goto ro_cleanup;
}
/* If our top snapshot is larger than the backing file image,
* grow the backing file image if possible. If not possible,
* we must return an error */
if (length > backing_length) {
ret = bdrv_truncate(bs->backing->bs, length);
if (ret < 0) {
goto ro_cleanup;
}
}
total_sectors = length >> BDRV_SECTOR_BITS;
/* qemu_try_blockalign() for bs will choose an alignment that works for
* bs->backing->bs as well, so no need to compare the alignment manually. */
buf = qemu_try_blockalign(bs, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
if (buf == NULL) {
ret = -ENOMEM;
goto ro_cleanup;
}
for (sector = 0; sector < total_sectors; sector += n) {
ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
if (ret < 0) {
goto ro_cleanup;
}
if (ret) {
ret = bdrv_read(bs, sector, buf, n);
if (ret < 0) {
goto ro_cleanup;
}
ret = bdrv_write(bs->backing->bs, sector, buf, n);
if (ret < 0) {
goto ro_cleanup;
}
}
}
if (drv->bdrv_make_empty) {
ret = drv->bdrv_make_empty(bs);
if (ret < 0) {
goto ro_cleanup;
}
bdrv_flush(bs);
}
/*
* Make sure all data we wrote to the backing device is actually
* stable on disk.
*/
if (bs->backing) {
bdrv_flush(bs->backing->bs);
}
ret = 0;
ro_cleanup:
qemu_vfree(buf);
if (ro) {
/* ignoring error return here */
bdrv_reopen(bs->backing->bs, open_flags & ~BDRV_O_RDWR, NULL);
}
return ret;
}
/*
* Return values:
* 0 - success
@ -2644,30 +2534,30 @@ void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
*nb_sectors_ptr = nb_sectors < 0 ? 0 : nb_sectors;
}
int bdrv_is_read_only(BlockDriverState *bs)
bool bdrv_is_read_only(BlockDriverState *bs)
{
return bs->read_only;
}
int bdrv_is_sg(BlockDriverState *bs)
bool bdrv_is_sg(BlockDriverState *bs)
{
return bs->sg;
}
int bdrv_is_encrypted(BlockDriverState *bs)
bool bdrv_is_encrypted(BlockDriverState *bs)
{
if (bs->backing && bs->backing->bs->encrypted) {
return 1;
return true;
}
return bs->encrypted;
}
int bdrv_key_required(BlockDriverState *bs)
bool bdrv_key_required(BlockDriverState *bs)
{
BdrvChild *backing = bs->backing;
if (backing && backing->bs->encrypted && !backing->bs->valid_key) {
return 1;
return true;
}
return (bs->encrypted && !bs->valid_key);
}
@ -2689,10 +2579,10 @@ int bdrv_set_key(BlockDriverState *bs, const char *key)
}
ret = bs->drv->bdrv_set_key(bs, key);
if (ret < 0) {
bs->valid_key = 0;
bs->valid_key = false;
} else if (!bs->valid_key) {
/* call the change callback now, we skipped it on open */
bs->valid_key = 1;
bs->valid_key = true;
bdrv_parent_cb_change_media(bs, true);
}
return ret;

block/Makefile.objs

@ -9,7 +9,7 @@ block-obj-y += block-backend.o snapshot.o qapi.o
block-obj-$(CONFIG_WIN32) += raw-win32.o win32-aio.o
block-obj-$(CONFIG_POSIX) += raw-posix.o
block-obj-$(CONFIG_LINUX_AIO) += linux-aio.o
block-obj-y += null.o mirror.o io.o
block-obj-y += null.o mirror.o commit.o io.o
block-obj-y += throttle-groups.o
block-obj-y += nbd.o nbd-client.o sheepdog.o
@ -26,7 +26,6 @@ block-obj-y += write-threshold.o
block-obj-y += crypto.o
common-obj-y += stream.o
common-obj-y += commit.o
common-obj-y += backup.o
iscsi.o-cflags := $(LIBISCSI_CFLAGS)

block/blkdebug.c

@ -37,6 +37,7 @@
typedef struct BDRVBlkdebugState {
int state;
int new_state;
int align;
QLIST_HEAD(, BlkdebugRule) rules[BLKDBG__MAX];
QSIMPLEQ_HEAD(, BlkdebugRule) active_rules;
@ -382,10 +383,10 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags,
}
/* Set request alignment */
align = qemu_opt_get_size(opts, "align", bs->request_alignment);
if (align > 0 && align < INT_MAX && !(align & (align - 1))) {
bs->request_alignment = align;
} else {
align = qemu_opt_get_size(opts, "align", 0);
if (align < INT_MAX && is_power_of_2(align)) {
s->align = align;
} else if (align) {
error_setg(errp, "Invalid alignment");
ret = -EINVAL;
goto fail_unref;
@ -456,7 +457,7 @@ static BlockAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
return inject_error(bs, cb, opaque, rule);
}
return bdrv_aio_readv(bs->file->bs, sector_num, qiov, nb_sectors,
return bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors,
cb, opaque);
}
@ -479,7 +480,7 @@ static BlockAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
return inject_error(bs, cb, opaque, rule);
}
return bdrv_aio_writev(bs->file->bs, sector_num, qiov, nb_sectors,
return bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors,
cb, opaque);
}
@ -720,6 +721,15 @@ static void blkdebug_refresh_filename(BlockDriverState *bs, QDict *options)
bs->full_open_options = opts;
}
static void blkdebug_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVBlkdebugState *s = bs->opaque;
if (s->align) {
bs->bl.request_alignment = s->align;
}
}
static int blkdebug_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue, Error **errp)
{
@ -738,6 +748,7 @@ static BlockDriver bdrv_blkdebug = {
.bdrv_getlength = blkdebug_getlength,
.bdrv_truncate = blkdebug_truncate,
.bdrv_refresh_filename = blkdebug_refresh_filename,
.bdrv_refresh_limits = blkdebug_refresh_limits,
.bdrv_aio_readv = blkdebug_aio_readv,
.bdrv_aio_writev = blkdebug_aio_writev,
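
blkdebug now remembers the align option in its own state and applies it from a new .bdrv_refresh_limits callback rather than writing bs->request_alignment at open time; the validity check relies on is_power_of_2() rejecting zero, so an unset align simply keeps the default. A standalone sketch of that option check, with the helper re-implemented locally to match the semantics assumed here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

/* Local stand-in: zero is *not* considered a power of two, so an unset
 * alignment (0) falls through without being applied. */
static bool is_power_of_2(uint64_t value)
{
    return value != 0 && (value & (value - 1)) == 0;
}

static void check(uint64_t align)
{
    if (align < INT_MAX && is_power_of_2(align)) {
        printf("align=%llu: accepted\n", (unsigned long long)align);
    } else if (align) {
        printf("align=%llu: rejected, invalid alignment\n",
               (unsigned long long)align);
    } else {
        printf("align=0: not set, keep the default\n");
    }
}

int main(void)
{
    check(0);       /* not set */
    check(512);     /* accepted */
    check(4096);    /* accepted */
    check(1000);    /* rejected: not a power of two */
    return 0;
}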

block/blkreplay.c

@ -81,22 +81,22 @@ static void block_request_create(uint64_t reqid, BlockDriverState *bs,
replay_block_event(req->bh, reqid);
}
static int coroutine_fn blkreplay_co_readv(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
static int coroutine_fn blkreplay_co_preadv(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
uint64_t reqid = request_id++;
int ret = bdrv_co_readv(bs->file->bs, sector_num, nb_sectors, qiov);
int ret = bdrv_co_preadv(bs->file, offset, bytes, qiov, flags);
block_request_create(reqid, bs, qemu_coroutine_self());
qemu_coroutine_yield();
return ret;
}
static int coroutine_fn blkreplay_co_writev(BlockDriverState *bs,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
static int coroutine_fn blkreplay_co_pwritev(BlockDriverState *bs,
uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
uint64_t reqid = request_id++;
int ret = bdrv_co_writev(bs->file->bs, sector_num, nb_sectors, qiov);
int ret = bdrv_co_pwritev(bs->file, offset, bytes, qiov, flags);
block_request_create(reqid, bs, qemu_coroutine_self());
qemu_coroutine_yield();
@ -107,7 +107,7 @@ static int coroutine_fn blkreplay_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags)
{
uint64_t reqid = request_id++;
int ret = bdrv_co_pwrite_zeroes(bs->file->bs, offset, count, flags);
int ret = bdrv_co_pwrite_zeroes(bs->file, offset, count, flags);
block_request_create(reqid, bs, qemu_coroutine_self());
qemu_coroutine_yield();
@ -144,8 +144,8 @@ static BlockDriver bdrv_blkreplay = {
.bdrv_close = blkreplay_close,
.bdrv_getlength = blkreplay_getlength,
.bdrv_co_readv = blkreplay_co_readv,
.bdrv_co_writev = blkreplay_co_writev,
.bdrv_co_preadv = blkreplay_co_preadv,
.bdrv_co_pwritev = blkreplay_co_pwritev,
.bdrv_co_pwrite_zeroes = blkreplay_co_pwrite_zeroes,
.bdrv_co_discard = blkreplay_co_discard,
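
blkreplay moves from the sector-based bdrv_co_readv/writev callbacks to the byte-based bdrv_co_preadv/pwritev ones; a (sector_num, nb_sectors) pair becomes a byte range by shifting with BDRV_SECTOR_BITS. A standalone illustration, with the sector constants defined locally to mirror QEMU's 512-byte sectors:

#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_BITS 9                       /* 512-byte sectors */
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)

int main(void)
{
    int64_t sector_num = 2040;
    int nb_sectors = 16;

    /* byte-based equivalent of a (sector_num, nb_sectors) request */
    uint64_t offset = (uint64_t)sector_num << BDRV_SECTOR_BITS;
    uint64_t bytes  = (uint64_t)nb_sectors << BDRV_SECTOR_BITS;

    printf("sectors [%lld, +%d) -> bytes [%llu, +%llu)\n",
           (long long)sector_num, nb_sectors,
           (unsigned long long)offset, (unsigned long long)bytes);
    return 0;
}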

block/blkverify.c

@ -247,9 +247,9 @@ static BlockAIOCB *blkverify_aio_readv(BlockDriverState *bs,
qemu_iovec_init(&acb->raw_qiov, acb->qiov->niov);
qemu_iovec_clone(&acb->raw_qiov, qiov, acb->buf);
bdrv_aio_readv(s->test_file->bs, sector_num, qiov, nb_sectors,
bdrv_aio_readv(s->test_file, sector_num, qiov, nb_sectors,
blkverify_aio_cb, acb);
bdrv_aio_readv(bs->file->bs, sector_num, &acb->raw_qiov, nb_sectors,
bdrv_aio_readv(bs->file, sector_num, &acb->raw_qiov, nb_sectors,
blkverify_aio_cb, acb);
return &acb->common;
}
@ -262,9 +262,9 @@ static BlockAIOCB *blkverify_aio_writev(BlockDriverState *bs,
BlkverifyAIOCB *acb = blkverify_aio_get(bs, true, sector_num, qiov,
nb_sectors, cb, opaque);
bdrv_aio_writev(s->test_file->bs, sector_num, qiov, nb_sectors,
bdrv_aio_writev(s->test_file, sector_num, qiov, nb_sectors,
blkverify_aio_cb, acb);
bdrv_aio_writev(bs->file->bs, sector_num, qiov, nb_sectors,
bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors,
blkverify_aio_cb, acb);
return &acb->common;
}

block/block-backend.c

@ -760,7 +760,7 @@ int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
throttle_group_co_io_limits_intercept(blk, bytes, false);
}
return bdrv_co_preadv(blk_bs(blk), offset, bytes, qiov, flags);
return bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
}
int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
@ -785,7 +785,7 @@ int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
flags |= BDRV_REQ_FUA;
}
return bdrv_co_pwritev(blk_bs(blk), offset, bytes, qiov, flags);
return bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
}
typedef struct BlkRwCo {
@ -870,6 +870,11 @@ int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
flags | BDRV_REQ_ZERO_WRITE);
}
int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
{
return bdrv_make_zero(blk->root, flags);
}
static void error_callback_bh(void *opaque)
{
struct BlockBackendAIOCB *acb = opaque;
@ -1303,15 +1308,16 @@ int blk_get_flags(BlockBackend *blk)
}
}
int blk_get_max_transfer_length(BlockBackend *blk)
/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
uint32_t blk_get_max_transfer(BlockBackend *blk)
{
BlockDriverState *bs = blk_bs(blk);
uint32_t max = 0;
if (bs) {
return bs->bl.max_transfer_length;
} else {
return 0;
max = bs->bl.max_transfer;
}
return MIN_NON_ZERO(max, INT_MAX);
}
int blk_get_max_iov(BlockBackend *blk)

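blk_get_max_transfer() now returns a byte limit and, per the new comment, is guaranteed nonzero: a driver limit of 0 (meaning unlimited) is clamped to INT_MAX with MIN_NON_ZERO. A standalone sketch, with the macro defined locally to match the semantics assumed here:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in: pick the smaller of two values, where 0 means "no limit". */
#define MIN_NON_ZERO(a, b) \
    ((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

static uint32_t clamp_max_transfer(uint32_t driver_limit)
{
    return MIN_NON_ZERO(driver_limit, (uint32_t)INT_MAX);
}

int main(void)
{
    printf("%u\n", clamp_max_transfer(0));          /* no limit -> INT_MAX */
    printf("%u\n", clamp_max_transfer(1 << 20));    /* 1 MiB stays 1 MiB   */
    return 0;
}
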
block/bochs.c

@ -104,10 +104,9 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
struct bochs_header bochs;
int ret;
bs->read_only = 1; // no write support yet
bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */
bs->read_only = true; /* no write support yet */
ret = bdrv_pread(bs->file->bs, 0, &bochs, sizeof(bochs));
ret = bdrv_pread(bs->file, 0, &bochs, sizeof(bochs));
if (ret < 0) {
return ret;
}
@ -141,7 +140,7 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags,
return -ENOMEM;
}
ret = bdrv_pread(bs->file->bs, le32_to_cpu(bochs.header), s->catalog_bitmap,
ret = bdrv_pread(bs->file, le32_to_cpu(bochs.header), s->catalog_bitmap,
s->catalog_size * 4);
if (ret < 0) {
goto fail;
@ -189,6 +188,11 @@ fail:
return ret;
}
static void bochs_refresh_limits(BlockDriverState *bs, Error **errp)
{
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
{
BDRVBochsState *s = bs->opaque;
@ -210,7 +214,7 @@ static int64_t seek_to_sector(BlockDriverState *bs, int64_t sector_num)
(s->extent_blocks + s->bitmap_blocks));
/* read in bitmap for current extent */
ret = bdrv_pread(bs->file->bs, bitmap_offset + (extent_offset / 8),
ret = bdrv_pread(bs->file, bitmap_offset + (extent_offset / 8),
&bitmap_entry, 1);
if (ret < 0) {
return ret;
@ -251,7 +255,7 @@ bochs_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
qemu_iovec_concat(&local_qiov, qiov, bytes_done, 512);
if (block_offset > 0) {
ret = bdrv_co_preadv(bs->file->bs, block_offset, 512,
ret = bdrv_co_preadv(bs->file, block_offset, 512,
&local_qiov, 0);
if (ret < 0) {
goto fail;
@ -283,6 +287,7 @@ static BlockDriver bdrv_bochs = {
.instance_size = sizeof(BDRVBochsState),
.bdrv_probe = bochs_probe,
.bdrv_open = bochs_open,
.bdrv_refresh_limits = bochs_refresh_limits,
.bdrv_co_preadv = bochs_co_preadv,
.bdrv_close = bochs_close,
};

block/cloop.c

@ -66,11 +66,10 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
uint32_t offsets_size, max_compressed_block_size = 1, i;
int ret;
bs->read_only = 1;
bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */
bs->read_only = true;
/* read header */
ret = bdrv_pread(bs->file->bs, 128, &s->block_size, 4);
ret = bdrv_pread(bs->file, 128, &s->block_size, 4);
if (ret < 0) {
return ret;
}
@ -96,7 +95,7 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
return -EINVAL;
}
ret = bdrv_pread(bs->file->bs, 128 + 4, &s->n_blocks, 4);
ret = bdrv_pread(bs->file, 128 + 4, &s->n_blocks, 4);
if (ret < 0) {
return ret;
}
@ -127,7 +126,7 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags,
return -ENOMEM;
}
ret = bdrv_pread(bs->file->bs, 128 + 4 + 4, s->offsets, offsets_size);
ret = bdrv_pread(bs->file, 128 + 4 + 4, s->offsets, offsets_size);
if (ret < 0) {
goto fail;
}
@ -199,6 +198,11 @@ fail:
return ret;
}
static void cloop_refresh_limits(BlockDriverState *bs, Error **errp)
{
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static inline int cloop_read_block(BlockDriverState *bs, int block_num)
{
BDRVCloopState *s = bs->opaque;
@ -207,7 +211,7 @@ static inline int cloop_read_block(BlockDriverState *bs, int block_num)
int ret;
uint32_t bytes = s->offsets[block_num + 1] - s->offsets[block_num];
ret = bdrv_pread(bs->file->bs, s->offsets[block_num],
ret = bdrv_pread(bs->file, s->offsets[block_num],
s->compressed_block, bytes);
if (ret != bytes) {
return -1;
@ -280,6 +284,7 @@ static BlockDriver bdrv_cloop = {
.instance_size = sizeof(BDRVCloopState),
.bdrv_probe = cloop_probe,
.bdrv_open = cloop_open,
.bdrv_refresh_limits = cloop_refresh_limits,
.bdrv_co_preadv = cloop_co_preadv,
.bdrv_close = cloop_close,
};

block/commit.c

@ -282,3 +282,124 @@ void commit_start(BlockDriverState *bs, BlockDriverState *base,
trace_commit_start(bs, base, top, s, s->common.co, opaque);
qemu_coroutine_enter(s->common.co, s);
}
#define COMMIT_BUF_SECTORS 2048
/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
BlockBackend *src, *backing;
BlockDriver *drv = bs->drv;
int64_t sector, total_sectors, length, backing_length;
int n, ro, open_flags;
int ret = 0;
uint8_t *buf = NULL;
if (!drv)
return -ENOMEDIUM;
if (!bs->backing) {
return -ENOTSUP;
}
if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
bdrv_op_is_blocked(bs->backing->bs, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) {
return -EBUSY;
}
ro = bs->backing->bs->read_only;
open_flags = bs->backing->bs->open_flags;
if (ro) {
if (bdrv_reopen(bs->backing->bs, open_flags | BDRV_O_RDWR, NULL)) {
return -EACCES;
}
}
src = blk_new();
blk_insert_bs(src, bs);
backing = blk_new();
blk_insert_bs(backing, bs->backing->bs);
length = blk_getlength(src);
if (length < 0) {
ret = length;
goto ro_cleanup;
}
backing_length = blk_getlength(backing);
if (backing_length < 0) {
ret = backing_length;
goto ro_cleanup;
}
/* If our top snapshot is larger than the backing file image,
* grow the backing file image if possible. If not possible,
* we must return an error */
if (length > backing_length) {
ret = blk_truncate(backing, length);
if (ret < 0) {
goto ro_cleanup;
}
}
total_sectors = length >> BDRV_SECTOR_BITS;
/* blk_try_blockalign() for src will choose an alignment that works for
* backing as well, so no need to compare the alignment manually. */
buf = blk_try_blockalign(src, COMMIT_BUF_SECTORS * BDRV_SECTOR_SIZE);
if (buf == NULL) {
ret = -ENOMEM;
goto ro_cleanup;
}
for (sector = 0; sector < total_sectors; sector += n) {
ret = bdrv_is_allocated(bs, sector, COMMIT_BUF_SECTORS, &n);
if (ret < 0) {
goto ro_cleanup;
}
if (ret) {
ret = blk_pread(src, sector * BDRV_SECTOR_SIZE, buf,
n * BDRV_SECTOR_SIZE);
if (ret < 0) {
goto ro_cleanup;
}
ret = blk_pwrite(backing, sector * BDRV_SECTOR_SIZE, buf,
n * BDRV_SECTOR_SIZE, 0);
if (ret < 0) {
goto ro_cleanup;
}
}
}
if (drv->bdrv_make_empty) {
ret = drv->bdrv_make_empty(bs);
if (ret < 0) {
goto ro_cleanup;
}
blk_flush(src);
}
/*
* Make sure all data we wrote to the backing device is actually
* stable on disk.
*/
blk_flush(backing);
ret = 0;
ro_cleanup:
qemu_vfree(buf);
blk_unref(src);
blk_unref(backing);
if (ro) {
/* ignoring error return here */
bdrv_reopen(bs->backing->bs, open_flags & ~BDRV_O_RDWR, NULL);
}
return ret;
}

block/crypto.c

@ -64,7 +64,7 @@ static ssize_t block_crypto_read_func(QCryptoBlock *block,
BlockDriverState *bs = opaque;
ssize_t ret;
ret = bdrv_pread(bs->file->bs, offset, buf, buflen);
ret = bdrv_pread(bs->file, offset, buf, buflen);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read encryption header");
return ret;
@ -322,8 +322,8 @@ static int block_crypto_open_generic(QCryptoBlockFormat format,
goto cleanup;
}
bs->encrypted = 1;
bs->valid_key = 1;
bs->encrypted = true;
bs->valid_key = true;
ret = 0;
cleanup:
@ -428,7 +428,7 @@ block_crypto_co_readv(BlockDriverState *bs, int64_t sector_num,
qemu_iovec_reset(&hd_qiov);
qemu_iovec_add(&hd_qiov, cipher_data, cur_nr_sectors * 512);
ret = bdrv_co_readv(bs->file->bs,
ret = bdrv_co_readv(bs->file,
payload_offset + sector_num,
cur_nr_sectors, &hd_qiov);
if (ret < 0) {
@ -507,7 +507,7 @@ block_crypto_co_writev(BlockDriverState *bs, int64_t sector_num,
qemu_iovec_reset(&hd_qiov);
qemu_iovec_add(&hd_qiov, cipher_data, cur_nr_sectors * 512);
ret = bdrv_co_writev(bs->file->bs,
ret = bdrv_co_writev(bs->file,
payload_offset + sector_num,
cur_nr_sectors, &hd_qiov);
if (ret < 0) {

block/dmg.c

@ -86,7 +86,7 @@ static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
uint64_t buffer;
int ret;
ret = bdrv_pread(bs->file->bs, offset, &buffer, 8);
ret = bdrv_pread(bs->file, offset, &buffer, 8);
if (ret < 0) {
return ret;
}
@ -100,7 +100,7 @@ static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
uint32_t buffer;
int ret;
ret = bdrv_pread(bs->file->bs, offset, &buffer, 4);
ret = bdrv_pread(bs->file, offset, &buffer, 4);
if (ret < 0) {
return ret;
}
@ -153,8 +153,9 @@ static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
}
}
static int64_t dmg_find_koly_offset(BlockDriverState *file_bs, Error **errp)
static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
BlockDriverState *file_bs = file->bs;
int64_t length;
int64_t offset = 0;
uint8_t buffer[515];
@ -178,7 +179,7 @@ static int64_t dmg_find_koly_offset(BlockDriverState *file_bs, Error **errp)
offset = length - 511 - 512;
}
length = length < 515 ? length : 515;
ret = bdrv_pread(file_bs, offset, buffer, length);
ret = bdrv_pread(file, offset, buffer, length);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
return ret;
@ -355,7 +356,7 @@ static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
offset += 4;
buffer = g_realloc(buffer, count);
ret = bdrv_pread(bs->file->bs, offset, buffer, count);
ret = bdrv_pread(bs->file, offset, buffer, count);
if (ret < 0) {
goto fail;
}
@ -392,7 +393,7 @@ static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
buffer = g_malloc(info_length + 1);
buffer[info_length] = '\0';
ret = bdrv_pread(bs->file->bs, info_begin, buffer, info_length);
ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
if (ret != info_length) {
ret = -EINVAL;
goto fail;
@ -438,8 +439,7 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
int64_t offset;
int ret;
bs->read_only = 1;
bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */
bs->read_only = true;
s->n_chunks = 0;
s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
@ -449,7 +449,7 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
ds.max_sectors_per_chunk = 1;
/* locate the UDIF trailer */
offset = dmg_find_koly_offset(bs->file->bs, errp);
offset = dmg_find_koly_offset(bs->file, errp);
if (offset < 0) {
ret = offset;
goto fail;
@ -547,6 +547,11 @@ fail:
return ret;
}
static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static inline int is_sector_in_chunk(BDRVDMGState* s,
uint32_t chunk_num, uint64_t sector_num)
{
@ -595,7 +600,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
case 0x80000005: { /* zlib compressed */
/* we need to buffer, because only the chunk as whole can be
* inflated. */
ret = bdrv_pread(bs->file->bs, s->offsets[chunk],
ret = bdrv_pread(bs->file, s->offsets[chunk],
s->compressed_chunk, s->lengths[chunk]);
if (ret != s->lengths[chunk]) {
return -1;
@ -619,7 +624,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
case 0x80000006: /* bzip2 compressed */
/* we need to buffer, because only the chunk as whole can be
* inflated. */
ret = bdrv_pread(bs->file->bs, s->offsets[chunk],
ret = bdrv_pread(bs->file, s->offsets[chunk],
s->compressed_chunk, s->lengths[chunk]);
if (ret != s->lengths[chunk]) {
return -1;
@ -644,7 +649,7 @@ static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
break;
#endif /* CONFIG_BZIP2 */
case 1: /* copy */
ret = bdrv_pread(bs->file->bs, s->offsets[chunk],
ret = bdrv_pread(bs->file, s->offsets[chunk],
s->uncompressed_chunk, s->lengths[chunk]);
if (ret != s->lengths[chunk]) {
return -1;
@ -720,6 +725,7 @@ static BlockDriver bdrv_dmg = {
.instance_size = sizeof(BDRVDMGState),
.bdrv_probe = dmg_probe,
.bdrv_open = dmg_open,
.bdrv_refresh_limits = dmg_refresh_limits,
.bdrv_co_preadv = dmg_co_preadv,
.bdrv_close = dmg_close,
};
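
bochs, cloop and dmg all receive the same treatment: the bs->request_alignment assignment in .bdrv_open is replaced by a .bdrv_refresh_limits callback that fills in bs->bl.request_alignment. A toy sketch of that callback shape, using simplified stand-in types rather than the real QEMU definitions:

#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_SIZE 512u

typedef struct BlockLimits {
    uint32_t request_alignment;     /* byte alignment required by the driver */
} BlockLimits;

typedef struct BlockDriverState {
    BlockLimits bl;
} BlockDriverState;

typedef struct BlockDriver {
    void (*bdrv_refresh_limits)(BlockDriverState *bs);
} BlockDriver;

static void toy_refresh_limits(BlockDriverState *bs)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE;    /* no sub-sector I/O */
}

int main(void)
{
    BlockDriver drv = { .bdrv_refresh_limits = toy_refresh_limits };
    BlockDriverState bs = { .bl = { .request_alignment = 1 } };

    /* what bdrv_refresh_limits() does after picking the byte/sector default */
    drv.bdrv_refresh_limits(&bs);
    printf("request_alignment = %u\n", bs.bl.request_alignment);
    return 0;
}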

block/io.c

@ -33,7 +33,7 @@
#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
int64_t sector_num,
QEMUIOVector *qiov,
int nb_sectors,
@ -67,6 +67,17 @@ static void bdrv_parent_drained_end(BlockDriverState *bs)
}
}
static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
src->opt_mem_alignment);
dst->min_mem_alignment = MAX(dst->min_mem_alignment,
src->min_mem_alignment);
dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
BlockDriver *drv = bs->drv;
@ -78,6 +89,9 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
return;
}
/* Default alignment based on whether driver has byte interface */
bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;
/* Take some limits from the children as a default */
if (bs->file) {
bdrv_refresh_limits(bs->file->bs, &local_err);
@ -85,11 +99,7 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
error_propagate(errp, local_err);
return;
}
bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
bs->bl.max_iov = bs->file->bs->bl.max_iov;
bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
} else {
bs->bl.min_mem_alignment = 512;
bs->bl.opt_mem_alignment = getpagesize();
@ -104,21 +114,7 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
error_propagate(errp, local_err);
return;
}
bs->bl.opt_transfer_length =
MAX(bs->bl.opt_transfer_length,
bs->backing->bs->bl.opt_transfer_length);
bs->bl.max_transfer_length =
MIN_NON_ZERO(bs->bl.max_transfer_length,
bs->backing->bs->bl.max_transfer_length);
bs->bl.opt_mem_alignment =
MAX(bs->bl.opt_mem_alignment,
bs->backing->bs->bl.opt_mem_alignment);
bs->bl.min_mem_alignment =
MAX(bs->bl.min_mem_alignment,
bs->backing->bs->bl.min_mem_alignment);
bs->bl.max_iov =
MIN(bs->bl.max_iov,
bs->backing->bs->bl.max_iov);
bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
}
/* Then let the driver override it */
@ -463,7 +459,7 @@ static int bdrv_get_cluster_size(BlockDriverState *bs)
ret = bdrv_get_info(bs, &bdi);
if (ret < 0 || bdi.cluster_size == 0) {
return bs->request_alignment;
return bs->bl.request_alignment;
} else {
return bdi.cluster_size;
}
@ -557,7 +553,7 @@ static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
}
typedef struct RwCo {
BlockDriverState *bs;
BdrvChild *child;
int64_t offset;
QEMUIOVector *qiov;
bool is_write;
@ -570,11 +566,11 @@ static void coroutine_fn bdrv_rw_co_entry(void *opaque)
RwCo *rwco = opaque;
if (!rwco->is_write) {
rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
rwco->qiov->size, rwco->qiov,
rwco->flags);
} else {
rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
rwco->qiov->size, rwco->qiov,
rwco->flags);
}
@ -583,13 +579,13 @@ static void coroutine_fn bdrv_rw_co_entry(void *opaque)
/*
* Process a vectored synchronous request using coroutines
*/
static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
QEMUIOVector *qiov, bool is_write,
BdrvRequestFlags flags)
{
Coroutine *co;
RwCo rwco = {
.bs = bs,
.child = child,
.offset = offset,
.qiov = qiov,
.is_write = is_write,
@ -601,7 +597,7 @@ static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
/* Fast-path if already in coroutine context */
bdrv_rw_co_entry(&rwco);
} else {
AioContext *aio_context = bdrv_get_aio_context(bs);
AioContext *aio_context = bdrv_get_aio_context(child->bs);
co = qemu_coroutine_create(bdrv_rw_co_entry);
qemu_coroutine_enter(co, &rwco);
@ -615,7 +611,7 @@ static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
/*
* Process a synchronous request using coroutines
*/
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
int nb_sectors, bool is_write, BdrvRequestFlags flags)
{
QEMUIOVector qiov;
@ -629,15 +625,15 @@ static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
}
qemu_iovec_init_external(&qiov, &iov, 1);
return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
&qiov, is_write, flags);
}
/* return < 0 if error. See bdrv_write() for the return codes */
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
int bdrv_read(BdrvChild *child, int64_t sector_num,
uint8_t *buf, int nb_sectors)
{
return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
}
/* Return < 0 if error. Important errors are:
@ -646,13 +642,13 @@ int bdrv_read(BlockDriverState *bs, int64_t sector_num,
-EINVAL Invalid sector number or nb_sectors
-EACCES Trying to write a read-only device
*/
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
int bdrv_write(BdrvChild *child, int64_t sector_num,
const uint8_t *buf, int nb_sectors)
{
return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
}
int bdrv_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int count, BdrvRequestFlags flags)
{
QEMUIOVector qiov;
@ -662,7 +658,7 @@ int bdrv_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
};
qemu_iovec_init_external(&qiov, &iov, 1);
return bdrv_prwv_co(bs, offset, &qiov, true,
return bdrv_prwv_co(child, offset, &qiov, true,
BDRV_REQ_ZERO_WRITE | flags);
}
@ -675,9 +671,10 @@ int bdrv_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
*
* Returns < 0 on error, 0 on success. For error codes see bdrv_write().
*/
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
int64_t target_sectors, ret, nb_sectors, sector_num = 0;
BlockDriverState *bs = child->bs;
BlockDriverState *file;
int n;
@ -701,7 +698,7 @@ int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
sector_num += n;
continue;
}
ret = bdrv_pwrite_zeroes(bs, sector_num << BDRV_SECTOR_BITS,
ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
n << BDRV_SECTOR_BITS, flags);
if (ret < 0) {
error_report("error writing zeroes at sector %" PRId64 ": %s",
@ -712,11 +709,11 @@ int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
}
}
int bdrv_preadv(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
int ret;
ret = bdrv_prwv_co(bs, offset, qiov, false, 0);
ret = bdrv_prwv_co(child, offset, qiov, false, 0);
if (ret < 0) {
return ret;
}
@ -724,7 +721,7 @@ int bdrv_preadv(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
return qiov->size;
}
int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
{
QEMUIOVector qiov;
struct iovec iov = {
@ -737,14 +734,14 @@ int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
}
qemu_iovec_init_external(&qiov, &iov, 1);
return bdrv_preadv(bs, offset, &qiov);
return bdrv_preadv(child, offset, &qiov);
}
int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
{
int ret;
ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
ret = bdrv_prwv_co(child, offset, qiov, true, 0);
if (ret < 0) {
return ret;
}
@ -752,8 +749,7 @@ int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
return qiov->size;
}
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
const void *buf, int bytes)
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
{
QEMUIOVector qiov;
struct iovec iov = {
@ -766,7 +762,7 @@ int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
}
qemu_iovec_init_external(&qiov, &iov, 1);
return bdrv_pwritev(bs, offset, &qiov);
return bdrv_pwritev(child, offset, &qiov);
}
/*
@ -775,17 +771,17 @@ int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
*
* Returns 0 on success, -errno in error cases.
*/
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
const void *buf, int count)
{
int ret;
ret = bdrv_pwrite(bs, offset, buf, count);
ret = bdrv_pwrite(child, offset, buf, count);
if (ret < 0) {
return ret;
}
ret = bdrv_flush(bs);
ret = bdrv_flush(child->bs);
if (ret < 0) {
return ret;
}
@ -945,6 +941,9 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
if (drv->bdrv_co_pwrite_zeroes &&
buffer_is_zero(bounce_buffer, iov.iov_len)) {
/* FIXME: Should we (perhaps conditionally) be setting
* BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
* that still correctly reads as zero? */
ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
} else {
/* This does not change the data on the disk, it is not necessary
@ -987,7 +986,12 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
assert((bytes & (align - 1)) == 0);
assert(!qiov || bytes == qiov->size);
assert((bs->open_flags & BDRV_O_NO_IO) == 0);
assert(!(flags & ~BDRV_REQ_MASK));
/* TODO: We would need a per-BDS .supported_read_flags and
* potential fallback support, if we ever implement any read flags
* to pass through to drivers. For now, there aren't any
* passthrough flags. */
assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
/* Handle Copy on Read and associated serialisation */
if (flags & BDRV_REQ_COPY_ON_READ) {
@ -1028,7 +1032,7 @@ static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
}
max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
if (bytes < max_bytes) {
if (bytes <= max_bytes) {
ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
} else if (max_bytes > 0) {
QEMUIOVector local_qiov;
@ -1057,14 +1061,15 @@ out:
/*
* Handle a read request in coroutine context
*/
int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BlockDriverState *bs = child->bs;
BlockDriver *drv = bs->drv;
BdrvTrackedRequest req;
uint64_t align = bs->request_alignment;
uint64_t align = bs->bl.request_alignment;
uint8_t *head_buf = NULL;
uint8_t *tail_buf = NULL;
QEMUIOVector local_qiov;
@ -1125,7 +1130,7 @@ int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
return ret;
}
static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
@ -1133,19 +1138,20 @@ static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
return -EINVAL;
}
return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
trace_bdrv_co_readv(bs, sector_num, nb_sectors);
trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);
return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
}
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
/* Maximum buffer for write zeroes fallback, in bytes */
#define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count, BdrvRequestFlags flags)
@ -1159,8 +1165,8 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
int tail = 0;
int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
int alignment = MAX(bs->bl.pwrite_zeroes_alignment ?: 1,
bs->request_alignment);
int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
bs->bl.request_alignment);
assert(is_power_of_2(alignment));
head = offset & (alignment - 1);
@ -1203,7 +1209,7 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
if (ret == -ENOTSUP) {
/* Fall back to bounce buffer if write zeroes is unsupported */
int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
MAX_WRITE_ZEROES_BOUNCE_BUFFER);
BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
@ -1214,7 +1220,7 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
write_flags &= ~BDRV_REQ_FUA;
need_flush = true;
}
num = MIN(num, max_xfer_len << BDRV_SECTOR_BITS);
num = MIN(num, max_transfer);
iov.iov_len = num;
if (iov.iov_base == NULL) {
iov.iov_base = qemu_try_blockalign(bs, num);
@ -1231,7 +1237,7 @@ static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
/* Keep bounce buffer around if it is big enough for all
* all future requests.
*/
if (num < max_xfer_len << BDRV_SECTOR_BITS) {
if (num < max_transfer) {
qemu_vfree(iov.iov_base);
iov.iov_base = NULL;
}
@ -1254,7 +1260,7 @@ fail:
*/
static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
QEMUIOVector *qiov, int flags)
int64_t align, QEMUIOVector *qiov, int flags)
{
BlockDriver *drv = bs->drv;
bool waited;
@ -1263,6 +1269,9 @@ static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
int64_t start_sector = offset >> BDRV_SECTOR_BITS;
int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
assert(is_power_of_2(align));
assert((offset & (align - 1)) == 0);
assert((bytes & (align - 1)) == 0);
assert(!qiov || bytes == qiov->size);
assert((bs->open_flags & BDRV_O_NO_IO) == 0);
assert(!(flags & ~BDRV_REQ_MASK));
@ -1316,7 +1325,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
uint8_t *buf = NULL;
QEMUIOVector local_qiov;
struct iovec iov;
uint64_t align = bs->request_alignment;
uint64_t align = bs->bl.request_alignment;
unsigned int head_padding_bytes, tail_padding_bytes;
int ret = 0;
@ -1349,7 +1358,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
memset(buf + head_padding_bytes, 0, zero_bytes);
ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
&local_qiov,
align, &local_qiov,
flags & ~BDRV_REQ_ZERO_WRITE);
if (ret < 0) {
goto fail;
@ -1362,7 +1371,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
if (bytes >= align) {
/* Write the aligned part in the middle. */
uint64_t aligned_bytes = bytes & ~(align - 1);
ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
NULL, flags);
if (ret < 0) {
goto fail;
@ -1386,7 +1395,7 @@ static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
memset(buf, 0, bytes);
ret = bdrv_aligned_pwritev(bs, req, offset, align,
ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
&local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
}
fail:
@ -1398,12 +1407,13 @@ fail:
/*
* Handle a write request in coroutine context
*/
int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
BlockDriverState *bs = child->bs;
BdrvTrackedRequest req;
uint64_t align = bs->request_alignment;
uint64_t align = bs->bl.request_alignment;
uint8_t *head_buf = NULL;
uint8_t *tail_buf = NULL;
QEMUIOVector local_qiov;
@ -1511,7 +1521,7 @@ int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
bytes = ROUND_UP(bytes, align);
}
ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
use_local_qiov ? &local_qiov : qiov,
flags);
@ -1527,7 +1537,7 @@ out:
return ret;
}
static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
BdrvRequestFlags flags)
{
@ -1535,29 +1545,28 @@ static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
return -EINVAL;
}
return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
}
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
trace_bdrv_co_writev(bs, sector_num, nb_sectors);
trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);
return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
}
int coroutine_fn bdrv_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count,
BdrvRequestFlags flags)
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
int count, BdrvRequestFlags flags)
{
trace_bdrv_co_pwrite_zeroes(bs, offset, count, flags);
trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);
if (!(bs->open_flags & BDRV_O_UNMAP)) {
if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
flags &= ~BDRV_REQ_MAY_UNMAP;
}
return bdrv_co_pwritev(bs, offset, count, NULL,
return bdrv_co_pwritev(child, offset, count, NULL,
BDRV_REQ_ZERO_WRITE | flags);
}
@ -1954,23 +1963,23 @@ int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
/**************************************************************/
/* async I/Os */
BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
QEMUIOVector *qiov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);
return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0,
cb, opaque, false);
}
BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
QEMUIOVector *qiov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque)
{
trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);
return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
return bdrv_co_aio_rw_vector(child, sector_num, qiov, nb_sectors, 0,
cb, opaque, true);
}
@ -2026,6 +2035,7 @@ typedef struct BlockRequest {
typedef struct BlockAIOCBCoroutine {
BlockAIOCB common;
BdrvChild *child;
BlockRequest req;
bool is_write;
bool need_bh;
@ -2069,20 +2079,19 @@ static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
static void coroutine_fn bdrv_co_do_rw(void *opaque)
{
BlockAIOCBCoroutine *acb = opaque;
BlockDriverState *bs = acb->common.bs;
if (!acb->is_write) {
acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
acb->req.error = bdrv_co_do_readv(acb->child, acb->req.sector,
acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
} else {
acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
acb->req.error = bdrv_co_do_writev(acb->child, acb->req.sector,
acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
}
bdrv_co_complete(acb);
}
static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
static BlockAIOCB *bdrv_co_aio_rw_vector(BdrvChild *child,
int64_t sector_num,
QEMUIOVector *qiov,
int nb_sectors,
@ -2094,7 +2103,8 @@ static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
Coroutine *co;
BlockAIOCBCoroutine *acb;
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
acb->child = child;
acb->need_bh = true;
acb->req.error = -EINPROGRESS;
acb->req.sector = sector_num;
@ -2200,9 +2210,15 @@ void qemu_aio_unref(void *p)
/**************************************************************/
/* Coroutine block device emulation */
typedef struct FlushCo {
BlockDriverState *bs;
int ret;
} FlushCo;
static void coroutine_fn bdrv_flush_co_entry(void *opaque)
{
RwCo *rwco = opaque;
FlushCo *rwco = opaque;
rwco->ret = bdrv_co_flush(rwco->bs);
}
@ -2286,25 +2302,25 @@ out:
int bdrv_flush(BlockDriverState *bs)
{
Coroutine *co;
RwCo rwco = {
FlushCo flush_co = {
.bs = bs,
.ret = NOT_DONE,
};
if (qemu_in_coroutine()) {
/* Fast-path if already in coroutine context */
bdrv_flush_co_entry(&rwco);
bdrv_flush_co_entry(&flush_co);
} else {
AioContext *aio_context = bdrv_get_aio_context(bs);
co = qemu_coroutine_create(bdrv_flush_co_entry);
qemu_coroutine_enter(co, &rwco);
while (rwco.ret == NOT_DONE) {
qemu_coroutine_enter(co, &flush_co);
while (flush_co.ret == NOT_DONE) {
aio_poll(aio_context, true);
}
}
return rwco.ret;
return flush_co.ret;
}
typedef struct DiscardCo {
@ -2355,19 +2371,21 @@ int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
goto out;
}
max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
max_discard = MIN_NON_ZERO(bs->bl.max_pdiscard >> BDRV_SECTOR_BITS,
BDRV_REQUEST_MAX_SECTORS);
while (nb_sectors > 0) {
int ret;
int num = nb_sectors;
int discard_alignment = bs->bl.pdiscard_alignment >> BDRV_SECTOR_BITS;
/* align request */
if (bs->bl.discard_alignment &&
num >= bs->bl.discard_alignment &&
sector_num % bs->bl.discard_alignment) {
if (num > bs->bl.discard_alignment) {
num = bs->bl.discard_alignment;
if (discard_alignment &&
num >= discard_alignment &&
sector_num % discard_alignment) {
if (num > discard_alignment) {
num = discard_alignment;
}
num -= sector_num % bs->bl.discard_alignment;
num -= sector_num % discard_alignment;
}
/* limit request size */
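
In io.c the per-request alignment is now taken from bs->bl.request_alignment instead of bs->request_alignment, and unaligned requests are padded out to that alignment before the driver sees them. A standalone sketch of the head/tail padding arithmetic (an illustration of the rounding, not the QEMU functions themselves; align is assumed to be a power of two):

#include <stdint.h>
#include <stdio.h>

/* Round an unaligned byte range down/up so the resulting request is
 * aligned and still covers [offset, offset + bytes). */
static void align_request(uint64_t offset, uint64_t bytes, uint64_t align,
                          uint64_t *aligned_offset, uint64_t *aligned_bytes)
{
    uint64_t head = offset & (align - 1);              /* unaligned head bytes */
    uint64_t end  = offset + bytes;
    uint64_t tail = (align - (end & (align - 1))) & (align - 1);

    *aligned_offset = offset - head;
    *aligned_bytes  = bytes + head + tail;
}

int main(void)
{
    uint64_t off, len;
    align_request(1000, 3000, 512, &off, &len);
    printf("request [1000, +3000) -> aligned [%llu, +%llu)\n",
           (unsigned long long)off, (unsigned long long)len);
    return 0;
}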

block/iscsi.c

@ -473,9 +473,10 @@ iscsi_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
return -EINVAL;
}
if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
if (bs->bl.max_transfer &&
nb_sectors << BDRV_SECTOR_BITS > bs->bl.max_transfer) {
error_report("iSCSI Error: Write of %d sectors exceeds max_xfer_len "
"of %d sectors", nb_sectors, bs->bl.max_transfer_length);
"of %" PRIu32 " bytes", nb_sectors, bs->bl.max_transfer);
return -EINVAL;
}
@ -650,9 +651,10 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
return -EINVAL;
}
if (bs->bl.max_transfer_length && nb_sectors > bs->bl.max_transfer_length) {
if (bs->bl.max_transfer &&
nb_sectors << BDRV_SECTOR_BITS > bs->bl.max_transfer) {
error_report("iSCSI Error: Read of %d sectors exceeds max_xfer_len "
"of %d sectors", nb_sectors, bs->bl.max_transfer_length);
"of %" PRIu32 " bytes", nb_sectors, bs->bl.max_transfer);
return -EINVAL;
}
@ -1589,14 +1591,13 @@ static int iscsi_open(BlockDriverState *bs, QDict *options, int flags,
goto out;
}
bs->total_sectors = sector_lun2qemu(iscsilun->num_blocks, iscsilun);
bs->request_alignment = iscsilun->block_size;
/* We don't have any emulation for devices other than disks and CD-ROMs, so
* this must be sg ioctl compatible. We force it to be sg, otherwise qemu
* will try to read from the device to guess the image format.
*/
if (iscsilun->type != TYPE_DISK && iscsilun->type != TYPE_ROM) {
bs->sg = 1;
bs->sg = true;
}
task = iscsi_do_inquiry(iscsilun->iscsi, iscsilun->lun, 1,
@ -1696,34 +1697,33 @@ static void iscsi_close(BlockDriverState *bs)
memset(iscsilun, 0, sizeof(IscsiLun));
}
static int sector_limits_lun2qemu(int64_t sector, IscsiLun *iscsilun)
{
return MIN(sector_lun2qemu(sector, iscsilun), INT_MAX / 2 + 1);
}
static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
{
/* We don't actually refresh here, but just return data queried in
* iscsi_open(): iscsi targets don't change their limits. */
IscsiLun *iscsilun = bs->opaque;
uint32_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;
uint64_t max_xfer_len = iscsilun->use_16_for_rw ? 0xffffffff : 0xffff;
bs->bl.request_alignment = iscsilun->block_size;
if (iscsilun->bl.max_xfer_len) {
max_xfer_len = MIN(max_xfer_len, iscsilun->bl.max_xfer_len);
}
bs->bl.max_transfer_length = sector_limits_lun2qemu(max_xfer_len, iscsilun);
if (max_xfer_len * iscsilun->block_size < INT_MAX) {
bs->bl.max_transfer = max_xfer_len * iscsilun->block_size;
}
if (iscsilun->lbp.lbpu) {
if (iscsilun->bl.max_unmap < 0xffffffff) {
bs->bl.max_discard =
sector_limits_lun2qemu(iscsilun->bl.max_unmap, iscsilun);
if (iscsilun->bl.max_unmap < 0xffffffff / iscsilun->block_size) {
bs->bl.max_pdiscard =
iscsilun->bl.max_unmap * iscsilun->block_size;
}
bs->bl.discard_alignment =
sector_limits_lun2qemu(iscsilun->bl.opt_unmap_gran, iscsilun);
bs->bl.pdiscard_alignment =
iscsilun->bl.opt_unmap_gran * iscsilun->block_size;
} else {
bs->bl.discard_alignment = iscsilun->block_size >> BDRV_SECTOR_BITS;
bs->bl.pdiscard_alignment = iscsilun->block_size;
}
if (iscsilun->bl.max_ws_len < 0xffffffff / iscsilun->block_size) {
@ -1736,8 +1736,11 @@ static void iscsi_refresh_limits(BlockDriverState *bs, Error **errp)
} else {
bs->bl.pwrite_zeroes_alignment = iscsilun->block_size;
}
bs->bl.opt_transfer_length =
sector_limits_lun2qemu(iscsilun->bl.opt_xfer_len, iscsilun);
if (iscsilun->bl.opt_xfer_len &&
iscsilun->bl.opt_xfer_len < INT_MAX / iscsilun->block_size) {
bs->bl.opt_transfer = pow2floor(iscsilun->bl.opt_xfer_len *
iscsilun->block_size);
}
}
/* Note that this will not re-establish a connection with an iSCSI target - it

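iscsi_refresh_limits() now publishes byte-based limits: the LUN's limits in blocks are multiplied by the block size, applied only when the product stays below INT_MAX, and the optimal transfer length is additionally rounded down to a power of two. A standalone sketch of that conversion, with pow2floor() re-implemented locally as a stand-in:

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

/* Local stand-in for pow2floor(): largest power of two <= value. */
static uint64_t pow2floor(uint64_t value)
{
    uint64_t p = 1;
    while (p * 2 != 0 && p * 2 <= value) {
        p *= 2;
    }
    return value ? p : 0;
}

int main(void)
{
    uint32_t block_size = 512;
    uint64_t max_xfer_blocks = 0xffff;      /* 16-bit CDB limit */
    uint32_t opt_xfer_blocks = 1000;        /* not a power of two in bytes */
    uint32_t max_transfer = 0, opt_transfer = 0;

    /* Apply the block->byte conversion only when it fits below INT_MAX. */
    if (max_xfer_blocks * block_size < INT_MAX) {
        max_transfer = max_xfer_blocks * block_size;
    }
    if (opt_xfer_blocks && opt_xfer_blocks < INT_MAX / block_size) {
        opt_transfer = pow2floor((uint64_t)opt_xfer_blocks * block_size);
    }

    printf("max_transfer = %u bytes, opt_transfer = %u bytes\n",
           max_transfer, opt_transfer);
    return 0;
}
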
block/linux-aio.c

@ -87,7 +87,7 @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
qemu_iovec_memset(laiocb->qiov, ret, 0,
laiocb->qiov->size - ret);
} else {
ret = -EINVAL;
ret = -ENOSPC;
}
}
}

block/nbd-client.c

@ -269,10 +269,6 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
return -reply.error;
}
/* qemu-nbd has a limit of slightly less than 1M per request. Try to
* remain aligned to 4K. */
#define NBD_MAX_SECTORS 2040
int nbd_client_co_readv(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{

block/nbd.c

@ -362,8 +362,8 @@ static int nbd_co_flush(BlockDriverState *bs)
static void nbd_refresh_limits(BlockDriverState *bs, Error **errp)
{
bs->bl.max_discard = UINT32_MAX >> BDRV_SECTOR_BITS;
bs->bl.max_transfer_length = UINT32_MAX >> BDRV_SECTOR_BITS;
bs->bl.max_pdiscard = NBD_MAX_BUFFER_SIZE;
bs->bl.max_transfer = NBD_MAX_BUFFER_SIZE;
}
static int nbd_co_discard(BlockDriverState *bs, int64_t sector_num,

block/parallels.c

@ -210,7 +210,7 @@ static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num,
int ret;
space += s->prealloc_size;
if (s->prealloc_mode == PRL_PREALLOC_MODE_FALLOCATE) {
ret = bdrv_pwrite_zeroes(bs->file->bs,
ret = bdrv_pwrite_zeroes(bs->file,
s->data_end << BDRV_SECTOR_BITS,
space << BDRV_SECTOR_BITS, 0);
} else {
@ -250,7 +250,7 @@ static coroutine_fn int parallels_co_flush_to_os(BlockDriverState *bs)
if (off + to_write > s->header_size) {
to_write = s->header_size - off;
}
ret = bdrv_pwrite(bs->file->bs, off, (uint8_t *)s->header + off,
ret = bdrv_pwrite(bs->file, off, (uint8_t *)s->header + off,
to_write);
if (ret < 0) {
qemu_co_mutex_unlock(&s->lock);
@ -311,7 +311,7 @@ static coroutine_fn int parallels_co_writev(BlockDriverState *bs,
qemu_iovec_reset(&hd_qiov);
qemu_iovec_concat(&hd_qiov, qiov, bytes_done, nbytes);
ret = bdrv_co_writev(bs->file->bs, position, n, &hd_qiov);
ret = bdrv_co_writev(bs->file, position, n, &hd_qiov);
if (ret < 0) {
break;
}
@ -351,7 +351,7 @@ static coroutine_fn int parallels_co_readv(BlockDriverState *bs,
qemu_iovec_reset(&hd_qiov);
qemu_iovec_concat(&hd_qiov, qiov, bytes_done, nbytes);
ret = bdrv_co_readv(bs->file->bs, position, n, &hd_qiov);
ret = bdrv_co_readv(bs->file, position, n, &hd_qiov);
if (ret < 0) {
break;
}
@ -432,7 +432,7 @@ static int parallels_check(BlockDriverState *bs, BdrvCheckResult *res,
}
if (flush_bat) {
ret = bdrv_pwrite_sync(bs->file->bs, 0, s->header, s->header_size);
ret = bdrv_pwrite_sync(bs->file, 0, s->header, s->header_size);
if (ret < 0) {
res->check_errors++;
return ret;
@ -563,7 +563,7 @@ static int parallels_update_header(BlockDriverState *bs)
if (size > s->header_size) {
size = s->header_size;
}
return bdrv_pwrite_sync(bs->file->bs, 0, s->header, size);
return bdrv_pwrite_sync(bs->file, 0, s->header, size);
}
static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
@ -576,7 +576,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
Error *local_err = NULL;
char *buf;
ret = bdrv_pread(bs->file->bs, 0, &ph, sizeof(ph));
ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph));
if (ret < 0) {
goto fail;
}
@ -631,7 +631,7 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
s->header_size = size;
}
ret = bdrv_pread(bs->file->bs, 0, s->header, s->header_size);
ret = bdrv_pread(bs->file, 0, s->header, s->header_size);
if (ret < 0) {
goto fail;
}

block/qcow.c

@ -105,7 +105,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
int ret;
QCowHeader header;
ret = bdrv_pread(bs->file->bs, 0, &header, sizeof(header));
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
goto fail;
}
@ -174,7 +174,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
bs->encrypted = 1;
bs->encrypted = true;
}
s->cluster_bits = header.cluster_bits;
s->cluster_size = 1 << s->cluster_bits;
@ -208,7 +208,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
ret = bdrv_pread(bs->file->bs, s->l1_table_offset, s->l1_table,
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
goto fail;
@ -239,7 +239,7 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags,
ret = -EINVAL;
goto fail;
}
ret = bdrv_pread(bs->file->bs, header.backing_file_offset,
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
if (ret < 0) {
goto fail;
@ -390,7 +390,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
/* update the L1 entry */
s->l1_table[l1_index] = l2_offset;
tmp = cpu_to_be64(l2_offset);
if (bdrv_pwrite_sync(bs->file->bs,
if (bdrv_pwrite_sync(bs->file,
s->l1_table_offset + l1_index * sizeof(tmp),
&tmp, sizeof(tmp)) < 0)
return 0;
@ -420,11 +420,11 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
l2_table = s->l2_cache + (min_index << s->l2_bits);
if (new_l2_table) {
memset(l2_table, 0, s->l2_size * sizeof(uint64_t));
if (bdrv_pwrite_sync(bs->file->bs, l2_offset, l2_table,
if (bdrv_pwrite_sync(bs->file, l2_offset, l2_table,
s->l2_size * sizeof(uint64_t)) < 0)
return 0;
} else {
if (bdrv_pread(bs->file->bs, l2_offset, l2_table,
if (bdrv_pread(bs->file, l2_offset, l2_table,
s->l2_size * sizeof(uint64_t)) !=
s->l2_size * sizeof(uint64_t))
return 0;
@ -450,7 +450,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
cluster_offset = (cluster_offset + s->cluster_size - 1) &
~(s->cluster_size - 1);
/* write the cluster content */
if (bdrv_pwrite(bs->file->bs, cluster_offset, s->cluster_cache,
if (bdrv_pwrite(bs->file, cluster_offset, s->cluster_cache,
s->cluster_size) !=
s->cluster_size)
return -1;
@ -480,7 +480,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
errno = EIO;
return -1;
}
if (bdrv_pwrite(bs->file->bs,
if (bdrv_pwrite(bs->file,
cluster_offset + i * 512,
s->cluster_data, 512) != 512)
return -1;
@ -495,7 +495,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
/* update L2 table */
tmp = cpu_to_be64(cluster_offset);
l2_table[l2_index] = tmp;
if (bdrv_pwrite_sync(bs->file->bs, l2_offset + l2_index * sizeof(tmp),
if (bdrv_pwrite_sync(bs->file, l2_offset + l2_index * sizeof(tmp),
&tmp, sizeof(tmp)) < 0)
return 0;
}
@ -565,7 +565,7 @@ static int decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
if (s->cluster_cache_offset != coffset) {
csize = cluster_offset >> (63 - s->cluster_bits);
csize &= (s->cluster_size - 1);
ret = bdrv_pread(bs->file->bs, coffset, s->cluster_data, csize);
ret = bdrv_pread(bs->file, coffset, s->cluster_data, csize);
if (ret != csize)
return -1;
if (decompress_buffer(s->cluster_cache, s->cluster_size,
@ -619,8 +619,7 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
hd_iov.iov_len = n * 512;
qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_readv(bs->backing->bs, sector_num,
n, &hd_qiov);
ret = bdrv_co_readv(bs->backing, sector_num, n, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
goto fail;
@ -644,7 +643,7 @@ static coroutine_fn int qcow_co_readv(BlockDriverState *bs, int64_t sector_num,
hd_iov.iov_len = n * 512;
qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_readv(bs->file->bs,
ret = bdrv_co_readv(bs->file,
(cluster_offset >> 9) + index_in_cluster,
n, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
@ -746,7 +745,7 @@ static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
hd_iov.iov_len = n * 512;
qemu_iovec_init_external(&hd_qiov, &hd_iov, 1);
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_writev(bs->file->bs,
ret = bdrv_co_writev(bs->file,
(cluster_offset >> 9) + index_in_cluster,
n, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
@ -900,7 +899,7 @@ static int qcow_make_empty(BlockDriverState *bs)
int ret;
memset(s->l1_table, 0, l1_length);
if (bdrv_pwrite_sync(bs->file->bs, s->l1_table_offset, s->l1_table,
if (bdrv_pwrite_sync(bs->file, s->l1_table_offset, s->l1_table,
l1_length) < 0)
return -1;
ret = bdrv_truncate(bs->file->bs, s->l1_table_offset + l1_length);
@ -914,6 +913,49 @@ static int qcow_make_empty(BlockDriverState *bs)
return 0;
}
typedef struct QcowWriteCo {
BlockDriverState *bs;
int64_t sector_num;
const uint8_t *buf;
int nb_sectors;
int ret;
} QcowWriteCo;
static void qcow_write_co_entry(void *opaque)
{
QcowWriteCo *co = opaque;
QEMUIOVector qiov;
struct iovec iov = (struct iovec) {
.iov_base = (uint8_t*) co->buf,
.iov_len = co->nb_sectors * BDRV_SECTOR_SIZE,
};
qemu_iovec_init_external(&qiov, &iov, 1);
co->ret = qcow_co_writev(co->bs, co->sector_num, co->nb_sectors, &qiov);
}
/* Wrapper for non-coroutine contexts */
static int qcow_write(BlockDriverState *bs, int64_t sector_num,
const uint8_t *buf, int nb_sectors)
{
Coroutine *co;
AioContext *aio_context = bdrv_get_aio_context(bs);
QcowWriteCo data = {
.bs = bs,
.sector_num = sector_num,
.buf = buf,
.nb_sectors = nb_sectors,
.ret = -EINPROGRESS,
};
co = qemu_coroutine_create(qcow_write_co_entry);
qemu_coroutine_enter(co, &data);
while (data.ret == -EINPROGRESS) {
aio_poll(aio_context, true);
}
return data.ret;
}
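The qcow_write() helper above is an instance of QEMU's usual recipe for driving a coroutine_fn from non-coroutine code: pack the arguments into a struct, enter a coroutine whose entry function runs the coroutine_fn and stores its return value, then poll the AioContext until the -EINPROGRESS sentinel has been overwritten. A generic sketch of the same recipe, with illustrative names (some_coroutine_fn is hypothetical, not part of this patch):

typedef struct SyncCo {
    BlockDriverState *bs;
    int ret;                                /* stays -EINPROGRESS until done */
} SyncCo;

static void sync_co_entry(void *opaque)
{
    SyncCo *co = opaque;
    co->ret = some_coroutine_fn(co->bs);    /* hypothetical coroutine_fn */
}

static int run_sync(BlockDriverState *bs)
{
    SyncCo data = { .bs = bs, .ret = -EINPROGRESS };
    Coroutine *co = qemu_coroutine_create(sync_co_entry);

    qemu_coroutine_enter(co, &data);        /* &data arrives as 'opaque' above */
    while (data.ret == -EINPROGRESS) {
        aio_poll(bdrv_get_aio_context(bs), true);   /* blocking poll */
    }
    return data.ret;
}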
/* XXX: put compressed sectors first, then all the cluster aligned
tables to avoid losing bytes in alignment */
static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
@ -970,7 +1012,7 @@ static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
/* could not compress: write normal cluster */
ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors);
ret = qcow_write(bs, sector_num, buf, s->cluster_sectors);
if (ret < 0) {
goto fail;
}
@ -983,7 +1025,7 @@ static int qcow_write_compressed(BlockDriverState *bs, int64_t sector_num,
}
cluster_offset &= s->cluster_offset_mask;
ret = bdrv_pwrite(bs->file->bs, cluster_offset, out_buf, out_len);
ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len);
if (ret < 0) {
goto fail;
}


@ -210,7 +210,7 @@ static int qcow2_cache_entry_flush(BlockDriverState *bs, Qcow2Cache *c, int i)
BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
}
ret = bdrv_pwrite(bs->file->bs, c->entries[i].offset,
ret = bdrv_pwrite(bs->file, c->entries[i].offset,
qcow2_cache_get_table_addr(bs, c, i), s->cluster_size);
if (ret < 0) {
return ret;
@ -357,7 +357,7 @@ static int qcow2_cache_do_get(BlockDriverState *bs, Qcow2Cache *c,
BLKDBG_EVENT(bs->file, BLKDBG_L2_LOAD);
}
ret = bdrv_pread(bs->file->bs, offset,
ret = bdrv_pread(bs->file, offset,
qcow2_cache_get_table_addr(bs, c, i),
s->cluster_size);
if (ret < 0) {


@ -108,7 +108,7 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_WRITE_TABLE);
for(i = 0; i < s->l1_size; i++)
new_l1_table[i] = cpu_to_be64(new_l1_table[i]);
ret = bdrv_pwrite_sync(bs->file->bs, new_l1_table_offset,
ret = bdrv_pwrite_sync(bs->file, new_l1_table_offset,
new_l1_table, new_l1_size2);
if (ret < 0)
goto fail;
@ -117,9 +117,9 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
/* set new table */
BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ACTIVATE_TABLE);
cpu_to_be32w((uint32_t*)data, new_l1_size);
stl_be_p(data, new_l1_size);
stq_be_p(data + 4, new_l1_table_offset);
ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, l1_size),
ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_size),
data, sizeof(data));
if (ret < 0) {
goto fail;
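Alongside the BdrvChild conversion, the header update drops the in-place byte-swap-and-store helpers (cpu_to_be32w() and friends) in favour of the explicit store helpers or plain assignment of a swapped value. Both spellings produce the same bytes on disk; a small illustrative comparison (the value is an assumption, not from the patch):

uint8_t  buf[4];
uint32_t val = 42;                       /* illustrative value */

cpu_to_be32w((uint32_t *)buf, val);      /* old style: in-place swap-and-store */
stl_be_p(buf, val);                      /* new style: explicit big-endian store */

The equivalent pattern for struct fields, used in the refcount code further down, is a direct assignment such as data.d64 = cpu_to_be64(table_offset);.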
@ -185,7 +185,7 @@ int qcow2_write_l1_entry(BlockDriverState *bs, int l1_index)
}
BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
ret = bdrv_pwrite_sync(bs->file->bs,
ret = bdrv_pwrite_sync(bs->file,
s->l1_table_offset + 8 * l1_start_index,
buf, sizeof(buf));
if (ret < 0) {
@ -446,7 +446,7 @@ static int coroutine_fn do_perform_cow(BlockDriverState *bs,
}
BLKDBG_EVENT(bs->file, BLKDBG_COW_WRITE);
ret = bdrv_co_pwritev(bs->file->bs, cluster_offset + offset_in_cluster,
ret = bdrv_co_pwritev(bs->file, cluster_offset + offset_in_cluster,
bytes, &qiov, 0);
if (ret < 0) {
goto out;
@ -1408,7 +1408,7 @@ int qcow2_decompress_cluster(BlockDriverState *bs, uint64_t cluster_offset)
sector_offset = coffset & 511;
csize = nb_csectors * 512 - sector_offset;
BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
ret = bdrv_read(bs->file->bs, coffset >> 9, s->cluster_data,
ret = bdrv_read(bs->file, coffset >> 9, s->cluster_data,
nb_csectors);
if (ret < 0) {
return ret;
@ -1677,7 +1677,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
(void **)&l2_table);
} else {
/* load inactive L2 tables from disk */
ret = bdrv_read(bs->file->bs, l2_offset / BDRV_SECTOR_SIZE,
ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
(void *)l2_table, s->cluster_sectors);
}
if (ret < 0) {
@ -1752,7 +1752,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
goto fail;
}
ret = bdrv_pwrite_zeroes(bs->file->bs, offset, s->cluster_size, 0);
ret = bdrv_pwrite_zeroes(bs->file, offset, s->cluster_size, 0);
if (ret < 0) {
if (!preallocated) {
qcow2_free_clusters(bs, offset, s->cluster_size,
@ -1784,7 +1784,7 @@ static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
goto fail;
}
ret = bdrv_write(bs->file->bs, l2_offset / BDRV_SECTOR_SIZE,
ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
(void *)l2_table, s->cluster_sectors);
if (ret < 0) {
goto fail;
@ -1859,7 +1859,7 @@ int qcow2_expand_zero_clusters(BlockDriverState *bs,
l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);
ret = bdrv_read(bs->file->bs,
ret = bdrv_read(bs->file,
s->snapshots[i].l1_table_offset / BDRV_SECTOR_SIZE,
(void *)l1_table, l1_sectors);
if (ret < 0) {


@ -104,7 +104,7 @@ int qcow2_refcount_init(BlockDriverState *bs)
goto fail;
}
BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
ret = bdrv_pread(bs->file->bs, s->refcount_table_offset,
ret = bdrv_pread(bs->file, s->refcount_table_offset,
s->refcount_table, refcount_table_size2);
if (ret < 0) {
goto fail;
@ -431,7 +431,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
if (refcount_table_index < s->refcount_table_size) {
uint64_t data64 = cpu_to_be64(new_block);
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
ret = bdrv_pwrite_sync(bs->file->bs,
ret = bdrv_pwrite_sync(bs->file,
s->refcount_table_offset + refcount_table_index * sizeof(uint64_t),
&data64, sizeof(data64));
if (ret < 0) {
@ -533,7 +533,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
/* Write refcount blocks to disk */
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
ret = bdrv_pwrite_sync(bs->file->bs, meta_offset, new_blocks,
ret = bdrv_pwrite_sync(bs->file, meta_offset, new_blocks,
blocks_clusters * s->cluster_size);
g_free(new_blocks);
new_blocks = NULL;
@ -547,7 +547,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
}
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
ret = bdrv_pwrite_sync(bs->file->bs, table_offset, new_table,
ret = bdrv_pwrite_sync(bs->file, table_offset, new_table,
table_size * sizeof(uint64_t));
if (ret < 0) {
goto fail_table;
@ -562,10 +562,10 @@ static int alloc_refcount_block(BlockDriverState *bs,
uint64_t d64;
uint32_t d32;
} data;
cpu_to_be64w(&data.d64, table_offset);
cpu_to_be32w(&data.d32, table_clusters);
data.d64 = cpu_to_be64(table_offset);
data.d32 = cpu_to_be32(table_clusters);
BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
ret = bdrv_pwrite_sync(bs->file->bs,
ret = bdrv_pwrite_sync(bs->file,
offsetof(QCowHeader, refcount_table_offset),
&data, sizeof(data));
if (ret < 0) {
@ -1070,7 +1070,7 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
}
l1_allocated = true;
ret = bdrv_pread(bs->file->bs, l1_table_offset, l1_table, l1_size2);
ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
if (ret < 0) {
goto fail;
}
@ -1223,7 +1223,7 @@ fail:
cpu_to_be64s(&l1_table[i]);
}
ret = bdrv_pwrite_sync(bs->file->bs, l1_table_offset,
ret = bdrv_pwrite_sync(bs->file, l1_table_offset,
l1_table, l1_size2);
for (i = 0; i < l1_size; i++) {
@ -1382,7 +1382,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
l2_size = s->l2_size * sizeof(uint64_t);
l2_table = g_malloc(l2_size);
ret = bdrv_pread(bs->file->bs, l2_offset, l2_table, l2_size);
ret = bdrv_pread(bs->file, l2_offset, l2_table, l2_size);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
res->check_errors++;
@ -1514,7 +1514,7 @@ static int check_refcounts_l1(BlockDriverState *bs,
res->check_errors++;
goto fail;
}
ret = bdrv_pread(bs->file->bs, l1_table_offset, l1_table, l1_size2);
ret = bdrv_pread(bs->file, l1_table_offset, l1_table, l1_size2);
if (ret < 0) {
fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
res->check_errors++;
@ -1612,7 +1612,7 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
}
}
ret = bdrv_pread(bs->file->bs, l2_offset, l2_table,
ret = bdrv_pread(bs->file, l2_offset, l2_table,
s->l2_size * sizeof(uint64_t));
if (ret < 0) {
fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
@ -1664,7 +1664,7 @@ static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
goto fail;
}
ret = bdrv_pwrite(bs->file->bs, l2_offset, l2_table,
ret = bdrv_pwrite(bs->file, l2_offset, l2_table,
s->cluster_size);
if (ret < 0) {
fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
@ -2098,7 +2098,7 @@ write_refblocks:
on_disk_refblock = (void *)((char *) *refcount_table +
refblock_index * s->cluster_size);
ret = bdrv_write(bs->file->bs, refblock_offset / BDRV_SECTOR_SIZE,
ret = bdrv_write(bs->file, refblock_offset / BDRV_SECTOR_SIZE,
on_disk_refblock, s->cluster_sectors);
if (ret < 0) {
fprintf(stderr, "ERROR writing refblock: %s\n", strerror(-ret));
@ -2147,7 +2147,7 @@ write_refblocks:
}
assert(reftable_size < INT_MAX / sizeof(uint64_t));
ret = bdrv_pwrite(bs->file->bs, reftable_offset, on_disk_reftable,
ret = bdrv_pwrite(bs->file, reftable_offset, on_disk_reftable,
reftable_size * sizeof(uint64_t));
if (ret < 0) {
fprintf(stderr, "ERROR writing reftable: %s\n", strerror(-ret));
@ -2155,12 +2155,11 @@ write_refblocks:
}
/* Enter new reftable into the image header */
cpu_to_be64w(&reftable_offset_and_clusters.reftable_offset,
reftable_offset);
cpu_to_be32w(&reftable_offset_and_clusters.reftable_clusters,
size_to_clusters(s, reftable_size * sizeof(uint64_t)));
ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader,
refcount_table_offset),
reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
reftable_offset_and_clusters.reftable_clusters =
cpu_to_be32(size_to_clusters(s, reftable_size * sizeof(uint64_t)));
ret = bdrv_pwrite_sync(bs->file,
offsetof(QCowHeader, refcount_table_offset),
&reftable_offset_and_clusters,
sizeof(reftable_offset_and_clusters));
if (ret < 0) {
@ -2407,7 +2406,7 @@ int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
return -ENOMEM;
}
ret = bdrv_pread(bs->file->bs, l1_ofs, l1, l1_sz2);
ret = bdrv_pread(bs->file, l1_ofs, l1, l1_sz2);
if (ret < 0) {
g_free(l1);
return ret;
@ -2560,7 +2559,7 @@ static int flush_refblock(BlockDriverState *bs, uint64_t **reftable,
return ret;
}
ret = bdrv_pwrite(bs->file->bs, offset, refblock, s->cluster_size);
ret = bdrv_pwrite(bs->file, offset, refblock, s->cluster_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Failed to write refblock");
return ret;
@ -2830,7 +2829,7 @@ int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
cpu_to_be64s(&new_reftable[i]);
}
ret = bdrv_pwrite(bs->file->bs, new_reftable_offset, new_reftable,
ret = bdrv_pwrite(bs->file, new_reftable_offset, new_reftable,
new_reftable_size * sizeof(uint64_t));
for (i = 0; i < new_reftable_size; i++) {


@ -67,7 +67,7 @@ int qcow2_read_snapshots(BlockDriverState *bs)
for(i = 0; i < s->nb_snapshots; i++) {
/* Read statically sized part of the snapshot header */
offset = align_offset(offset, 8);
ret = bdrv_pread(bs->file->bs, offset, &h, sizeof(h));
ret = bdrv_pread(bs->file, offset, &h, sizeof(h));
if (ret < 0) {
goto fail;
}
@ -86,7 +86,7 @@ int qcow2_read_snapshots(BlockDriverState *bs)
name_size = be16_to_cpu(h.name_size);
/* Read extra data */
ret = bdrv_pread(bs->file->bs, offset, &extra,
ret = bdrv_pread(bs->file, offset, &extra,
MIN(sizeof(extra), extra_data_size));
if (ret < 0) {
goto fail;
@ -105,7 +105,7 @@ int qcow2_read_snapshots(BlockDriverState *bs)
/* Read snapshot ID */
sn->id_str = g_malloc(id_str_size + 1);
ret = bdrv_pread(bs->file->bs, offset, sn->id_str, id_str_size);
ret = bdrv_pread(bs->file, offset, sn->id_str, id_str_size);
if (ret < 0) {
goto fail;
}
@ -114,7 +114,7 @@ int qcow2_read_snapshots(BlockDriverState *bs)
/* Read snapshot name */
sn->name = g_malloc(name_size + 1);
ret = bdrv_pread(bs->file->bs, offset, sn->name, name_size);
ret = bdrv_pread(bs->file, offset, sn->name, name_size);
if (ret < 0) {
goto fail;
}
@ -217,25 +217,25 @@ static int qcow2_write_snapshots(BlockDriverState *bs)
h.name_size = cpu_to_be16(name_size);
offset = align_offset(offset, 8);
ret = bdrv_pwrite(bs->file->bs, offset, &h, sizeof(h));
ret = bdrv_pwrite(bs->file, offset, &h, sizeof(h));
if (ret < 0) {
goto fail;
}
offset += sizeof(h);
ret = bdrv_pwrite(bs->file->bs, offset, &extra, sizeof(extra));
ret = bdrv_pwrite(bs->file, offset, &extra, sizeof(extra));
if (ret < 0) {
goto fail;
}
offset += sizeof(extra);
ret = bdrv_pwrite(bs->file->bs, offset, sn->id_str, id_str_size);
ret = bdrv_pwrite(bs->file, offset, sn->id_str, id_str_size);
if (ret < 0) {
goto fail;
}
offset += id_str_size;
ret = bdrv_pwrite(bs->file->bs, offset, sn->name, name_size);
ret = bdrv_pwrite(bs->file, offset, sn->name, name_size);
if (ret < 0) {
goto fail;
}
@ -257,7 +257,7 @@ static int qcow2_write_snapshots(BlockDriverState *bs)
header_data.nb_snapshots = cpu_to_be32(s->nb_snapshots);
header_data.snapshots_offset = cpu_to_be64(snapshots_offset);
ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, nb_snapshots),
ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, nb_snapshots),
&header_data, sizeof(header_data));
if (ret < 0) {
goto fail;
@ -399,7 +399,7 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
goto fail;
}
ret = bdrv_pwrite(bs->file->bs, sn->l1_table_offset, l1_table,
ret = bdrv_pwrite(bs->file, sn->l1_table_offset, l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
goto fail;
@ -512,7 +512,7 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
goto fail;
}
ret = bdrv_pread(bs->file->bs, sn->l1_table_offset,
ret = bdrv_pread(bs->file, sn->l1_table_offset,
sn_l1_table, sn_l1_bytes);
if (ret < 0) {
goto fail;
@ -530,7 +530,7 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
goto fail;
}
ret = bdrv_pwrite_sync(bs->file->bs, s->l1_table_offset, sn_l1_table,
ret = bdrv_pwrite_sync(bs->file, s->l1_table_offset, sn_l1_table,
cur_l1_bytes);
if (ret < 0) {
goto fail;
@ -716,7 +716,7 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
return -ENOMEM;
}
ret = bdrv_pread(bs->file->bs, sn->l1_table_offset,
ret = bdrv_pread(bs->file, sn->l1_table_offset,
new_l1_table, new_l1_bytes);
if (ret < 0) {
error_setg(errp, "Failed to read l1 table for snapshot");


@ -107,7 +107,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
printf("attempting to read extended header in offset %lu\n", offset);
#endif
ret = bdrv_pread(bs->file->bs, offset, &ext, sizeof(ext));
ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
if (ret < 0) {
error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
"pread fail from offset %" PRIu64, offset);
@ -135,7 +135,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
sizeof(bs->backing_format));
return 2;
}
ret = bdrv_pread(bs->file->bs, offset, bs->backing_format, ext.len);
ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
"Could not read format name");
@ -151,7 +151,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
case QCOW2_EXT_MAGIC_FEATURE_TABLE:
if (p_feature_table != NULL) {
void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
ret = bdrv_pread(bs->file->bs, offset , feature_table, ext.len);
ret = bdrv_pread(bs->file, offset , feature_table, ext.len);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
"Could not read table");
@ -172,7 +172,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
uext->len = ext.len;
QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);
ret = bdrv_pread(bs->file->bs, offset , uext->data, uext->len);
ret = bdrv_pread(bs->file, offset , uext->data, uext->len);
if (ret < 0) {
error_setg_errno(errp, -ret, "ERROR: unknown extension: "
"Could not read data");
@ -249,7 +249,7 @@ int qcow2_mark_dirty(BlockDriverState *bs)
}
val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
ret = bdrv_pwrite(bs->file->bs, offsetof(QCowHeader, incompatible_features),
ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
&val, sizeof(val));
if (ret < 0) {
return ret;
@ -817,7 +817,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
uint64_t ext_end;
uint64_t l1_vm_state_index;
ret = bdrv_pread(bs->file->bs, 0, &header, sizeof(header));
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read qcow2 header");
goto fail;
@ -892,7 +892,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
if (header.header_length > sizeof(header)) {
s->unknown_header_fields_size = header.header_length - sizeof(header);
s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
ret = bdrv_pread(bs->file->bs, sizeof(header), s->unknown_header_fields,
ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
s->unknown_header_fields_size);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
@ -980,10 +980,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
bs->encrypted = 1;
/* Encryption works on a sector granularity */
bs->request_alignment = BDRV_SECTOR_SIZE;
bs->encrypted = true;
}
s->l2_bits = s->cluster_bits - 3; /* L2 is always one cluster */
@ -1069,7 +1066,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
ret = -ENOMEM;
goto fail;
}
ret = bdrv_pread(bs->file->bs, s->l1_table_offset, s->l1_table,
ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
s->l1_size * sizeof(uint64_t));
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read L1 table");
@ -1125,7 +1122,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
ret = -EINVAL;
goto fail;
}
ret = bdrv_pread(bs->file->bs, header.backing_file_offset,
ret = bdrv_pread(bs->file, header.backing_file_offset,
bs->backing_file, len);
if (ret < 0) {
error_setg_errno(errp, -ret, "Could not read backing file name");
@ -1202,6 +1199,10 @@ static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
{
BDRVQcow2State *s = bs->opaque;
if (bs->encrypted) {
/* Encryption works on a sector granularity */
bs->bl.request_alignment = BDRV_SECTOR_SIZE;
}
bs->bl.pwrite_zeroes_alignment = s->cluster_size;
}
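The encryption alignment moves from open time into the driver's limits callback: request_alignment is now a BlockLimits field, and bdrv_refresh_limits() recomputes bs->bl whenever the limits may change, so any per-driver constraint has to be re-established there. A minimal sketch of such a callback (the driver name is illustrative):

static void mydrv_refresh_limits(BlockDriverState *bs, Error **errp)
{
    /* Sketch only: alignment constraints belong in BlockLimits and must be
     * set every time the limits are refreshed, not once at open. */
    if (bs->encrypted) {
        bs->bl.request_alignment = BDRV_SECTOR_SIZE;   /* 512-byte granularity */
    }
}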
@ -1442,7 +1443,7 @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_preadv(bs->backing->bs, offset, n1,
ret = bdrv_co_preadv(bs->backing, offset, n1,
&local_qiov, 0);
qemu_co_mutex_lock(&s->lock);
@ -1505,7 +1506,7 @@ static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_preadv(bs->file->bs,
ret = bdrv_co_preadv(bs->file,
cluster_offset + offset_in_cluster,
cur_bytes, &hd_qiov, 0);
qemu_co_mutex_lock(&s->lock);
@ -1636,7 +1637,7 @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
trace_qcow2_writev_data(qemu_coroutine_self(),
cluster_offset + offset_in_cluster);
ret = bdrv_co_pwritev(bs->file->bs,
ret = bdrv_co_pwritev(bs->file,
cluster_offset + offset_in_cluster,
cur_bytes, &hd_qiov, 0);
qemu_co_mutex_lock(&s->lock);
@ -1975,7 +1976,7 @@ int qcow2_update_header(BlockDriverState *bs)
}
/* Write the new header */
ret = bdrv_pwrite(bs->file->bs, 0, header, s->cluster_size);
ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
if (ret < 0) {
goto fail;
}
@ -2058,7 +2059,7 @@ static int preallocate(BlockDriverState *bs)
*/
if (host_offset != 0) {
uint8_t data = 0;
ret = bdrv_pwrite(bs->file->bs, (host_offset + cur_bytes) - 1,
ret = bdrv_pwrite(bs->file, (host_offset + cur_bytes) - 1,
&data, 1);
if (ret < 0) {
return ret;
@ -2522,7 +2523,7 @@ static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
/* write updated header.size */
offset = cpu_to_be64(offset);
ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, size),
ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
&offset, sizeof(uint64_t));
if (ret < 0) {
return ret;
@ -2532,6 +2533,51 @@ static int qcow2_truncate(BlockDriverState *bs, int64_t offset)
return 0;
}
typedef struct Qcow2WriteCo {
BlockDriverState *bs;
int64_t sector_num;
const uint8_t *buf;
int nb_sectors;
int ret;
} Qcow2WriteCo;
static void qcow2_write_co_entry(void *opaque)
{
Qcow2WriteCo *co = opaque;
QEMUIOVector qiov;
uint64_t offset = co->sector_num * BDRV_SECTOR_SIZE;
uint64_t bytes = co->nb_sectors * BDRV_SECTOR_SIZE;
struct iovec iov = (struct iovec) {
.iov_base = (uint8_t*) co->buf,
.iov_len = bytes,
};
qemu_iovec_init_external(&qiov, &iov, 1);
co->ret = qcow2_co_pwritev(co->bs, offset, bytes, &qiov, 0);
}
/* Wrapper for non-coroutine contexts */
static int qcow2_write(BlockDriverState *bs, int64_t sector_num,
const uint8_t *buf, int nb_sectors)
{
Coroutine *co;
AioContext *aio_context = bdrv_get_aio_context(bs);
Qcow2WriteCo data = {
.bs = bs,
.sector_num = sector_num,
.buf = buf,
.nb_sectors = nb_sectors,
.ret = -EINPROGRESS,
};
co = qemu_coroutine_create(qcow2_write_co_entry);
qemu_coroutine_enter(co, &data);
while (data.ret == -EINPROGRESS) {
aio_poll(aio_context, true);
}
return data.ret;
}
/* XXX: put compressed sectors first, then all the cluster aligned
tables to avoid losing bytes in alignment */
static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
@ -2595,7 +2641,7 @@ static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
if (ret != Z_STREAM_END || out_len >= s->cluster_size) {
/* could not compress: write normal cluster */
ret = bdrv_write(bs, sector_num, buf, s->cluster_sectors);
ret = qcow2_write(bs, sector_num, buf, s->cluster_sectors);
if (ret < 0) {
goto fail;
}
@ -2614,7 +2660,7 @@ static int qcow2_write_compressed(BlockDriverState *bs, int64_t sector_num,
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_COMPRESSED);
ret = bdrv_pwrite(bs->file->bs, cluster_offset, out_buf, out_len);
ret = bdrv_pwrite(bs->file, cluster_offset, out_buf, out_len);
if (ret < 0) {
goto fail;
}
@ -2663,7 +2709,7 @@ static int make_completely_empty(BlockDriverState *bs)
/* After this call, neither the in-memory nor the on-disk refcount
* information accurately describe the actual references */
ret = bdrv_pwrite_zeroes(bs->file->bs, s->l1_table_offset,
ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset,
l1_clusters * s->cluster_size, 0);
if (ret < 0) {
goto fail_broken_refcounts;
@ -2677,7 +2723,7 @@ static int make_completely_empty(BlockDriverState *bs)
* overwrite parts of the existing refcount and L1 table, which is not
* an issue because the dirty flag is set, complete data loss is in fact
* desired and partial data loss is consequently fine as well */
ret = bdrv_pwrite_zeroes(bs->file->bs, s->cluster_size,
ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size,
(2 + l1_clusters) * s->cluster_size, 0);
/* This call (even if it failed overall) may have overwritten on-disk
* refcount structures; in that case, the in-memory refcount information
@ -2693,10 +2739,10 @@ static int make_completely_empty(BlockDriverState *bs)
/* "Create" an empty reftable (one cluster) directly after the image
* header and an empty L1 table three clusters after the image header;
* the cluster between those two will be used as the first refblock */
cpu_to_be64w(&l1_ofs_rt_ofs_cls.l1_offset, 3 * s->cluster_size);
cpu_to_be64w(&l1_ofs_rt_ofs_cls.reftable_offset, s->cluster_size);
cpu_to_be32w(&l1_ofs_rt_ofs_cls.reftable_clusters, 1);
ret = bdrv_pwrite_sync(bs->file->bs, offsetof(QCowHeader, l1_table_offset),
l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size);
l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size);
l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1);
ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset),
&l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls));
if (ret < 0) {
goto fail_broken_refcounts;
@ -2727,7 +2773,7 @@ static int make_completely_empty(BlockDriverState *bs)
/* Enter the first refblock into the reftable */
rt_entry = cpu_to_be64(2 * s->cluster_size);
ret = bdrv_pwrite_sync(bs->file->bs, s->cluster_size,
ret = bdrv_pwrite_sync(bs->file, s->cluster_size,
&rt_entry, sizeof(rt_entry));
if (ret < 0) {
goto fail_broken_refcounts;


@ -65,7 +65,7 @@ static void qed_read_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
read_table_cb->iov.iov_len = s->header.cluster_size * s->header.table_size,
qemu_iovec_init_external(qiov, &read_table_cb->iov, 1);
bdrv_aio_readv(s->bs->file->bs, offset / BDRV_SECTOR_SIZE, qiov,
bdrv_aio_readv(s->bs->file, offset / BDRV_SECTOR_SIZE, qiov,
qiov->size / BDRV_SECTOR_SIZE,
qed_read_table_cb, read_table_cb);
}
@ -154,7 +154,7 @@ static void qed_write_table(BDRVQEDState *s, uint64_t offset, QEDTable *table,
/* Adjust for offset into table */
offset += start * sizeof(uint64_t);
bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
&write_table_cb->qiov,
write_table_cb->qiov.size / BDRV_SECTOR_SIZE,
qed_write_table_cb, write_table_cb);


@ -86,7 +86,7 @@ int qed_write_header_sync(BDRVQEDState *s)
int ret;
qed_header_cpu_to_le(&s->header, &le);
ret = bdrv_pwrite(s->bs->file->bs, 0, &le, sizeof(le));
ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
if (ret != sizeof(le)) {
return ret;
}
@ -123,7 +123,7 @@ static void qed_write_header_read_cb(void *opaque, int ret)
/* Update header */
qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);
bdrv_aio_writev(s->bs->file->bs, 0, &write_header_cb->qiov,
bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
write_header_cb->nsectors, qed_write_header_cb,
write_header_cb);
}
@ -155,7 +155,7 @@ static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
write_header_cb->iov.iov_len = len;
qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);
bdrv_aio_readv(s->bs->file->bs, 0, &write_header_cb->qiov, nsectors,
bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
qed_write_header_read_cb, write_header_cb);
}
@ -218,7 +218,7 @@ static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
*
* The string is NUL-terminated.
*/
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
char *buf, size_t buflen)
{
int ret;
@ -389,7 +389,7 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
s->bs = bs;
QSIMPLEQ_INIT(&s->allocating_write_reqs);
ret = bdrv_pread(bs->file->bs, 0, &le_header, sizeof(le_header));
ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
if (ret < 0) {
return ret;
}
@ -446,7 +446,7 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
return -EINVAL;
}
ret = qed_read_string(bs->file->bs, s->header.backing_filename_offset,
ret = qed_read_string(bs->file, s->header.backing_filename_offset,
s->header.backing_filename_size, bs->backing_file,
sizeof(bs->backing_file));
if (ret < 0) {
@ -800,7 +800,7 @@ static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
qemu_iovec_concat(*backing_qiov, qiov, 0, size);
BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
bdrv_aio_readv(s->bs->backing->bs, pos / BDRV_SECTOR_SIZE,
bdrv_aio_readv(s->bs->backing, pos / BDRV_SECTOR_SIZE,
*backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
@ -837,7 +837,7 @@ static void qed_copy_from_backing_file_write(void *opaque, int ret)
}
BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
bdrv_aio_writev(s->bs->file->bs, copy_cb->offset / BDRV_SECTOR_SIZE,
bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
&copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
qed_copy_from_backing_file_cb, copy_cb);
}
@ -1087,7 +1087,7 @@ static void qed_aio_write_main(void *opaque, int ret)
}
BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
&acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
next_fn, acb);
}
@ -1319,7 +1319,7 @@ static void qed_aio_read_data(void *opaque, int ret,
}
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
bdrv_aio_readv(bs->file->bs, offset / BDRV_SECTOR_SIZE,
bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
&acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
qed_aio_next_io, acb);
return;
@ -1575,7 +1575,7 @@ static int bdrv_qed_change_backing_file(BlockDriverState *bs,
}
/* Write new header */
ret = bdrv_pwrite_sync(bs->file->bs, 0, buffer, buffer_len);
ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
g_free(buffer);
if (ret == 0) {
memcpy(&s->header, &new_header, sizeof(new_header));


@ -383,7 +383,7 @@ static bool quorum_rewrite_bad_versions(BDRVQuorumState *s, QuorumAIOCB *acb,
continue;
}
QLIST_FOREACH(item, &version->items, next) {
bdrv_aio_writev(s->children[item->index]->bs, acb->sector_num,
bdrv_aio_writev(s->children[item->index], acb->sector_num,
acb->qiov, acb->nb_sectors, quorum_rewrite_aio_cb,
acb);
}
@ -660,7 +660,7 @@ static BlockAIOCB *read_quorum_children(QuorumAIOCB *acb)
}
for (i = 0; i < s->num_children; i++) {
acb->qcrs[i].aiocb = bdrv_aio_readv(s->children[i]->bs, acb->sector_num,
acb->qcrs[i].aiocb = bdrv_aio_readv(s->children[i], acb->sector_num,
&acb->qcrs[i].qiov, acb->nb_sectors,
quorum_aio_cb, &acb->qcrs[i]);
}
@ -678,7 +678,7 @@ static BlockAIOCB *read_fifo_child(QuorumAIOCB *acb)
qemu_iovec_clone(&acb->qcrs[acb->child_iter].qiov, acb->qiov,
acb->qcrs[acb->child_iter].buf);
acb->qcrs[acb->child_iter].aiocb =
bdrv_aio_readv(s->children[acb->child_iter]->bs, acb->sector_num,
bdrv_aio_readv(s->children[acb->child_iter], acb->sector_num,
&acb->qcrs[acb->child_iter].qiov, acb->nb_sectors,
quorum_aio_cb, &acb->qcrs[acb->child_iter]);
@ -719,7 +719,7 @@ static BlockAIOCB *quorum_aio_writev(BlockDriverState *bs,
int i;
for (i = 0; i < s->num_children; i++) {
acb->qcrs[i].aiocb = bdrv_aio_writev(s->children[i]->bs, sector_num,
acb->qcrs[i].aiocb = bdrv_aio_writev(s->children[i], sector_num,
qiov, nb_sectors, &quorum_aio_cb,
&acb->qcrs[i]);
}


@ -302,22 +302,22 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
/* For SCSI generic devices the alignment is not really used.
With buffered I/O, we don't have any restrictions. */
if (bdrv_is_sg(bs) || !s->needs_alignment) {
bs->request_alignment = 1;
bs->bl.request_alignment = 1;
s->buf_align = 1;
return;
}
bs->request_alignment = 0;
bs->bl.request_alignment = 0;
s->buf_align = 0;
/* Let's try to use the logical blocksize for the alignment. */
if (probe_logical_blocksize(fd, &bs->request_alignment) < 0) {
bs->request_alignment = 0;
if (probe_logical_blocksize(fd, &bs->bl.request_alignment) < 0) {
bs->bl.request_alignment = 0;
}
#ifdef CONFIG_XFS
if (s->is_xfs) {
struct dioattr da;
if (xfsctl(NULL, fd, XFS_IOC_DIOINFO, &da) >= 0) {
bs->request_alignment = da.d_miniosz;
bs->bl.request_alignment = da.d_miniosz;
/* The kernel returns wrong information for d_mem */
/* s->buf_align = da.d_mem; */
}
@ -337,21 +337,21 @@ static void raw_probe_alignment(BlockDriverState *bs, int fd, Error **errp)
qemu_vfree(buf);
}
if (!bs->request_alignment) {
if (!bs->bl.request_alignment) {
size_t align;
buf = qemu_memalign(s->buf_align, max_align);
for (align = 512; align <= max_align; align <<= 1) {
if (raw_is_io_aligned(fd, buf, align)) {
bs->request_alignment = align;
bs->bl.request_alignment = align;
break;
}
}
qemu_vfree(buf);
}
if (!s->buf_align || !bs->request_alignment) {
error_setg(errp, "Could not find working O_DIRECT alignment. "
"Try cache.direct=off.");
if (!s->buf_align || !bs->bl.request_alignment) {
error_setg(errp, "Could not find working O_DIRECT alignment");
error_append_hint(errp, "Try cache.direct=off\n");
}
}
@ -745,8 +745,8 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
if (!fstat(s->fd, &st)) {
if (S_ISBLK(st.st_mode)) {
int ret = hdev_get_max_transfer_length(s->fd);
if (ret >= 0) {
bs->bl.max_transfer_length = ret;
if (ret > 0 && ret <= BDRV_REQUEST_MAX_SECTORS) {
bs->bl.max_transfer = pow2floor(ret << BDRV_SECTOR_BITS);
}
}
}
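For block devices, the kernel-reported limit (in sectors) is now translated into the byte-based bs->bl.max_transfer, and it is only honoured when it fits within a single request; it is then rounded down to a power of two so the block layer's alignment code can rely on it. A small worked example with an assumed device value:

int ret = 2600;       /* assumed value returned by hdev_get_max_transfer_length() */

if (ret > 0 && ret <= BDRV_REQUEST_MAX_SECTORS) {
    /* 2600 sectors = 1331200 bytes; pow2floor() yields 1048576 (1 MiB) */
    bs->bl.max_transfer = pow2floor(ret << BDRV_SECTOR_BITS);
}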


@ -222,7 +222,7 @@ static void raw_attach_aio_context(BlockDriverState *bs,
}
}
static void raw_probe_alignment(BlockDriverState *bs)
static void raw_probe_alignment(BlockDriverState *bs, Error **errp)
{
BDRVRawState *s = bs->opaque;
DWORD sectorsPerCluster, freeClusters, totalClusters, count;
@ -230,14 +230,14 @@ static void raw_probe_alignment(BlockDriverState *bs)
BOOL status;
if (s->type == FTYPE_CD) {
bs->request_alignment = 2048;
bs->bl.request_alignment = 2048;
return;
}
if (s->type == FTYPE_HARDDISK) {
status = DeviceIoControl(s->hfile, IOCTL_DISK_GET_DRIVE_GEOMETRY_EX,
NULL, 0, &dg, sizeof(dg), &count, NULL);
if (status != 0) {
bs->request_alignment = dg.Geometry.BytesPerSector;
bs->bl.request_alignment = dg.Geometry.BytesPerSector;
return;
}
/* try GetDiskFreeSpace too */
@ -247,7 +247,7 @@ static void raw_probe_alignment(BlockDriverState *bs)
GetDiskFreeSpace(s->drive_path, &sectorsPerCluster,
&dg.Geometry.BytesPerSector,
&freeClusters, &totalClusters);
bs->request_alignment = dg.Geometry.BytesPerSector;
bs->bl.request_alignment = dg.Geometry.BytesPerSector;
}
}
@ -365,7 +365,6 @@ static int raw_open(BlockDriverState *bs, QDict *options, int flags,
win32_aio_attach_aio_context(s->aio, bdrv_get_aio_context(bs));
}
raw_probe_alignment(bs);
ret = 0;
fail:
qemu_opts_del(opts);
@ -550,6 +549,7 @@ BlockDriver bdrv_file = {
.bdrv_needs_filename = true,
.bdrv_parse_filename = raw_parse_filename,
.bdrv_file_open = raw_open,
.bdrv_refresh_limits = raw_probe_alignment,
.bdrv_close = raw_close,
.bdrv_create = raw_create,
.bdrv_has_zero_init = bdrv_has_zero_init_1,


@ -1,6 +1,6 @@
/* BlockDriver implementation for "raw"
*
* Copyright (C) 2010, 2013, Red Hat, Inc.
* Copyright (C) 2010-2016 Red Hat, Inc.
* Copyright (C) 2010, Blue Swirl <blauwirbel@gmail.com>
* Copyright (C) 2009, Anthony Liguori <aliguori@us.ibm.com>
*
@ -54,7 +54,7 @@ static int coroutine_fn raw_co_readv(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
return bdrv_co_readv(bs->file->bs, sector_num, nb_sectors, qiov);
return bdrv_co_readv(bs->file, sector_num, nb_sectors, qiov);
}
static int coroutine_fn
@ -105,7 +105,7 @@ raw_co_writev_flags(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
ret = bdrv_co_pwritev(bs->file->bs, sector_num * BDRV_SECTOR_SIZE,
ret = bdrv_co_pwritev(bs->file, sector_num * BDRV_SECTOR_SIZE,
nb_sectors * BDRV_SECTOR_SIZE, qiov, flags);
fail:
@ -131,7 +131,7 @@ static int coroutine_fn raw_co_pwrite_zeroes(BlockDriverState *bs,
int64_t offset, int count,
BdrvRequestFlags flags)
{
return bdrv_co_pwrite_zeroes(bs->file->bs, offset, count, flags);
return bdrv_co_pwrite_zeroes(bs->file, offset, count, flags);
}
static int coroutine_fn raw_co_discard(BlockDriverState *bs,
@ -150,11 +150,6 @@ static int raw_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
return bdrv_get_info(bs->file->bs, bdi);
}
static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
{
bs->bl = bs->file->bs->bl;
}
static int raw_truncate(BlockDriverState *bs, int64_t offset)
{
return bdrv_truncate(bs->file->bs, offset);
@ -252,7 +247,6 @@ BlockDriver bdrv_raw = {
.bdrv_getlength = &raw_getlength,
.has_variable_length = true,
.bdrv_get_info = &raw_get_info,
.bdrv_refresh_limits = &raw_refresh_limits,
.bdrv_probe_blocksizes = &raw_probe_blocksizes,
.bdrv_probe_geometry = &raw_probe_geometry,
.bdrv_media_changed = &raw_media_changed,


@ -403,7 +403,7 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
logout("\n");
ret = bdrv_read(bs->file->bs, 0, (uint8_t *)&header, 1);
ret = bdrv_read(bs->file, 0, (uint8_t *)&header, 1);
if (ret < 0) {
goto fail;
}
@ -500,7 +500,7 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
ret = bdrv_read(bs->file->bs, s->bmap_sector, (uint8_t *)s->bmap,
ret = bdrv_read(bs->file, s->bmap_sector, (uint8_t *)s->bmap,
bmap_size);
if (ret < 0) {
goto fail_free_bmap;
@ -597,7 +597,7 @@ vdi_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
qemu_iovec_reset(&local_qiov);
qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
ret = bdrv_co_preadv(bs->file->bs, data_offset, n_bytes,
ret = bdrv_co_preadv(bs->file, data_offset, n_bytes,
&local_qiov, 0);
}
logout("%u bytes read\n", n_bytes);
@ -670,7 +670,7 @@ vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
* acquire the lock and thus the padded cluster is written before
* the other coroutines can write to the affected area. */
qemu_co_mutex_lock(&s->write_lock);
ret = bdrv_pwrite(bs->file->bs, data_offset, block, s->block_size);
ret = bdrv_pwrite(bs->file, data_offset, block, s->block_size);
qemu_co_mutex_unlock(&s->write_lock);
} else {
uint64_t data_offset = s->header.offset_data +
@ -690,7 +690,7 @@ vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
qemu_iovec_reset(&local_qiov);
qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
ret = bdrv_co_pwritev(bs->file->bs, data_offset, n_bytes,
ret = bdrv_co_pwritev(bs->file, data_offset, n_bytes,
&local_qiov, 0);
}
@ -719,7 +719,7 @@ vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
assert(VDI_IS_ALLOCATED(bmap_first));
*header = s->header;
vdi_header_to_le(header);
ret = bdrv_write(bs->file->bs, 0, block, 1);
ret = bdrv_write(bs->file, 0, block, 1);
g_free(block);
block = NULL;
@ -737,7 +737,7 @@ vdi_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
base = ((uint8_t *)&s->bmap[0]) + bmap_first * SECTOR_SIZE;
logout("will write %u block map sectors starting from entry %u\n",
n_sectors, bmap_first);
ret = bdrv_write(bs->file->bs, offset, base, n_sectors);
ret = bdrv_write(bs->file, offset, base, n_sectors);
}
return ret;


@ -84,7 +84,7 @@ static int vhdx_log_peek_hdr(BlockDriverState *bs, VHDXLogEntries *log,
offset = log->offset + read;
ret = bdrv_pread(bs->file->bs, offset, hdr, sizeof(VHDXLogEntryHeader));
ret = bdrv_pread(bs->file, offset, hdr, sizeof(VHDXLogEntryHeader));
if (ret < 0) {
goto exit;
}
@ -144,7 +144,7 @@ static int vhdx_log_read_sectors(BlockDriverState *bs, VHDXLogEntries *log,
}
offset = log->offset + read;
ret = bdrv_pread(bs->file->bs, offset, buffer, VHDX_LOG_SECTOR_SIZE);
ret = bdrv_pread(bs->file, offset, buffer, VHDX_LOG_SECTOR_SIZE);
if (ret < 0) {
goto exit;
}
@ -194,7 +194,7 @@ static int vhdx_log_write_sectors(BlockDriverState *bs, VHDXLogEntries *log,
/* full */
break;
}
ret = bdrv_pwrite(bs->file->bs, offset, buffer_tmp,
ret = bdrv_pwrite(bs->file, offset, buffer_tmp,
VHDX_LOG_SECTOR_SIZE);
if (ret < 0) {
goto exit;
@ -466,7 +466,7 @@ static int vhdx_log_flush_desc(BlockDriverState *bs, VHDXLogDescriptor *desc,
/* count is only > 1 if we are writing zeroes */
for (i = 0; i < count; i++) {
ret = bdrv_pwrite_sync(bs->file->bs, file_offset, buffer,
ret = bdrv_pwrite_sync(bs->file, file_offset, buffer,
VHDX_LOG_SECTOR_SIZE);
if (ret < 0) {
goto exit;
@ -945,7 +945,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
if (i == 0 && leading_length) {
/* partial sector at the front of the buffer */
ret = bdrv_pread(bs->file->bs, file_offset, merged_sector,
ret = bdrv_pread(bs->file, file_offset, merged_sector,
VHDX_LOG_SECTOR_SIZE);
if (ret < 0) {
goto exit;
@ -955,7 +955,7 @@ static int vhdx_log_write(BlockDriverState *bs, BDRVVHDXState *s,
sector_write = merged_sector;
} else if (i == sectors - 1 && trailing_length) {
/* partial sector at the end of the buffer */
ret = bdrv_pread(bs->file->bs,
ret = bdrv_pread(bs->file,
file_offset,
merged_sector + trailing_length,
VHDX_LOG_SECTOR_SIZE - trailing_length);


@ -298,9 +298,10 @@ static int vhdx_probe(const uint8_t *buf, int buf_size, const char *filename)
* and then update the header checksum. Header is converted to proper
* endianness before being written to the specified file offset
*/
static int vhdx_write_header(BlockDriverState *bs_file, VHDXHeader *hdr,
static int vhdx_write_header(BdrvChild *file, VHDXHeader *hdr,
uint64_t offset, bool read)
{
BlockDriverState *bs_file = file->bs;
uint8_t *buffer = NULL;
int ret;
VHDXHeader *header_le;
@ -315,7 +316,7 @@ static int vhdx_write_header(BlockDriverState *bs_file, VHDXHeader *hdr,
buffer = qemu_blockalign(bs_file, VHDX_HEADER_SIZE);
if (read) {
/* if true, we can't assume the extra reserved bytes are 0 */
ret = bdrv_pread(bs_file, offset, buffer, VHDX_HEADER_SIZE);
ret = bdrv_pread(file, offset, buffer, VHDX_HEADER_SIZE);
if (ret < 0) {
goto exit;
}
@ -329,7 +330,7 @@ static int vhdx_write_header(BlockDriverState *bs_file, VHDXHeader *hdr,
vhdx_header_le_export(hdr, header_le);
vhdx_update_checksum(buffer, VHDX_HEADER_SIZE,
offsetof(VHDXHeader, checksum));
ret = bdrv_pwrite_sync(bs_file, offset, header_le, sizeof(VHDXHeader));
ret = bdrv_pwrite_sync(file, offset, header_le, sizeof(VHDXHeader));
exit:
qemu_vfree(buffer);
@ -378,7 +379,7 @@ static int vhdx_update_header(BlockDriverState *bs, BDRVVHDXState *s,
inactive_header->log_guid = *log_guid;
}
ret = vhdx_write_header(bs->file->bs, inactive_header, header_offset, true);
ret = vhdx_write_header(bs->file, inactive_header, header_offset, true);
if (ret < 0) {
goto exit;
}
@ -430,7 +431,7 @@ static void vhdx_parse_header(BlockDriverState *bs, BDRVVHDXState *s,
/* We have to read the whole VHDX_HEADER_SIZE instead of
* sizeof(VHDXHeader), because the checksum is over the whole
* region */
ret = bdrv_pread(bs->file->bs, VHDX_HEADER1_OFFSET, buffer,
ret = bdrv_pread(bs->file, VHDX_HEADER1_OFFSET, buffer,
VHDX_HEADER_SIZE);
if (ret < 0) {
goto fail;
@ -447,7 +448,7 @@ static void vhdx_parse_header(BlockDriverState *bs, BDRVVHDXState *s,
}
}
ret = bdrv_pread(bs->file->bs, VHDX_HEADER2_OFFSET, buffer,
ret = bdrv_pread(bs->file, VHDX_HEADER2_OFFSET, buffer,
VHDX_HEADER_SIZE);
if (ret < 0) {
goto fail;
@ -521,7 +522,7 @@ static int vhdx_open_region_tables(BlockDriverState *bs, BDRVVHDXState *s)
* whole block */
buffer = qemu_blockalign(bs, VHDX_HEADER_BLOCK_SIZE);
ret = bdrv_pread(bs->file->bs, VHDX_REGION_TABLE_OFFSET, buffer,
ret = bdrv_pread(bs->file, VHDX_REGION_TABLE_OFFSET, buffer,
VHDX_HEADER_BLOCK_SIZE);
if (ret < 0) {
goto fail;
@ -634,7 +635,7 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
buffer = qemu_blockalign(bs, VHDX_METADATA_TABLE_MAX_SIZE);
ret = bdrv_pread(bs->file->bs, s->metadata_rt.file_offset, buffer,
ret = bdrv_pread(bs->file, s->metadata_rt.file_offset, buffer,
VHDX_METADATA_TABLE_MAX_SIZE);
if (ret < 0) {
goto exit;
@ -737,7 +738,7 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
goto exit;
}
ret = bdrv_pread(bs->file->bs,
ret = bdrv_pread(bs->file,
s->metadata_entries.file_parameters_entry.offset
+ s->metadata_rt.file_offset,
&s->params,
@ -772,7 +773,7 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
/* determine virtual disk size, logical sector size,
* and phys sector size */
ret = bdrv_pread(bs->file->bs,
ret = bdrv_pread(bs->file,
s->metadata_entries.virtual_disk_size_entry.offset
+ s->metadata_rt.file_offset,
&s->virtual_disk_size,
@ -780,7 +781,7 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
if (ret < 0) {
goto exit;
}
ret = bdrv_pread(bs->file->bs,
ret = bdrv_pread(bs->file,
s->metadata_entries.logical_sector_size_entry.offset
+ s->metadata_rt.file_offset,
&s->logical_sector_size,
@ -788,7 +789,7 @@ static int vhdx_parse_metadata(BlockDriverState *bs, BDRVVHDXState *s)
if (ret < 0) {
goto exit;
}
ret = bdrv_pread(bs->file->bs,
ret = bdrv_pread(bs->file,
s->metadata_entries.phys_sector_size_entry.offset
+ s->metadata_rt.file_offset,
&s->physical_sector_size,
@ -905,7 +906,7 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
QLIST_INIT(&s->regions);
/* validate the file signature */
ret = bdrv_pread(bs->file->bs, 0, &signature, sizeof(uint64_t));
ret = bdrv_pread(bs->file, 0, &signature, sizeof(uint64_t));
if (ret < 0) {
goto fail;
}
@ -964,7 +965,7 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
ret = bdrv_pread(bs->file->bs, s->bat_offset, s->bat, s->bat_rt.length);
ret = bdrv_pread(bs->file, s->bat_offset, s->bat, s->bat_rt.length);
if (ret < 0) {
goto fail;
}
@ -1117,7 +1118,7 @@ static coroutine_fn int vhdx_co_readv(BlockDriverState *bs, int64_t sector_num,
break;
case PAYLOAD_BLOCK_FULLY_PRESENT:
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_readv(bs->file->bs,
ret = bdrv_co_readv(bs->file,
sinfo.file_offset >> BDRV_SECTOR_BITS,
sinfo.sectors_avail, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
@ -1326,7 +1327,7 @@ static coroutine_fn int vhdx_co_writev(BlockDriverState *bs, int64_t sector_num,
}
/* block exists, so we can just overwrite it */
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_writev(bs->file->bs,
ret = bdrv_co_writev(bs->file,
sinfo.file_offset >> BDRV_SECTOR_BITS,
sectors_to_write, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
@ -1387,9 +1388,11 @@ exit:
* There are 2 headers, and the highest sequence number will represent
* the active header
*/
static int vhdx_create_new_headers(BlockDriverState *bs, uint64_t image_size,
static int vhdx_create_new_headers(BlockBackend *blk, uint64_t image_size,
uint32_t log_size)
{
BlockDriverState *bs = blk_bs(blk);
BdrvChild *child;
int ret = 0;
VHDXHeader *hdr = NULL;
@ -1404,12 +1407,18 @@ static int vhdx_create_new_headers(BlockDriverState *bs, uint64_t image_size,
vhdx_guid_generate(&hdr->file_write_guid);
vhdx_guid_generate(&hdr->data_write_guid);
ret = vhdx_write_header(bs, hdr, VHDX_HEADER1_OFFSET, false);
/* XXX Ugly way to get blk->root, but that's a feature, not a bug. This
* hack makes it obvious that vhdx_write_header() bypasses the BlockBackend
* here, which it really shouldn't be doing. */
child = QLIST_FIRST(&bs->parents);
assert(!QLIST_NEXT(child, next_parent));
ret = vhdx_write_header(child, hdr, VHDX_HEADER1_OFFSET, false);
if (ret < 0) {
goto exit;
}
hdr->sequence_number++;
ret = vhdx_write_header(bs, hdr, VHDX_HEADER2_OFFSET, false);
ret = vhdx_write_header(child, hdr, VHDX_HEADER2_OFFSET, false);
if (ret < 0) {
goto exit;
}
@ -1442,7 +1451,7 @@ exit:
* The first 64KB of the Metadata section is reserved for the metadata
* header and entries; beyond that, the metadata items themselves reside.
*/
static int vhdx_create_new_metadata(BlockDriverState *bs,
static int vhdx_create_new_metadata(BlockBackend *blk,
uint64_t image_size,
uint32_t block_size,
uint32_t sector_size,
@ -1538,13 +1547,13 @@ static int vhdx_create_new_metadata(BlockDriverState *bs,
VHDX_META_FLAGS_IS_VIRTUAL_DISK;
vhdx_metadata_entry_le_export(&md_table_entry[4]);
ret = bdrv_pwrite(bs, metadata_offset, buffer, VHDX_HEADER_BLOCK_SIZE);
ret = blk_pwrite(blk, metadata_offset, buffer, VHDX_HEADER_BLOCK_SIZE, 0);
if (ret < 0) {
goto exit;
}
ret = bdrv_pwrite(bs, metadata_offset + (64 * KiB), entry_buffer,
VHDX_METADATA_ENTRY_BUFFER_SIZE);
ret = blk_pwrite(blk, metadata_offset + (64 * KiB), entry_buffer,
VHDX_METADATA_ENTRY_BUFFER_SIZE, 0);
if (ret < 0) {
goto exit;
}
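Image creation now goes through the BlockBackend it has just created instead of reaching down to its BlockDriverState, so these writes switch from bdrv_pwrite() to blk_pwrite(), which additionally takes a BdrvRequestFlags argument (0 here for a plain write). The call-site change in isolation, using the arguments from the hunk above:

ret = bdrv_pwrite(bs, metadata_offset, buffer, VHDX_HEADER_BLOCK_SIZE);        /* old */
ret = blk_pwrite(blk, metadata_offset, buffer, VHDX_HEADER_BLOCK_SIZE, 0);     /* new */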
@ -1564,7 +1573,7 @@ exit:
* Fixed images: default state of the BAT is fully populated, with
* file offsets and state PAYLOAD_BLOCK_FULLY_PRESENT.
*/
static int vhdx_create_bat(BlockDriverState *bs, BDRVVHDXState *s,
static int vhdx_create_bat(BlockBackend *blk, BDRVVHDXState *s,
uint64_t image_size, VHDXImageType type,
bool use_zero_blocks, uint64_t file_offset,
uint32_t length)
@ -1588,12 +1597,12 @@ static int vhdx_create_bat(BlockDriverState *bs, BDRVVHDXState *s,
if (type == VHDX_TYPE_DYNAMIC) {
/* All zeroes, so we can just extend the file - the end of the BAT
* is the furthest thing we have written yet */
ret = bdrv_truncate(bs, data_file_offset);
ret = blk_truncate(blk, data_file_offset);
if (ret < 0) {
goto exit;
}
} else if (type == VHDX_TYPE_FIXED) {
ret = bdrv_truncate(bs, data_file_offset + image_size);
ret = blk_truncate(blk, data_file_offset + image_size);
if (ret < 0) {
goto exit;
}
@ -1604,7 +1613,7 @@ static int vhdx_create_bat(BlockDriverState *bs, BDRVVHDXState *s,
if (type == VHDX_TYPE_FIXED ||
use_zero_blocks ||
bdrv_has_zero_init(bs) == 0) {
bdrv_has_zero_init(blk_bs(blk)) == 0) {
/* for a fixed file, the default BAT entry is not zero */
s->bat = g_try_malloc0(length);
if (length && s->bat == NULL) {
@ -1620,12 +1629,12 @@ static int vhdx_create_bat(BlockDriverState *bs, BDRVVHDXState *s,
sinfo.file_offset = data_file_offset +
(sector_num << s->logical_sector_size_bits);
sinfo.file_offset = ROUND_UP(sinfo.file_offset, MiB);
vhdx_update_bat_table_entry(bs, s, &sinfo, &unused, &unused,
vhdx_update_bat_table_entry(blk_bs(blk), s, &sinfo, &unused, &unused,
block_state);
cpu_to_le64s(&s->bat[sinfo.bat_idx]);
sector_num += s->sectors_per_block;
}
ret = bdrv_pwrite(bs, file_offset, s->bat, length);
ret = blk_pwrite(blk, file_offset, s->bat, length, 0);
if (ret < 0) {
goto exit;
}
@ -1645,7 +1654,7 @@ exit:
* to create the BAT itself, we will also cause the BAT to be
* created.
*/
static int vhdx_create_new_region_table(BlockDriverState *bs,
static int vhdx_create_new_region_table(BlockBackend *blk,
uint64_t image_size,
uint32_t block_size,
uint32_t sector_size,
@ -1720,21 +1729,21 @@ static int vhdx_create_new_region_table(BlockDriverState *bs,
/* The region table gives us the data we need to create the BAT,
* so do that now */
ret = vhdx_create_bat(bs, s, image_size, type, use_zero_blocks,
ret = vhdx_create_bat(blk, s, image_size, type, use_zero_blocks,
bat_file_offset, bat_length);
if (ret < 0) {
goto exit;
}
/* Now write out the region headers to disk */
ret = bdrv_pwrite(bs, VHDX_REGION_TABLE_OFFSET, buffer,
VHDX_HEADER_BLOCK_SIZE);
ret = blk_pwrite(blk, VHDX_REGION_TABLE_OFFSET, buffer,
VHDX_HEADER_BLOCK_SIZE, 0);
if (ret < 0) {
goto exit;
}
ret = bdrv_pwrite(bs, VHDX_REGION_TABLE2_OFFSET, buffer,
VHDX_HEADER_BLOCK_SIZE);
ret = blk_pwrite(blk, VHDX_REGION_TABLE2_OFFSET, buffer,
VHDX_HEADER_BLOCK_SIZE, 0);
if (ret < 0) {
goto exit;
}
@ -1871,13 +1880,13 @@ static int vhdx_create(const char *filename, QemuOpts *opts, Error **errp)
/* Creates (B),(C) */
ret = vhdx_create_new_headers(blk_bs(blk), image_size, log_size);
ret = vhdx_create_new_headers(blk, image_size, log_size);
if (ret < 0) {
goto delete_and_exit;
}
/* Creates (D),(E),(G) explicitly. (F) created as by-product */
ret = vhdx_create_new_region_table(blk_bs(blk), image_size, block_size, 512,
ret = vhdx_create_new_region_table(blk, image_size, block_size, 512,
log_size, use_zero_blocks, image_type,
&metadata_offset);
if (ret < 0) {
@ -1885,7 +1894,7 @@ static int vhdx_create(const char *filename, QemuOpts *opts, Error **errp)
}
/* Creates (H) */
ret = vhdx_create_new_metadata(blk_bs(blk), image_size, block_size, 512,
ret = vhdx_create_new_metadata(blk, image_size, block_size, 512,
metadata_offset, image_type);
if (ret < 0) {
goto delete_and_exit;


@ -252,7 +252,7 @@ static uint32_t vmdk_read_cid(BlockDriverState *bs, int parent)
int ret;
desc = g_malloc0(DESC_SIZE);
ret = bdrv_pread(bs->file->bs, s->desc_offset, desc, DESC_SIZE);
ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE);
if (ret < 0) {
g_free(desc);
return 0;
@ -286,7 +286,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
desc = g_malloc0(DESC_SIZE);
tmp_desc = g_malloc0(DESC_SIZE);
ret = bdrv_pread(bs->file->bs, s->desc_offset, desc, DESC_SIZE);
ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE);
if (ret < 0) {
goto out;
}
@ -306,7 +306,7 @@ static int vmdk_write_cid(BlockDriverState *bs, uint32_t cid)
pstrcat(desc, DESC_SIZE, tmp_desc);
}
ret = bdrv_pwrite_sync(bs->file->bs, s->desc_offset, desc, DESC_SIZE);
ret = bdrv_pwrite_sync(bs->file, s->desc_offset, desc, DESC_SIZE);
out:
g_free(desc);
@ -350,7 +350,7 @@ static int vmdk_parent_open(BlockDriverState *bs)
int ret;
desc = g_malloc0(DESC_SIZE + 1);
ret = bdrv_pread(bs->file->bs, s->desc_offset, desc, DESC_SIZE);
ret = bdrv_pread(bs->file, s->desc_offset, desc, DESC_SIZE);
if (ret < 0) {
goto out;
}
@ -454,7 +454,7 @@ static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent,
return -ENOMEM;
}
ret = bdrv_pread(extent->file->bs,
ret = bdrv_pread(extent->file,
extent->l1_table_offset,
extent->l1_table,
l1_size);
@ -474,7 +474,7 @@ static int vmdk_init_tables(BlockDriverState *bs, VmdkExtent *extent,
ret = -ENOMEM;
goto fail_l1;
}
ret = bdrv_pread(extent->file->bs,
ret = bdrv_pread(extent->file,
extent->l1_backup_table_offset,
extent->l1_backup_table,
l1_size);
@ -508,7 +508,7 @@ static int vmdk_open_vmfs_sparse(BlockDriverState *bs,
VMDK3Header header;
VmdkExtent *extent;
ret = bdrv_pread(file->bs, sizeof(magic), &header, sizeof(header));
ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header));
if (ret < 0) {
error_setg_errno(errp, -ret,
"Could not read header from file '%s'",
@ -538,14 +538,13 @@ static int vmdk_open_vmfs_sparse(BlockDriverState *bs,
static int vmdk_open_desc_file(BlockDriverState *bs, int flags, char *buf,
QDict *options, Error **errp);
static char *vmdk_read_desc(BlockDriverState *file, uint64_t desc_offset,
Error **errp)
static char *vmdk_read_desc(BdrvChild *file, uint64_t desc_offset, Error **errp)
{
int64_t size;
char *buf;
int ret;
size = bdrv_getlength(file);
size = bdrv_getlength(file->bs);
if (size < 0) {
error_setg_errno(errp, -size, "Could not access file");
return NULL;
@ -586,7 +585,7 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
int64_t l1_backup_offset = 0;
bool compressed;
ret = bdrv_pread(file->bs, sizeof(magic), &header, sizeof(header));
ret = bdrv_pread(file, sizeof(magic), &header, sizeof(header));
if (ret < 0) {
error_setg_errno(errp, -ret,
"Could not read header from file '%s'",
@ -596,7 +595,7 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
if (header.capacity == 0) {
uint64_t desc_offset = le64_to_cpu(header.desc_offset);
if (desc_offset) {
char *buf = vmdk_read_desc(file->bs, desc_offset << 9, errp);
char *buf = vmdk_read_desc(file, desc_offset << 9, errp);
if (!buf) {
return -EINVAL;
}
@ -636,7 +635,7 @@ static int vmdk_open_vmdk4(BlockDriverState *bs,
} QEMU_PACKED eos_marker;
} QEMU_PACKED footer;
ret = bdrv_pread(file->bs,
ret = bdrv_pread(file,
bs->file->bs->total_sectors * 512 - 1536,
&footer, sizeof(footer));
if (ret < 0) {
@ -874,7 +873,7 @@ static int vmdk_parse_extents(const char *desc, BlockDriverState *bs,
extent->flat_start_offset = flat_offset << 9;
} else if (!strcmp(type, "SPARSE") || !strcmp(type, "VMFSSPARSE")) {
            /* SPARSE extent and VMFSSPARSE extent are both "COWD" sparse files */
char *buf = vmdk_read_desc(extent_file->bs, 0, errp);
char *buf = vmdk_read_desc(extent_file, 0, errp);
if (!buf) {
ret = -EINVAL;
} else {
@ -943,7 +942,7 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags,
BDRVVmdkState *s = bs->opaque;
uint32_t magic;
buf = vmdk_read_desc(bs->file->bs, 0, errp);
buf = vmdk_read_desc(bs->file, 0, errp);
if (!buf) {
return -EINVAL;
}
@ -1046,14 +1045,14 @@ static int get_whole_cluster(BlockDriverState *bs,
/* Read backing data before skip range */
if (skip_start_bytes > 0) {
if (bs->backing) {
ret = bdrv_pread(bs->backing->bs, offset, whole_grain,
ret = bdrv_pread(bs->backing, offset, whole_grain,
skip_start_bytes);
if (ret < 0) {
ret = VMDK_ERROR;
goto exit;
}
}
ret = bdrv_pwrite(extent->file->bs, cluster_offset, whole_grain,
ret = bdrv_pwrite(extent->file, cluster_offset, whole_grain,
skip_start_bytes);
if (ret < 0) {
ret = VMDK_ERROR;
@ -1063,7 +1062,7 @@ static int get_whole_cluster(BlockDriverState *bs,
/* Read backing data after skip range */
if (skip_end_bytes < cluster_bytes) {
if (bs->backing) {
ret = bdrv_pread(bs->backing->bs, offset + skip_end_bytes,
ret = bdrv_pread(bs->backing, offset + skip_end_bytes,
whole_grain + skip_end_bytes,
cluster_bytes - skip_end_bytes);
if (ret < 0) {
@ -1071,7 +1070,7 @@ static int get_whole_cluster(BlockDriverState *bs,
goto exit;
}
}
ret = bdrv_pwrite(extent->file->bs, cluster_offset + skip_end_bytes,
ret = bdrv_pwrite(extent->file, cluster_offset + skip_end_bytes,
whole_grain + skip_end_bytes,
cluster_bytes - skip_end_bytes);
if (ret < 0) {
@ -1091,8 +1090,7 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
{
offset = cpu_to_le32(offset);
/* update L2 table */
if (bdrv_pwrite_sync(
extent->file->bs,
if (bdrv_pwrite_sync(extent->file,
((int64_t)m_data->l2_offset * 512)
+ (m_data->l2_index * sizeof(offset)),
&offset, sizeof(offset)) < 0) {
@ -1101,8 +1099,7 @@ static int vmdk_L2update(VmdkExtent *extent, VmdkMetaData *m_data,
/* update backup L2 table */
if (extent->l1_backup_table_offset != 0) {
m_data->l2_offset = extent->l1_backup_table[m_data->l1_index];
if (bdrv_pwrite_sync(
extent->file->bs,
if (bdrv_pwrite_sync(extent->file,
((int64_t)m_data->l2_offset * 512)
+ (m_data->l2_index * sizeof(offset)),
&offset, sizeof(offset)) < 0) {
@ -1191,8 +1188,7 @@ static int get_cluster_offset(BlockDriverState *bs,
}
}
l2_table = extent->l2_cache + (min_index * extent->l2_size);
if (bdrv_pread(
extent->file->bs,
if (bdrv_pread(extent->file,
(int64_t)l2_offset * 512,
l2_table,
extent->l2_size * sizeof(uint32_t)
@ -1373,7 +1369,7 @@ static int vmdk_write_extent(VmdkExtent *extent, int64_t cluster_offset,
}
write_offset = cluster_offset + offset_in_cluster,
ret = bdrv_co_pwritev(extent->file->bs, write_offset, n_bytes,
ret = bdrv_co_pwritev(extent->file, write_offset, n_bytes,
&local_qiov, 0);
write_end_sector = DIV_ROUND_UP(write_offset + n_bytes, BDRV_SECTOR_SIZE);
@ -1411,7 +1407,7 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
if (!extent->compressed) {
ret = bdrv_co_preadv(extent->file->bs,
ret = bdrv_co_preadv(extent->file,
cluster_offset + offset_in_cluster, bytes,
qiov, 0);
if (ret < 0) {
@ -1424,7 +1420,7 @@ static int vmdk_read_extent(VmdkExtent *extent, int64_t cluster_offset,
buf_bytes = cluster_bytes * 2;
cluster_buf = g_malloc(buf_bytes);
uncomp_buf = g_malloc(cluster_bytes);
ret = bdrv_pread(extent->file->bs,
ret = bdrv_pread(extent->file,
cluster_offset,
cluster_buf, buf_bytes);
if (ret < 0) {
@ -1501,7 +1497,7 @@ vmdk_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
qemu_iovec_reset(&local_qiov);
qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
ret = bdrv_co_preadv(bs->backing->bs, offset, n_bytes,
ret = bdrv_co_preadv(bs->backing, offset, n_bytes,
&local_qiov, 0);
if (ret < 0) {
goto fail;
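
Throughout vmdk.c the metadata I/O now goes through the BdrvChild links (bs->file, bs->backing, extent->file) instead of the child's BlockDriverState. A minimal sketch of the pattern, using the bdrv_pread(BdrvChild *, int64_t, void *, int) prototype introduced in this series; example_read_desc_header is a hypothetical helper and the offset is illustrative:

static int example_read_desc_header(BlockDriverState *bs,
                                    void *buf, int bytes)
{
    /* read image metadata through the child link rather than child->bs */
    int ret = bdrv_pread(bs->file, 0, buf, bytes);

    return ret < 0 ? ret : 0;
}
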

View File

@ -237,7 +237,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
goto fail;
}
ret = bdrv_pread(bs->file->bs, 0, s->footer_buf, HEADER_SIZE);
ret = bdrv_pread(bs->file, 0, s->footer_buf, HEADER_SIZE);
if (ret < 0) {
error_setg(errp, "Unable to read VHD header");
goto fail;
@ -257,7 +257,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
}
/* If a fixed disk, the footer is found only at the end of the file */
ret = bdrv_pread(bs->file->bs, offset-HEADER_SIZE, s->footer_buf,
ret = bdrv_pread(bs->file, offset-HEADER_SIZE, s->footer_buf,
HEADER_SIZE);
if (ret < 0) {
goto fail;
@ -328,7 +328,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
}
if (disk_type == VHD_DYNAMIC) {
ret = bdrv_pread(bs->file->bs, be64_to_cpu(footer->data_offset), buf,
ret = bdrv_pread(bs->file, be64_to_cpu(footer->data_offset), buf,
HEADER_SIZE);
if (ret < 0) {
error_setg(errp, "Error reading dynamic VHD header");
@ -385,7 +385,7 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags,
s->bat_offset = be64_to_cpu(dyndisk_header->table_offset);
ret = bdrv_pread(bs->file->bs, s->bat_offset, s->pagetable,
ret = bdrv_pread(bs->file, s->bat_offset, s->pagetable,
pagetable_size);
if (ret < 0) {
error_setg(errp, "Error reading pagetable");
@ -481,7 +481,7 @@ static inline int64_t get_image_offset(BlockDriverState *bs, uint64_t offset,
s->last_bitmap_offset = bitmap_offset;
memset(bitmap, 0xff, s->bitmap_size);
bdrv_pwrite_sync(bs->file->bs, bitmap_offset, bitmap, s->bitmap_size);
bdrv_pwrite_sync(bs->file, bitmap_offset, bitmap, s->bitmap_size);
}
return block_offset;
@ -505,7 +505,7 @@ static int rewrite_footer(BlockDriverState* bs)
BDRVVPCState *s = bs->opaque;
int64_t offset = s->free_data_block_offset;
ret = bdrv_pwrite_sync(bs->file->bs, offset, s->footer_buf, HEADER_SIZE);
ret = bdrv_pwrite_sync(bs->file, offset, s->footer_buf, HEADER_SIZE);
if (ret < 0)
return ret;
@ -539,7 +539,7 @@ static int64_t alloc_block(BlockDriverState* bs, int64_t offset)
/* Initialize the block's bitmap */
memset(bitmap, 0xff, s->bitmap_size);
ret = bdrv_pwrite_sync(bs->file->bs, s->free_data_block_offset, bitmap,
ret = bdrv_pwrite_sync(bs->file, s->free_data_block_offset, bitmap,
s->bitmap_size);
if (ret < 0) {
return ret;
@ -554,7 +554,7 @@ static int64_t alloc_block(BlockDriverState* bs, int64_t offset)
/* Write BAT entry to disk */
bat_offset = s->bat_offset + (4 * index);
bat_value = cpu_to_be32(s->pagetable[index]);
ret = bdrv_pwrite_sync(bs->file->bs, bat_offset, &bat_value, 4);
ret = bdrv_pwrite_sync(bs->file, bat_offset, &bat_value, 4);
if (ret < 0)
goto fail;
@ -591,7 +591,7 @@ vpc_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector local_qiov;
if (be32_to_cpu(footer->type) == VHD_FIXED) {
return bdrv_co_preadv(bs->file->bs, offset, bytes, qiov, 0);
return bdrv_co_preadv(bs->file, offset, bytes, qiov, 0);
}
qemu_co_mutex_lock(&s->lock);
@ -607,7 +607,7 @@ vpc_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
qemu_iovec_reset(&local_qiov);
qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
ret = bdrv_co_preadv(bs->file->bs, image_offset, n_bytes,
ret = bdrv_co_preadv(bs->file, image_offset, n_bytes,
&local_qiov, 0);
if (ret < 0) {
goto fail;
@ -640,7 +640,7 @@ vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
QEMUIOVector local_qiov;
if (be32_to_cpu(footer->type) == VHD_FIXED) {
return bdrv_co_pwritev(bs->file->bs, offset, bytes, qiov, 0);
return bdrv_co_pwritev(bs->file, offset, bytes, qiov, 0);
}
qemu_co_mutex_lock(&s->lock);
@ -661,7 +661,7 @@ vpc_co_pwritev(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
qemu_iovec_reset(&local_qiov);
qemu_iovec_concat(&local_qiov, qiov, bytes_done, n_bytes);
ret = bdrv_co_pwritev(bs->file->bs, image_offset, n_bytes,
ret = bdrv_co_pwritev(bs->file, image_offset, n_bytes,
&local_qiov, 0);
if (ret < 0) {
goto fail;
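
The same BdrvChild conversion applies to the synchronous metadata updates in vpc.c, where bdrv_pwrite_sync() now takes the child as well. A hedged sketch of updating a single big-endian BAT entry in place, in the style of alloc_block() above; example_update_bat_entry and its parameters are illustrative:

static int example_update_bat_entry(BlockDriverState *bs, int64_t bat_offset,
                                    uint32_t index, uint32_t value)
{
    uint32_t bat_value = cpu_to_be32(value);

    /* 4-byte entry at bat_offset + 4 * index, flushed before returning */
    return bdrv_pwrite_sync(bs->file, bat_offset + 4 * (int64_t)index,
                            &bat_value, sizeof(bat_value));
}
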

View File

@ -341,9 +341,8 @@ typedef struct BDRVVVFATState {
unsigned int current_cluster;
/* write support */
BlockDriverState* write_target;
char* qcow_filename;
BlockDriverState* qcow;
BdrvChild* qcow;
void* fat2;
char* used_clusters;
array_t commits;
@ -981,7 +980,7 @@ static int init_directories(BDRVVVFATState* s,
static BDRVVVFATState *vvv = NULL;
#endif
static int enable_write_target(BDRVVVFATState *s, Error **errp);
static int enable_write_target(BlockDriverState *bs, Error **errp);
static int is_consistent(BDRVVVFATState *s);
static QemuOptsList runtime_opts = {
@ -1158,8 +1157,8 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
s->current_cluster=0xffffffff;
/* read only is the default for safety */
bs->read_only = 1;
s->qcow = s->write_target = NULL;
bs->read_only = true;
s->qcow = NULL;
s->qcow_filename = NULL;
s->fat2 = NULL;
s->downcase_short_names = 1;
@ -1170,14 +1169,13 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags,
s->sector_count = cyls * heads * secs - (s->first_sectors_number - 1);
if (qemu_opt_get_bool(opts, "rw", false)) {
ret = enable_write_target(s, errp);
ret = enable_write_target(bs, errp);
if (ret < 0) {
goto fail;
}
bs->read_only = 0;
bs->read_only = false;
}
bs->request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O supported */
bs->total_sectors = cyls * heads * secs;
if (init_directories(s, dirname, heads, secs, errp)) {
@ -1209,6 +1207,11 @@ fail:
return ret;
}
static void vvfat_refresh_limits(BlockDriverState *bs, Error **errp)
{
bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
static inline void vvfat_close_current_file(BDRVVVFATState *s)
{
if(s->current_mapping) {
@ -1387,8 +1390,9 @@ static int vvfat_read(BlockDriverState *bs, int64_t sector_num,
return -1;
if (s->qcow) {
int n;
if (bdrv_is_allocated(s->qcow, sector_num, nb_sectors-i, &n)) {
DLOG(fprintf(stderr, "sectors %d+%d allocated\n", (int)sector_num, n));
if (bdrv_is_allocated(s->qcow->bs, sector_num, nb_sectors-i, &n)) {
DLOG(fprintf(stderr, "sectors %d+%d allocated\n",
(int)sector_num, n));
if (bdrv_read(s->qcow, sector_num, buf + i * 0x200, n)) {
return -1;
}
@ -1665,12 +1669,15 @@ static inline int cluster_was_modified(BDRVVVFATState* s, uint32_t cluster_num)
int was_modified = 0;
int i, dummy;
if (s->qcow == NULL)
if (s->qcow == NULL) {
return 0;
}
for (i = 0; !was_modified && i < s->sectors_per_cluster; i++)
was_modified = bdrv_is_allocated(s->qcow,
cluster2sector(s, cluster_num) + i, 1, &dummy);
for (i = 0; !was_modified && i < s->sectors_per_cluster; i++) {
was_modified = bdrv_is_allocated(s->qcow->bs,
cluster2sector(s, cluster_num) + i,
1, &dummy);
}
return was_modified;
}
@ -1819,11 +1826,16 @@ static uint32_t get_cluster_count_for_direntry(BDRVVVFATState* s,
vvfat_close_current_file(s);
for (i = 0; i < s->sectors_per_cluster; i++) {
if (!bdrv_is_allocated(s->qcow, offset + i, 1, &dummy)) {
if (vvfat_read(s->bs, offset, s->cluster_buffer, 1)) {
int res;
res = bdrv_is_allocated(s->qcow->bs, offset + i, 1, &dummy);
if (!res) {
res = vvfat_read(s->bs, offset, s->cluster_buffer, 1);
if (res) {
return -1;
}
if (bdrv_write(s->qcow, offset, s->cluster_buffer, 1)) {
res = bdrv_write(s->qcow, offset, s->cluster_buffer, 1);
if (res) {
return -2;
}
}
@ -2779,8 +2791,8 @@ static int do_commit(BDRVVVFATState* s)
return ret;
}
if (s->qcow->drv->bdrv_make_empty) {
s->qcow->drv->bdrv_make_empty(s->qcow);
if (s->qcow->bs->drv->bdrv_make_empty) {
s->qcow->bs->drv->bdrv_make_empty(s->qcow->bs);
}
memset(s->used_clusters, 0, sector2cluster(s, s->sector_count));
@ -2946,7 +2958,7 @@ write_target_commit(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
static void write_target_close(BlockDriverState *bs) {
BDRVVVFATState* s = *((BDRVVVFATState**) bs->opaque);
bdrv_unref(s->qcow);
bdrv_unref_child(s->bs, s->qcow);
g_free(s->qcow_filename);
}
@ -2956,8 +2968,19 @@ static BlockDriver vvfat_write_target = {
.bdrv_close = write_target_close,
};
static int enable_write_target(BDRVVVFATState *s, Error **errp)
static void vvfat_qcow_options(int *child_flags, QDict *child_options,
int parent_flags, QDict *parent_options)
{
*child_flags = BDRV_O_RDWR | BDRV_O_NO_FLUSH;
}
static const BdrvChildRole child_vvfat_qcow = {
.inherit_options = vvfat_qcow_options,
};
static int enable_write_target(BlockDriverState *bs, Error **errp)
{
BDRVVVFATState *s = bs->opaque;
BlockDriver *bdrv_qcow = NULL;
BlockDriverState *backing;
QemuOpts *opts = NULL;
@ -2996,8 +3019,8 @@ static int enable_write_target(BDRVVVFATState *s, Error **errp)
options = qdict_new();
qdict_put(options, "driver", qstring_from_str("qcow"));
s->qcow = bdrv_open(s->qcow_filename, NULL, options,
BDRV_O_RDWR | BDRV_O_NO_FLUSH, errp);
s->qcow = bdrv_open_child(s->qcow_filename, options, "write-target", bs,
&child_vvfat_qcow, false, errp);
if (!s->qcow) {
ret = -EINVAL;
goto err;
@ -3046,6 +3069,7 @@ static BlockDriver bdrv_vvfat = {
.bdrv_parse_filename = vvfat_parse_filename,
.bdrv_file_open = vvfat_open,
.bdrv_refresh_limits = vvfat_refresh_limits,
.bdrv_close = vvfat_close,
.bdrv_co_preadv = vvfat_co_preadv,
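
vvfat now attaches its qcow write target with bdrv_open_child() and a dedicated BdrvChildRole instead of a bare bdrv_open(). A hedged sketch of the same pattern for a hypothetical driver child; the names are illustrative and only the inherit_options hook and the bdrv_open_child() call mirror the code above:

static void example_child_options(int *child_flags, QDict *child_options,
                                  int parent_flags, QDict *parent_options)
{
    /* the internal child is always opened writable, with flushes skipped */
    *child_flags = BDRV_O_RDWR | BDRV_O_NO_FLUSH;
}

static const BdrvChildRole child_example = {
    .inherit_options = example_child_options,
};

/* in the parent's open function:
 *   s->child = bdrv_open_child(filename, options, "example-child", bs,
 *                              &child_example, false, errp);
 * and bdrv_unref_child(bs, s->child) on close, as write_target_close()
 * does above.
 */
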

View File

@ -384,7 +384,7 @@ static int multireq_compare(const void *a, const void *b)
void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
{
int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
int max_xfer_len = 0;
uint32_t max_transfer;
int64_t sector_num = 0;
if (mrb->num_reqs == 1) {
@ -393,8 +393,7 @@ void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
return;
}
max_xfer_len = blk_get_max_transfer_length(mrb->reqs[0]->dev->blk);
max_xfer_len = MIN_NON_ZERO(max_xfer_len, BDRV_REQUEST_MAX_SECTORS);
max_transfer = blk_get_max_transfer(mrb->reqs[0]->dev->blk);
qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
&multireq_compare);
@ -410,8 +409,9 @@ void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
*/
if (sector_num + nb_sectors != req->sector_num ||
niov > blk_get_max_iov(blk) - req->qiov.niov ||
req->qiov.size / BDRV_SECTOR_SIZE > max_xfer_len ||
nb_sectors > max_xfer_len - req->qiov.size / BDRV_SECTOR_SIZE) {
req->qiov.size > max_transfer ||
nb_sectors > (max_transfer -
req->qiov.size) / BDRV_SECTOR_SIZE) {
submit_requests(blk, mrb, start, num_reqs, niov);
num_reqs = 0;
}
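
With blk_get_max_transfer() reporting a byte limit, the multireq merge check above compares byte counts directly instead of sector counts. A worked example with illustrative numbers:

/* max_transfer = 4 MiB, BDRV_SECTOR_SIZE = 512, 7000 sectors already
 * merged, incoming request with qiov.size = 1 MiB (1048576 bytes):
 *   (max_transfer - qiov.size) / 512 = (4194304 - 1048576) / 512 = 6144
 * 7000 > 6144, so the merged run is submitted before this request starts
 * a new batch.
 */
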

View File

@ -82,7 +82,7 @@ static void parse_drive(DeviceState *dev, const char *str, void **ptr,
if (blk_attach_dev(blk, dev) < 0) {
DriveInfo *dinfo = blk_legacy_dinfo(blk);
if (dinfo->type != IF_NONE) {
if (dinfo && dinfo->type != IF_NONE) {
error_setg(errp, "Drive '%s' is already in use because "
"it has been automatically connected to another "
"device (did you need 'if=none' in the drive options?)",

View File

@ -225,13 +225,14 @@ static void scsi_read_complete(void * opaque, int ret)
if (s->type == TYPE_DISK &&
r->req.cmd.buf[0] == INQUIRY &&
r->req.cmd.buf[2] == 0xb0) {
uint32_t max_xfer_len = blk_get_max_transfer_length(s->conf.blk);
if (max_xfer_len) {
stl_be_p(&r->buf[8], max_xfer_len);
uint32_t max_transfer =
blk_get_max_transfer(s->conf.blk) / s->blocksize;
assert(max_transfer);
stl_be_p(&r->buf[8], max_transfer);
/* Also take care of the opt xfer len. */
if (ldl_be_p(&r->buf[12]) > max_xfer_len) {
stl_be_p(&r->buf[12], max_xfer_len);
}
if (ldl_be_p(&r->buf[12]) > max_transfer) {
stl_be_p(&r->buf[12], max_transfer);
}
}
scsi_req_data(&r->req, len);
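
Because blk_get_max_transfer() is in bytes, the emulated Block Limits VPD page (0xb0) divides by the logical block size before patching the response. A worked example with illustrative values:

/* blk_get_max_transfer() = 8 MiB, s->blocksize = 512:
 *   max_transfer = 8388608 / 512 = 16384 blocks
 * 16384 is stored big-endian at buf[8] (maximum transfer length), and the
 * optimal transfer length at buf[12] is clamped down to it if larger.
 */
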

View File

@ -226,24 +226,22 @@ int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
int bdrv_read(BdrvChild *child, int64_t sector_num,
uint8_t *buf, int nb_sectors);
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
int bdrv_write(BdrvChild *child, int64_t sector_num,
const uint8_t *buf, int nb_sectors);
int bdrv_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
int count, BdrvRequestFlags flags);
int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags);
int bdrv_pread(BlockDriverState *bs, int64_t offset,
void *buf, int count);
int bdrv_preadv(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes);
int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes);
int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
const void *buf, int count);
int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
const void *buf, int count);
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov);
/*
* Efficiently zero a region of the disk image. Note that this is a regular
@ -251,7 +249,7 @@ int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
* function is not suitable for zeroing the entire image in a single request
* because it may allocate memory for the entire region.
*/
int coroutine_fn bdrv_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
int count, BdrvRequestFlags flags);
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
const char *backing_file);
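
A minimal sketch of calling the coroutine zero-write with the new BdrvChild-based prototype; per the comment above, keep the region modest because the implementation may allocate memory for the whole range. example_zero_header is hypothetical, and the BDRV_REQ_MAY_UNMAP flag is optional:

static int coroutine_fn example_zero_header(BdrvChild *child)
{
    /* zero the first 64 KiB of the child, allowing unmap if supported */
    return bdrv_co_pwrite_zeroes(child, 0, 64 * 1024, BDRV_REQ_MAY_UNMAP);
}
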
@ -310,10 +308,10 @@ BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
const char *node_name, Error **errp);
/* async block I/O */
BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
QEMUIOVector *iov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
QEMUIOVector *iov, int nb_sectors,
BlockCompletionFunc *cb, void *opaque);
BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
@ -362,8 +360,8 @@ int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
int64_t sector_num, int nb_sectors, int *pnum);
int bdrv_is_read_only(BlockDriverState *bs);
int bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_read_only(BlockDriverState *bs);
bool bdrv_is_sg(BlockDriverState *bs);
bool bdrv_is_inserted(BlockDriverState *bs);
int bdrv_media_changed(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
@ -390,8 +388,8 @@ BlockDriverState *bdrv_first(BdrvNextIterator *it);
BlockDriverState *bdrv_next(BdrvNextIterator *it);
BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
int bdrv_is_encrypted(BlockDriverState *bs);
int bdrv_key_required(BlockDriverState *bs);
bool bdrv_is_encrypted(BlockDriverState *bs);
bool bdrv_key_required(BlockDriverState *bs);
int bdrv_set_key(BlockDriverState *bs, const char *key);
void bdrv_add_key(BlockDriverState *bs, const char *key, Error **errp);
int bdrv_query_missing_keys(void);

View File

@ -324,30 +324,48 @@ struct BlockDriver {
};
typedef struct BlockLimits {
/* maximum number of sectors that can be discarded at once */
int max_discard;
/* Alignment requirement, in bytes, for offset/length of I/O
* requests. Must be a power of 2 less than INT_MAX; defaults to
* 1 for drivers with modern byte interfaces, and to 512
* otherwise. */
uint32_t request_alignment;
/* optimal alignment for discard requests in sectors */
int64_t discard_alignment;
/* maximum number of bytes that can be discarded at once (since it
* is signed, it must be < 2G, if set), should be multiple of
* pdiscard_alignment, but need not be power of 2. May be 0 if no
* inherent 32-bit limit */
int32_t max_pdiscard;
/* optimal alignment for discard requests in bytes, must be power
* of 2, less than max_pdiscard if that is set, and multiple of
* bl.request_alignment. May be 0 if bl.request_alignment is good
* enough */
uint32_t pdiscard_alignment;
    /* maximum number of bytes that can be zeroized at once (since it is
* signed, it must be < 2G, if set) */
* signed, it must be < 2G, if set), should be multiple of
* pwrite_zeroes_alignment. May be 0 if no inherent 32-bit limit */
int32_t max_pwrite_zeroes;
/* optimal alignment for write zeroes requests in bytes, must be
* power of 2, and less than max_pwrite_zeroes if that is set */
* power of 2, less than max_pwrite_zeroes if that is set, and
* multiple of bl.request_alignment. May be 0 if
* bl.request_alignment is good enough */
uint32_t pwrite_zeroes_alignment;
/* optimal transfer length in sectors */
int opt_transfer_length;
/* optimal transfer length in bytes (must be power of 2, and
* multiple of bl.request_alignment), or 0 if no preferred size */
uint32_t opt_transfer;
/* maximal transfer length in sectors */
int max_transfer_length;
/* maximal transfer length in bytes (need not be power of 2, but
* should be multiple of opt_transfer), or 0 for no 32-bit limit.
* For now, anything larger than INT_MAX is clamped down. */
uint32_t max_transfer;
/* memory alignment so that no bounce buffer is needed */
/* memory alignment, in bytes so that no bounce buffer is needed */
size_t min_mem_alignment;
/* memory alignment for bounce buffer */
/* memory alignment, in bytes, for bounce buffer */
size_t opt_mem_alignment;
/* maximum number of iovec elements */
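
A hedged sketch of how a driver's .bdrv_refresh_limits callback might populate the byte-based fields above; the values are purely illustrative but respect the stated constraints (power-of-two alignments, limits that are multiples of their alignment):

static void example_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = 512;          /* sector-granular driver */
    bs->bl.opt_transfer = 64 * 1024;         /* power of 2, multiple of 512 */
    bs->bl.max_transfer = 16 * 1024 * 1024;  /* multiple of opt_transfer */
    bs->bl.pdiscard_alignment = 64 * 1024;   /* power of 2 */
    bs->bl.max_pdiscard = 1024 * 1024;       /* < 2G, multiple of alignment */
    bs->bl.pwrite_zeroes_alignment = 4096;   /* multiple of request_alignment */
}
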
@ -411,14 +429,15 @@ struct BdrvChild {
struct BlockDriverState {
int64_t total_sectors; /* if we are reading a disk image, give its
size in sectors */
int read_only; /* if true, the media is read only */
int open_flags; /* flags used to open the file, re-used for re-open */
int encrypted; /* if true, the media is encrypted */
int valid_key; /* if true, a valid encryption key has been set */
int sg; /* if true, the device is a /dev/sg* */
int copy_on_read; /* if true, copy read backing sectors into image
bool read_only; /* if true, the media is read only */
bool encrypted; /* if true, the media is encrypted */
bool valid_key; /* if true, a valid encryption key has been set */
bool sg; /* if true, the device is a /dev/sg* */
bool probed; /* if true, format was probed rather than specified */
int copy_on_read; /* if nonzero, copy read backing sectors into image.
note this is a reference count */
bool probed;
BlockDriver *drv; /* NULL means no media */
void *opaque;
@ -453,8 +472,6 @@ struct BlockDriverState {
/* I/O Limits */
BlockLimits bl;
/* Alignment requirement for offset/length of I/O requests */
unsigned int request_alignment;
/* Flags honored during pwrite (so far: BDRV_REQ_FUA) */
unsigned int supported_write_flags;
/* Flags honored during pwrite_zeroes (so far: BDRV_REQ_FUA,
@ -546,10 +563,10 @@ extern BlockDriver bdrv_qcow2;
*/
void bdrv_setup_io_funcs(BlockDriver *bdrv);
int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
int coroutine_fn bdrv_co_preadv(BdrvChild *child,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags);
int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
BdrvRequestFlags flags);

View File

@ -77,6 +77,8 @@ enum {
/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)
#define NBD_MAX_SECTORS (NBD_MAX_BUFFER_SIZE / BDRV_SECTOR_SIZE)
/* Maximum size of an export name. The NBD spec requires 256 and
* suggests that servers support up to 4096, but we stick to only the
* required size so that we can stack-allocate the names, and because

View File

@ -124,6 +124,7 @@ int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
int count, BdrvRequestFlags flags,
BlockCompletionFunc *cb, void *opaque);
int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags);
int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count);
int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
BdrvRequestFlags flags);
@ -170,7 +171,7 @@ bool blk_is_available(BlockBackend *blk);
void blk_lock_medium(BlockBackend *blk, bool locked);
void blk_eject(BlockBackend *blk, bool eject_flag);
int blk_get_flags(BlockBackend *blk);
int blk_get_max_transfer_length(BlockBackend *blk);
uint32_t blk_get_max_transfer(BlockBackend *blk);
int blk_get_max_iov(BlockBackend *blk);
void blk_set_guest_block_size(BlockBackend *blk, int align);
void *blk_try_blockalign(BlockBackend *blk, size_t size);
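
blk_get_max_transfer() replaces the sector-based blk_get_max_transfer_length() and reports bytes. A hedged sketch of using it to size a copy buffer, assuming a reported value of 0 (if it ever occurs) means no specific limit; example_copy_buf_size is hypothetical:

static size_t example_copy_buf_size(BlockBackend *blk)
{
    uint32_t max = blk_get_max_transfer(blk);

    /* cap the buffer at 1 MiB; also fall back to it when no limit is set */
    if (max == 0 || max > 1024 * 1024) {
        return 1024 * 1024;
    }
    return max;
}
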

View File

@ -1961,7 +1961,8 @@
#
# @config: #optional filename of the configuration file
#
# @align: #optional required alignment for requests in bytes
# @align: #optional required alignment for requests in bytes,
# must be power of 2, or 0 for default
#
# @inject-error: #optional array of error injection descriptions
#

View File

@ -1648,7 +1648,7 @@ static int convert_do_copy(ImgConvertState *s)
if (!s->has_zero_init && !s->target_has_backing &&
bdrv_can_write_zeroes_with_unmap(blk_bs(s->target)))
{
ret = bdrv_make_zero(blk_bs(s->target), BDRV_REQ_MAY_UNMAP);
ret = blk_make_zero(s->target, BDRV_REQ_MAY_UNMAP);
if (ret == 0) {
s->has_zero_init = true;
}
@ -2085,13 +2085,14 @@ static int img_convert(int argc, char **argv)
}
out_bs = blk_bs(out_blk);
/* increase bufsectors from the default 4096 (2M) if opt_transfer_length
/* increase bufsectors from the default 4096 (2M) if opt_transfer
* or discard_alignment of the out_bs is greater. Limit to 32768 (16MB)
* as maximum. */
bufsectors = MIN(32768,
MAX(bufsectors, MAX(out_bs->bl.opt_transfer_length,
out_bs->bl.discard_alignment))
);
MAX(bufsectors,
MAX(out_bs->bl.opt_transfer >> BDRV_SECTOR_BITS,
out_bs->bl.pdiscard_alignment >>
BDRV_SECTOR_BITS)));
if (skip_create) {
int64_t output_sectors = blk_nb_sectors(out_blk);
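
A worked example for the bufsectors computation above, with illustrative values:

/* opt_transfer = 4 MiB, pdiscard_alignment = 0, BDRV_SECTOR_BITS = 9:
 *   MIN(32768, MAX(4096, MAX(4194304 >> 9, 0 >> 9)))
 * = MIN(32768, MAX(4096, 8192)) = 8192 sectors, i.e. a 4 MiB buffer.
 */
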
@ -3866,7 +3867,7 @@ int main(int argc, char **argv)
return 0;
}
argv += optind;
optind = 1;
optind = 0;
if (!trace_init_backends()) {
exit(1);