util/iov: make qemu_iovec_init_extended() honest
Actually, we can't extend the I/O vector in all cases. Handle possible
IOV_MAX and size_t overflows. For now, add assertions to the callers
(they rely on success anyway) and fix them in the following patch.

Also add some additional assertions to qemu_iovec_init_slice() while
we are here.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20201211183934.169161-3-vsementsov@virtuozzo.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Eric Blake <eblake@redhat.com>
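For illustration (not part of this patch): qemu_iovec_init_extended() now
returns 0 on success and -EINVAL when the combined length would overflow
size_t or the resulting number of iovec entries would exceed IOV_MAX. A
caller that cannot simply assert success could propagate the error along
the lines of the sketch below; the wrapper build_padded_qiov() and its
parameters are invented for this example.

    #include "qemu/osdep.h"
    #include "qemu/iov.h"

    /*
     * Hypothetical caller-side wrapper (not from this patch): forward the
     * new error code instead of asserting on it.
     */
    static int build_padded_qiov(QEMUIOVector *dst,
                                 void *head, size_t head_len,
                                 QEMUIOVector *mid, size_t mid_offset,
                                 size_t mid_len,
                                 void *tail, size_t tail_len)
    {
        int ret;

        ret = qemu_iovec_init_extended(dst, head, head_len,
                                       mid, mid_offset, mid_len,
                                       tail, tail_len);
        if (ret < 0) {
            /* -EINVAL: lengths overflow size_t or entries exceed IOV_MAX */
            return ret;
        }

        return 0;
    }

Returning the error instead of aborting is presumably what allows the
follow-up patch mentioned above to convert the real callers from
assert(ret == 0) to proper error handling.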
parent 69b55e03f7
commit 4c002cef0e
 block/io.c         | 10
 include/qemu/iov.h |  2
 util/iov.c         | 25
diff --git a/block/io.c b/block/io.c
--- a/block/io.c
+++ b/block/io.c
@@ -1680,13 +1680,17 @@ static bool bdrv_pad_request(BlockDriverState *bs,
                              int64_t *offset, unsigned int *bytes,
                              BdrvRequestPadding *pad)
 {
+    int ret;
+
     if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
         return false;
     }
 
-    qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
-                             *qiov, *qiov_offset, *bytes,
-                             pad->buf + pad->buf_len - pad->tail, pad->tail);
+    ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
+                                   *qiov, *qiov_offset, *bytes,
+                                   pad->buf + pad->buf_len - pad->tail,
+                                   pad->tail);
+    assert(ret == 0);
     *bytes += pad->head + pad->tail;
     *offset -= pad->head;
     *qiov = &pad->local_qiov;
diff --git a/include/qemu/iov.h b/include/qemu/iov.h
--- a/include/qemu/iov.h
+++ b/include/qemu/iov.h
@@ -222,7 +222,7 @@ static inline void *qemu_iovec_buf(QEMUIOVector *qiov)
 
 void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
 void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
-void qemu_iovec_init_extended(
+int qemu_iovec_init_extended(
         QEMUIOVector *qiov,
         void *head_buf, size_t head_len,
         QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len,
diff --git a/util/iov.c b/util/iov.c
--- a/util/iov.c
+++ b/util/iov.c
@@ -415,7 +415,7 @@ int qemu_iovec_subvec_niov(QEMUIOVector *qiov, size_t offset, size_t len)
  * Compile new iovec, combining @head_buf buffer, sub-qiov of @mid_qiov,
  * and @tail_buf buffer into new qiov.
  */
-void qemu_iovec_init_extended(
+int qemu_iovec_init_extended(
         QEMUIOVector *qiov,
         void *head_buf, size_t head_len,
         QEMUIOVector *mid_qiov, size_t mid_offset, size_t mid_len,
@@ -425,12 +425,24 @@ void qemu_iovec_init_extended(
     int total_niov, mid_niov = 0;
     struct iovec *p, *mid_iov = NULL;
 
+    assert(mid_qiov->niov <= IOV_MAX);
+
+    if (SIZE_MAX - head_len < mid_len ||
+        SIZE_MAX - head_len - mid_len < tail_len)
+    {
+        return -EINVAL;
+    }
+
     if (mid_len) {
         mid_iov = qiov_slice(mid_qiov, mid_offset, mid_len,
                              &mid_head, &mid_tail, &mid_niov);
     }
 
     total_niov = !!head_len + mid_niov + !!tail_len;
+    if (total_niov > IOV_MAX) {
+        return -EINVAL;
+    }
+
     if (total_niov == 1) {
         qemu_iovec_init_buf(qiov, NULL, 0);
         p = &qiov->local_iov;
@@ -459,6 +471,8 @@ void qemu_iovec_init_extended(
         p->iov_base = tail_buf;
         p->iov_len = tail_len;
     }
+
+    return 0;
 }
 
 /*
@@ -492,7 +506,14 @@ bool qemu_iovec_is_zero(QEMUIOVector *qiov, size_t offset, size_t bytes)
 void qemu_iovec_init_slice(QEMUIOVector *qiov, QEMUIOVector *source,
                            size_t offset, size_t len)
 {
-    qemu_iovec_init_extended(qiov, NULL, 0, source, offset, len, NULL, 0);
+    int ret;
+
+    assert(source->size >= len);
+    assert(source->size - len >= offset);
+
+    /* We shrink the request, so we can't overflow neither size_t nor MAX_IOV */
+    ret = qemu_iovec_init_extended(qiov, NULL, 0, source, offset, len, NULL, 0);
+    assert(ret == 0);
 }
 
 void qemu_iovec_destroy(QEMUIOVector *qiov)
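A note on the overflow check added in util/iov.c above: the test
SIZE_MAX - head_len < mid_len catches head_len + mid_len wrapping around
size_t, and, thanks to the short-circuit ||, SIZE_MAX - head_len - mid_len
< tail_len is only evaluated once head_len + mid_len is known to fit, so it
safely catches the full three-way sum overflowing without ever performing a
wrapping addition. The standalone sketch below shows the same idiom outside
QEMU; the helper name and test values are invented for this example.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Invented helper (not QEMU code): true if a + b + c would exceed
     * SIZE_MAX, checked without ever performing a wrapping addition. */
    static bool total_len_overflows(size_t a, size_t b, size_t c)
    {
        return SIZE_MAX - a < b ||      /* a + b alone already wraps */
               SIZE_MAX - a - b < c;    /* a + b fits, adding c wraps */
    }

    int main(void)
    {
        printf("%d\n", total_len_overflows(16, 512, 4096));       /* 0: fits */
        printf("%d\n", total_len_overflows(SIZE_MAX - 8, 16, 0)); /* 1: wraps */
        return 0;
    }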