qemu-img: initialize MapEntry object

Commit 16b0d555 introduced an issue where has_filename is not initialized
for the 'next' MapEntry object, which leads to uninitialized-value errors
being reported by both Valgrind and Clang -fsanitize=undefined.

Zero the stack object at allocation, and make sure the utility that
populates the fields marks has_filename as false when no filename is
available.

Signed-off-by: John Snow <jsnow@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
commit 2875645b65
parent c3bce9d5f9
Author:    John Snow <jsnow@redhat.com>, 2016-02-05 13:12:33 -05:00
Committer: Kevin Wolf <kwolf@redhat.com>

@@ -2194,6 +2194,7 @@ static int get_block_status(BlockDriverState *bs, int64_t sector_num,
     int64_t ret;
     int depth;
     BlockDriverState *file;
+    bool has_offset;
 
     /* As an optimization, we could cache the current range of unallocated
      * clusters in each file of the chain, and avoid querying the same
@@ -2220,17 +2221,20 @@ static int get_block_status(BlockDriverState *bs, int64_t sector_num,
         depth++;
     }
 
-    e->start = sector_num * BDRV_SECTOR_SIZE;
-    e->length = nb_sectors * BDRV_SECTOR_SIZE;
-    e->data = !!(ret & BDRV_BLOCK_DATA);
-    e->zero = !!(ret & BDRV_BLOCK_ZERO);
-    e->offset = ret & BDRV_BLOCK_OFFSET_MASK;
-    e->has_offset = !!(ret & BDRV_BLOCK_OFFSET_VALID);
-    e->depth = depth;
-    if (file && e->has_offset) {
-        e->has_filename = true;
-        e->filename = file->filename;
-    }
+    has_offset = !!(ret & BDRV_BLOCK_OFFSET_VALID);
+
+    *e = (MapEntry) {
+        .start = sector_num * BDRV_SECTOR_SIZE,
+        .length = nb_sectors * BDRV_SECTOR_SIZE,
+        .data = !!(ret & BDRV_BLOCK_DATA),
+        .zero = !!(ret & BDRV_BLOCK_ZERO),
+        .offset = ret & BDRV_BLOCK_OFFSET_MASK,
+        .has_offset = has_offset,
+        .depth = depth,
+        .has_filename = file && has_offset,
+        .filename = file && has_offset ? file->filename : NULL,
+    };
+
     return 0;
 }
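For illustration only (not part of the commit): a minimal, self-contained sketch of the initialization pattern the fix relies on. The SimpleMapEntry struct and fill_entry() helper below are hypothetical stand-ins, not QEMU code; the point is that assigning a C99 compound literal with designated initializers overwrites every member, zeroing any field that is not named, and that zero-initializing the stack object at its declaration means no member is ever read while indeterminate.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, trimmed-down stand-in for qemu-img's MapEntry. */
typedef struct SimpleMapEntry {
    int64_t start;
    int64_t length;
    bool data;
    bool has_filename;      /* the field the original bug left indeterminate */
    const char *filename;
} SimpleMapEntry;

/* Hypothetical helper in the style of the fixed get_block_status():
 * assigning a compound literal replaces the whole struct, so members we
 * do not mention (here: none are left out, but filename may be NULL)
 * can never carry stale stack garbage. */
static void fill_entry(SimpleMapEntry *e, int64_t start, int64_t length,
                       bool data, const char *filename)
{
    *e = (SimpleMapEntry) {
        .start = start,
        .length = length,
        .data = data,
        .has_filename = filename != NULL,
        .filename = filename,   /* NULL when absent, never uninitialized */
    };
}

int main(void)
{
    /* Zero the stack object at allocation, as the commit message suggests. */
    SimpleMapEntry next = { .length = 0 };

    fill_entry(&next, 0, 65536, true, NULL);
    printf("has_filename=%d filename=%s\n",
           next.has_filename, next.filename ? next.filename : "(none)");
    return 0;
}

Filling the struct in a single assignment, rather than setting fields one by one and only conditionally touching has_filename, is what removes the code path that previously left a member untouched.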