parallels: Handle failure for potentially large allocations
Some code in the block layer makes potentially huge allocations. Failure is not completely unexpected there, so avoid aborting qemu and handle out-of-memory situations gracefully.

This patch addresses the allocations in the parallels block driver.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Benoit Canet <benoit@irqsave.net>
parent 2347dd7b68
commit f7b593d937
@@ -105,7 +105,11 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags,
         ret = -EFBIG;
         goto fail;
     }
-    s->catalog_bitmap = g_malloc(s->catalog_size * 4);
+    s->catalog_bitmap = g_try_malloc(s->catalog_size * 4);
+    if (s->catalog_size && s->catalog_bitmap == NULL) {
+        ret = -ENOMEM;
+        goto fail;
+    }
 
     ret = bdrv_pread(bs->file, 64, s->catalog_bitmap, s->catalog_size * 4);
     if (ret < 0) {
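The hunk above replaces g_malloc(), which aborts the process when the allocation fails, with g_try_malloc(), which returns NULL instead. Below is a minimal standalone sketch of the same pattern outside QEMU (load_catalog and its signature are hypothetical, introduced here only for illustration): because g_try_malloc() also returns NULL for a zero-byte request, the requested size has to be checked together with the NULL result before reporting out-of-memory.

/* Sketch of the g_malloc() -> g_try_malloc() error-handling pattern.
 * Not QEMU code; load_catalog() is a made-up helper for illustration. */
#include <glib.h>
#include <errno.h>

int load_catalog(guint32 catalog_size, guint32 **out_bitmap)
{
    /* g_try_malloc() returns NULL on allocation failure instead of aborting. */
    guint32 *bitmap = g_try_malloc(catalog_size * sizeof(guint32));

    /* A NULL result only means out-of-memory if we asked for more than
     * zero bytes; g_try_malloc(0) legitimately returns NULL. */
    if (catalog_size && bitmap == NULL) {
        return -ENOMEM;
    }

    *out_bitmap = bitmap;
    return 0;
}

The caller can then propagate the negative errno value upward, which matches how the patched parallels_open() path jumps to its fail label with ret set to -ENOMEM.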