qcow2: Check maximum L1 size in qcow2_snapshot_load_tmp() (CVE-2014-0143)
This avoids an unbounded allocation.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 6a83f8b5be
parent c05e4667be
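In short: qcow2_snapshot_load_tmp() now rejects a snapshot whose L1 table exceeds QCOW_MAX_L1_SIZE before allocating memory for it, the existing open-time check on the active L1 table is switched to the same macro, and qemu-iotests case 080 gains a regression test that loads a crafted snapshot via qemu-img convert -s.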
block/qcow2-snapshot.c
@@ -680,6 +680,10 @@ int qcow2_snapshot_load_tmp(BlockDriverState *bs,
     sn = &s->snapshots[snapshot_index];
 
     /* Allocate and read in the snapshot's L1 table */
+    if (sn->l1_size > QCOW_MAX_L1_SIZE) {
+        error_setg(errp, "Snapshot L1 table too large");
+        return -EFBIG;
+    }
     new_l1_bytes = sn->l1_size * sizeof(uint64_t);
     new_l1_table = g_malloc0(align_offset(new_l1_bytes, 512));
 
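To illustrate the pattern the hunk above enforces, here is a minimal, self-contained sketch. It is not the QEMU code: the helper and its allocation path are made up for illustration, and only QCOW_MAX_L1_SIZE and the -EFBIG result mirror the patch. The point is that an entry count read from an untrusted image is validated before it is turned into an allocation size.

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QCOW_MAX_L1_SIZE 0x2000000  /* same value the patch introduces */

/* Illustrative only: allocate an L1 table whose entry count comes straight
 * from the image file. Without the bound check, a crafted image could make
 * l1_size any 32-bit value and drive an arbitrarily large allocation. */
static int load_snapshot_l1(uint32_t l1_size, uint64_t **l1_table)
{
    if (l1_size > QCOW_MAX_L1_SIZE) {
        return -EFBIG;              /* reject before sizing the allocation */
    }

    uint64_t l1_bytes = (uint64_t)l1_size * sizeof(uint64_t);
    uint64_t *table = calloc(1, l1_bytes ? l1_bytes : 1);
    if (!table) {
        return -ENOMEM;
    }
    *l1_table = table;
    return 0;
}

int main(void)
{
    uint64_t *table = NULL;
    /* 0x10000000 entries is the value the new iotest pokes into the image;
     * far above the cap, so this fails cleanly with -EFBIG. */
    printf("crafted image: %d\n", load_snapshot_l1(0x10000000, &table));
    free(table);
    return 0;
}
```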
block/qcow2.c
@@ -638,9 +638,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
     }
 
     /* read the level 1 table */
-    if (header.l1_size > 0x2000000) {
-        /* 32 MB L1 table is enough for 2 PB images at 64k cluster size
-         * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
+    if (header.l1_size > QCOW_MAX_L1_SIZE) {
         error_setg(errp, "Active L1 table too large");
         ret = -EFBIG;
         goto fail;
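The open path already enforced this limit with a literal 0x2000000; the hunk above replaces the literal with the new QCOW_MAX_L1_SIZE macro and drops the inline comment, which moves next to the macro definition in the header hunk below, so the active-L1 check and the new snapshot-L1 check share a single definition.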
block/qcow2.h
@@ -44,6 +44,10 @@
  * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
 #define QCOW_MAX_REFTABLE_SIZE 0x800000
 
+/* 32 MB L1 table is enough for 2 PB images at 64k cluster size
+ * (128 GB for 512 byte clusters, 2 EB for 2 MB clusters) */
+#define QCOW_MAX_L1_SIZE 0x2000000
+
 /* indicate that the refcount of the referenced cluster is exactly one. */
 #define QCOW_OFLAG_COPIED     (1ULL << 63)
 /* indicate that the cluster is compressed (they never have the copied flag) */
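The figures in that comment can be checked directly: with 8-byte L1/L2 entries and L2 tables one cluster in size, each L1 entry of a 32 MB table maps (cluster_size / 8) * cluster_size bytes. A standalone sketch of the arithmetic (not QEMU code):

```c
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t l1_table_bytes = 0x2000000;       /* 32 MB L1 table, as in the comment */
    const uint64_t l1_entries = l1_table_bytes / 8;  /* 4M entries of 8 bytes each */
    const uint64_t cluster_sizes[] = { 512, 64 * 1024, 2 * 1024 * 1024 };

    for (size_t i = 0; i < sizeof(cluster_sizes) / sizeof(cluster_sizes[0]); i++) {
        uint64_t cs = cluster_sizes[i];
        uint64_t per_l1_entry = (cs / 8) * cs;       /* bytes mapped by one L2 table */
        uint64_t total = l1_entries * per_l1_entry;  /* maximum image size */
        printf("cluster %7" PRIu64 " -> %" PRIu64 " GiB\n", cs, total >> 30);
    }
    return 0;
}
/* Prints 128 GiB (512 byte clusters), 2097152 GiB = 2 PiB (64k clusters)
 * and 2147483648 GiB = 2 EiB (2 MB clusters), matching the comment. */
```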
tests/qemu-iotests/080
@@ -30,7 +30,8 @@ status=1	# failure is the default!
 
 _cleanup()
 {
-	_cleanup_test_img
+	rm -f $TEST_IMG.snap
+	_cleanup_test_img
 }
 trap "_cleanup; exit \$status" 0 1 2 3 15
 
@@ -58,6 +59,10 @@ offset_ext_size=$((header_size + 4))
 
 offset_l2_table_0=$((0x40000))
 
+offset_snap1=$((0x70000))
+offset_snap1_l1_offset=$((offset_snap1 + 0))
+offset_snap1_l1_size=$((offset_snap1 + 8))
+
 echo
 echo "== Huge header size =="
 _make_test_img 64M
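The new offsets follow the on-disk layout of a qcow2 snapshot table entry, which starts with the snapshot's L1 table offset (8 bytes) followed by its L1 size (4 bytes), both big-endian; 0x70000 is simply where the first snapshot entry lands in this freshly created test image. A sketch of that prefix (illustrative struct, field names are not QEMU's):

```c
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* First bytes of a qcow2 snapshot table entry as the test addresses them
 * (big-endian on disk). Only the two fields the test pokes are shown; the
 * real entry continues with ID/name lengths and further metadata. */
struct snapshot_entry_prefix {
    uint64_t l1_table_offset;   /* offset_snap1 + 0 */
    uint32_t l1_size;           /* offset_snap1 + 8: entry count the patch now bounds */
};

static_assert(offsetof(struct snapshot_entry_prefix, l1_size) == 8,
              "matches offset_snap1_l1_size = offset_snap1 + 8 in the test");
```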
@@ -161,6 +166,14 @@ poke_file "$TEST_IMG" "$offset_l2_table_0" "\xbf\xff\xff\xff\xff\xff\x00\x00"
 poke_file "$TEST_IMG" "$offset_l2_table_0" "\x80\x00\x00\xff\xff\xff\x00\x00"
 { $QEMU_IMG snapshot -c test $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
 
+echo
+echo "== Invalid snapshot L1 table =="
+_make_test_img 64M
+{ $QEMU_IO -c "write 0 512" $TEST_IMG; } 2>&1 | _filter_qemu_io | _filter_testdir
+{ $QEMU_IMG snapshot -c test $TEST_IMG; } 2>&1 | _filter_testdir
+poke_file "$TEST_IMG" "$offset_snap1_l1_size" "\x10\x00\x00\x00"
+{ $QEMU_IMG convert -s test $TEST_IMG $TEST_IMG.snap; } 2>&1 | _filter_testdir
+
 # success, all done
 echo "*** done"
 rm -f $seq.full
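The new case creates a real snapshot, pokes \x10\x00\x00\x00 into its l1_size field and then loads the snapshot with qemu-img convert -s, which goes through qcow2_snapshot_load_tmp(). A quick decode of the poked value (illustrative, not QEMU code):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Bytes the test writes into the snapshot's big-endian l1_size field. */
    const uint8_t poked[4] = { 0x10, 0x00, 0x00, 0x00 };
    uint32_t l1_size = (uint32_t)poked[0] << 24 | (uint32_t)poked[1] << 16 |
                       (uint32_t)poked[2] << 8  |  poked[3];
    printf("l1_size = 0x%" PRIx32 " entries -> %" PRIu64 " MiB L1 table\n",
           l1_size, ((uint64_t)l1_size * 8) >> 20);
    return 0;
}
```

Without the new check, loading this snapshot would turn into a roughly 2 GiB allocation request driven entirely by image contents; with it, the command fails with the "Snapshot L1 table too large" message expected in the .out hunk below.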
tests/qemu-iotests/080.out
@@ -74,4 +74,10 @@ wrote 512/512 bytes at offset 0
 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
 qemu-img: Could not create snapshot 'test': -27 (File too large)
 qemu-img: Could not create snapshot 'test': -11 (Resource temporarily unavailable)
+
+== Invalid snapshot L1 table ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+qemu-img: Failed to load snapshot: Snapshot L1 table too large
 *** done