/*
 * Block driver for the QCOW version 2 format
 *
 * Copyright (c) 2004-2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "block/block-io.h"
#include "qapi/error.h"
#include "qcow2.h"
#include "qemu/range.h"
#include "qemu/bswap.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "trace.h"

static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size,
                                    uint64_t max);

G_GNUC_WARN_UNUSED_RESULT
static int update_refcount(BlockDriverState *bs,
                           int64_t offset, int64_t length, uint64_t addend,
                           bool decrease, enum qcow2_discard_type type);

static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index);
static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index);

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value);
static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value);
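
/*
 * The "roN" suffix denotes refcount_order N from the image header: each
 * refcount entry is 2^N bits wide, so ro0 packs eight 1-bit refcounts per
 * byte, ro3 uses one byte per entry, ro4 is the default big-endian 16-bit
 * width, and ro6 stores 64 bits per entry.
 */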

static Qcow2GetRefcountFunc *const get_refcount_funcs[] = {
    &get_refcount_ro0,
    &get_refcount_ro1,
    &get_refcount_ro2,
    &get_refcount_ro3,
    &get_refcount_ro4,
    &get_refcount_ro5,
    &get_refcount_ro6
};

static Qcow2SetRefcountFunc *const set_refcount_funcs[] = {
    &set_refcount_ro0,
    &set_refcount_ro1,
    &set_refcount_ro2,
    &set_refcount_ro3,
    &set_refcount_ro4,
    &set_refcount_ro5,
    &set_refcount_ro6
};

/*********************************************************/
/* refcount handling */

static void update_max_refcount_table_index(BDRVQcow2State *s)
{
    unsigned i = s->refcount_table_size - 1;
    while (i > 0 && (s->refcount_table[i] & REFT_OFFSET_MASK) == 0) {
        i--;
    }
    /* Set s->max_refcount_table_index to the index of the last used entry */
    s->max_refcount_table_index = i;
}

int coroutine_fn qcow2_refcount_init(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int refcount_table_size2, i;
    int ret;

    assert(s->refcount_order >= 0 && s->refcount_order <= 6);

    s->get_refcount = get_refcount_funcs[s->refcount_order];
    s->set_refcount = set_refcount_funcs[s->refcount_order];

    assert(s->refcount_table_size <= INT_MAX / REFTABLE_ENTRY_SIZE);
    refcount_table_size2 = s->refcount_table_size * REFTABLE_ENTRY_SIZE;
    s->refcount_table = g_try_malloc(refcount_table_size2);

    if (s->refcount_table_size > 0) {
        if (s->refcount_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_LOAD);
        ret = bdrv_co_pread(bs->file, s->refcount_table_offset,
                            refcount_table_size2, s->refcount_table, 0);
        if (ret < 0) {
            goto fail;
        }
        for(i = 0; i < s->refcount_table_size; i++)
            be64_to_cpus(&s->refcount_table[i]);
        update_max_refcount_table_index(s);
    }
    return 0;
fail:
    return ret;
}

void qcow2_refcount_close(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    g_free(s->refcount_table);
}

static uint64_t get_refcount_ro0(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 8] >> (index % 8)) & 0x1;
}

static void set_refcount_ro0(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 1));
    ((uint8_t *)refcount_array)[index / 8] &= ~(0x1 << (index % 8));
    ((uint8_t *)refcount_array)[index / 8] |= value << (index % 8);
}
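
/*
 * For the sub-byte widths (ro0, ro1 and ro2), several refcount entries share
 * one byte, so the setters first clear the entry's bit field and then OR in
 * the new value; the asserts guard against values that do not fit the field.
 */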

static uint64_t get_refcount_ro1(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 4] >> (2 * (index % 4)))
           & 0x3;
}

static void set_refcount_ro1(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 2));
    ((uint8_t *)refcount_array)[index / 4] &= ~(0x3 << (2 * (index % 4)));
    ((uint8_t *)refcount_array)[index / 4] |= value << (2 * (index % 4));
}

static uint64_t get_refcount_ro2(const void *refcount_array, uint64_t index)
{
    return (((const uint8_t *)refcount_array)[index / 2] >> (4 * (index % 2)))
           & 0xf;
}

static void set_refcount_ro2(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 4));
    ((uint8_t *)refcount_array)[index / 2] &= ~(0xf << (4 * (index % 2)));
    ((uint8_t *)refcount_array)[index / 2] |= value << (4 * (index % 2));
}

static uint64_t get_refcount_ro3(const void *refcount_array, uint64_t index)
{
    return ((const uint8_t *)refcount_array)[index];
}

static void set_refcount_ro3(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 8));
    ((uint8_t *)refcount_array)[index] = value;
}

static uint64_t get_refcount_ro4(const void *refcount_array, uint64_t index)
{
    return be16_to_cpu(((const uint16_t *)refcount_array)[index]);
}

static void set_refcount_ro4(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 16));
    ((uint16_t *)refcount_array)[index] = cpu_to_be16(value);
}

static uint64_t get_refcount_ro5(const void *refcount_array, uint64_t index)
{
    return be32_to_cpu(((const uint32_t *)refcount_array)[index]);
}

static void set_refcount_ro5(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    assert(!(value >> 32));
    ((uint32_t *)refcount_array)[index] = cpu_to_be32(value);
}

static uint64_t get_refcount_ro6(const void *refcount_array, uint64_t index)
{
    return be64_to_cpu(((const uint64_t *)refcount_array)[index]);
}

static void set_refcount_ro6(void *refcount_array, uint64_t index,
                             uint64_t value)
{
    ((uint64_t *)refcount_array)[index] = cpu_to_be64(value);
}

static int load_refcount_block(BlockDriverState *bs,
                               int64_t refcount_block_offset,
                               void **refcount_block)
{
    BDRVQcow2State *s = bs->opaque;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_LOAD);
    return qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                           refcount_block);
}

/*
 * Retrieves the refcount of the cluster given by its index and stores it in
 * *refcount. Returns 0 on success and -errno on failure.
 */
int qcow2_get_refcount(BlockDriverState *bs, int64_t cluster_index,
                       uint64_t *refcount)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t refcount_table_index, block_index;
    int64_t refcount_block_offset;
    int ret;
    void *refcount_block;

    refcount_table_index = cluster_index >> s->refcount_block_bits;
    if (refcount_table_index >= s->refcount_table_size) {
        *refcount = 0;
        return 0;
    }
    refcount_block_offset =
        s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;
    if (!refcount_block_offset) {
        *refcount = 0;
        return 0;
    }

    if (offset_into_cluster(s, refcount_block_offset)) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#" PRIx64
                                " unaligned (reftable index: %#" PRIx64 ")",
                                refcount_block_offset, refcount_table_index);
        return -EIO;
    }

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refcount_block_offset,
                          &refcount_block);
    if (ret < 0) {
        return ret;
    }

    block_index = cluster_index & (s->refcount_block_size - 1);
    *refcount = s->get_refcount(refcount_block, block_index);

    qcow2_cache_put(s->refcount_block_cache, &refcount_block);

    return 0;
}
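
/*
 * The lookup above is a two-level translation: the upper bits of the cluster
 * index select a refcount table entry, which points to a refcount block, and
 * the low s->refcount_block_bits bits select the entry within that block.
 * Table entries that are out of range or unallocated simply read as
 * refcount 0.
 */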

/* Checks if two offsets are described by the same refcount block */
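/* (For example, with 64 KiB clusters and the default 16-bit refcount width,
 * one refcount block holds 32768 entries and thus describes 2 GiB of the
 * image file.) */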
static int in_same_refcount_block(BDRVQcow2State *s, uint64_t offset_a,
                                  uint64_t offset_b)
{
    uint64_t block_a = offset_a >> (s->cluster_bits + s->refcount_block_bits);
    uint64_t block_b = offset_b >> (s->cluster_bits + s->refcount_block_bits);

    return (block_a == block_b);
}

/*
 * Loads a refcount block. If it doesn't exist yet, it is allocated first
 * (including growing the refcount table if needed).
 *
 * Returns 0 on success or -errno in error case
 */
static int alloc_refcount_block(BlockDriverState *bs,
                                int64_t cluster_index, void **refcount_block)
{
    BDRVQcow2State *s = bs->opaque;
    unsigned int refcount_table_index;
    int64_t ret;

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Find the refcount block for the given cluster */
    refcount_table_index = cluster_index >> s->refcount_block_bits;

    if (refcount_table_index < s->refcount_table_size) {

        uint64_t refcount_block_offset =
            s->refcount_table[refcount_table_index] & REFT_OFFSET_MASK;

        /* If it's already there, we're done */
        if (refcount_block_offset) {
            if (offset_into_cluster(s, refcount_block_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
                                        PRIx64 " unaligned (reftable index: "
                                        "%#x)", refcount_block_offset,
                                        refcount_table_index);
                return -EIO;
            }

            return load_refcount_block(bs, refcount_block_offset,
                                       refcount_block);
        }
    }

    /*
     * If we came here, we need to allocate something. Something is at least
     * a cluster for the new refcount block. It may also include a new refcount
     * table if the old refcount table is too small.
     *
     * Note that allocating clusters here needs some special care:
     *
     * - We can't use the normal qcow2_alloc_clusters(), it would try to
     *   increase the refcount and very likely we would end up with an endless
     *   recursion. Instead we must place the refcount blocks in a way that
     *   they can describe themselves.
     *
     * - We need to consider that at this point we are inside update_refcount()
     *   and potentially doing an initial refcount increase. This means that
     *   some clusters have already been allocated by the caller, but their
     *   refcount isn't accurate yet. If we allocate clusters for metadata, we
     *   need to return -EAGAIN to signal the caller that it needs to restart
     *   the search for free clusters.
     *
     * - alloc_clusters_noref and qcow2_free_clusters may load a different
     *   refcount block into the cache
     */

    *refcount_block = NULL;

    /* We write to the refcount table, so we might depend on L2 tables */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    /* Allocate the refcount block itself and mark it as used */
    int64_t new_block = alloc_clusters_noref(bs, s->cluster_size, INT64_MAX);
    if (new_block < 0) {
        return new_block;
    }

    /* The offset must fit in the offset field of the refcount table entry */
    assert((new_block & REFT_OFFSET_MASK) == new_block);

    /* If we're allocating the block at offset 0 then something is wrong */
    if (new_block == 0) {
        qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
                                "allocation of refcount block at offset 0");
        return -EIO;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "qcow2: Allocate refcount block %d for %" PRIx64
            " at %" PRIx64 "\n",
            refcount_table_index, cluster_index << s->cluster_bits, new_block);
#endif

    if (in_same_refcount_block(s, new_block, cluster_index << s->cluster_bits)) {
        /* Zero the new refcount block before updating it */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail;
        }

        memset(*refcount_block, 0, s->cluster_size);

        /* The block describes itself, need to update the cache */
        int block_index = (new_block >> s->cluster_bits) &
            (s->refcount_block_size - 1);
        s->set_refcount(*refcount_block, block_index, 1);
    } else {
        /* Described somewhere else. This can recurse at most twice before we
         * arrive at a block that describes itself. */
        ret = update_refcount(bs, new_block, s->cluster_size, 1, false,
                              QCOW2_DISCARD_NEVER);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret < 0) {
            goto fail;
        }

        /* Initialize the new refcount block only after updating its refcount,
         * update_refcount uses the refcount cache itself */
        ret = qcow2_cache_get_empty(bs, s->refcount_block_cache, new_block,
                                    refcount_block);
        if (ret < 0) {
            goto fail;
        }

        memset(*refcount_block, 0, s->cluster_size);
    }

    /* Now the new refcount block needs to be written to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE);
    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, *refcount_block);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* If the refcount table is big enough, just hook the block up there */
    if (refcount_table_index < s->refcount_table_size) {
        uint64_t data64 = cpu_to_be64(new_block);
        BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_HOOKUP);
        ret = bdrv_pwrite_sync(bs->file, s->refcount_table_offset +
                               refcount_table_index * REFTABLE_ENTRY_SIZE,
                               sizeof(data64), &data64, 0);
        if (ret < 0) {
            goto fail;
        }

        s->refcount_table[refcount_table_index] = new_block;
        /* If there's a hole in s->refcount_table then it can happen
         * that refcount_table_index < s->max_refcount_table_index */
        s->max_refcount_table_index =
            MAX(s->max_refcount_table_index, refcount_table_index);

        /* The new refcount block may be where the caller intended to put its
         * data, so let it restart the search. */
        return -EAGAIN;
    }

    qcow2_cache_put(s->refcount_block_cache, refcount_block);

    /*
     * If we come here, we need to grow the refcount table. Again, a new
     * refcount table needs some space and we can't simply allocate to avoid
     * endless recursion.
     *
     * Therefore let's grab new refcount blocks at the end of the image, which
     * will describe themselves and the new refcount table. This way we can
     * reference them only in the new table and do the switch to the new
     * refcount table at once without producing an inconsistent state in
     * between.
     */
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_GROW);

    /* Calculate the number of refcount blocks needed so far; this will be the
     * basis for calculating the index of the first cluster used for the
     * self-describing refcount structures which we are about to create.
     *
     * Because we reached this point, there cannot be any refcount entries for
     * cluster_index or higher indices yet. However, because new_block has been
     * allocated to describe that cluster (and it will assume this role later
     * on), we cannot use that index; also, new_block may actually have a higher
     * cluster index than cluster_index, so it needs to be taken into account
     * here (and 1 needs to be added to its value because that cluster is used).
     */
    uint64_t blocks_used = DIV_ROUND_UP(MAX(cluster_index + 1,
                                            (new_block >> s->cluster_bits) + 1),
                                        s->refcount_block_size);

    /* Create the new refcount table and blocks */
    uint64_t meta_offset = (blocks_used * s->refcount_block_size) *
                           s->cluster_size;

    ret = qcow2_refcount_area(bs, meta_offset, 0, false,
                              refcount_table_index, new_block);
    if (ret < 0) {
        return ret;
    }

    ret = load_refcount_block(bs, new_block, refcount_block);
    if (ret < 0) {
        return ret;
    }

    /* If we were trying to do the initial refcount update for some cluster
     * allocation, we might have used the same clusters to store newly
     * allocated metadata. Make the caller search some new space. */
    return -EAGAIN;

fail:
    if (*refcount_block != NULL) {
        qcow2_cache_put(s->refcount_block_cache, refcount_block);
    }
    return ret;
}
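
/* Note that both paths above that had to create new refcount metadata return
 * -EAGAIN instead of 0: update_refcount() then rewinds its free-cluster
 * search position and propagates the error, and allocating callers such as
 * qcow2_alloc_clusters() retry, because the new metadata may occupy clusters
 * the caller had already picked for its own data. */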

/*
 * Starting at @start_offset, this function creates new self-covering refcount
 * structures: A new refcount table and refcount blocks which cover all of
 * themselves, and a number of @additional_clusters beyond their end.
 * @start_offset must be at the end of the image file, that is, there must be
 * only empty space beyond it.
 * If @exact_size is false, the refcount table will have 50 % more entries than
 * necessary so it will not need to grow again soon.
 * If @new_refblock_offset is not zero, it contains the offset of a refcount
 * block that should be entered into the new refcount table at index
 * @new_refblock_index.
 *
 * Returns: The offset after the new refcount structures (i.e. where the
 *          @additional_clusters may be placed) on success, -errno on error.
 */
int64_t qcow2_refcount_area(BlockDriverState *bs, uint64_t start_offset,
                            uint64_t additional_clusters, bool exact_size,
                            int new_refblock_index,
                            uint64_t new_refblock_offset)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t total_refblock_count_u64, additional_refblock_count;
    int total_refblock_count, table_size, area_reftable_index, table_clusters;
    int i;
    uint64_t table_offset, block_offset, end_offset;
    int ret;
    uint64_t *new_table;

    assert(!(start_offset % s->cluster_size));

    qcow2_refcount_metadata_size(start_offset / s->cluster_size +
                                 additional_clusters,
                                 s->cluster_size, s->refcount_order,
                                 !exact_size, &total_refblock_count_u64);
    if (total_refblock_count_u64 > QCOW_MAX_REFTABLE_SIZE) {
        return -EFBIG;
    }
    total_refblock_count = total_refblock_count_u64;

    /* Index in the refcount table of the first refcount block to cover the area
     * of refcount structures we are about to create; we know that
     * @total_refblock_count can cover @start_offset, so this will definitely
     * fit into an int. */
    area_reftable_index = (start_offset / s->cluster_size) /
                          s->refcount_block_size;

    if (exact_size) {
        table_size = total_refblock_count;
    } else {
        table_size = total_refblock_count +
                     DIV_ROUND_UP(total_refblock_count, 2);
    }
    /* The qcow2 file can only store the reftable size in number of clusters */
    table_size = ROUND_UP(table_size, s->cluster_size / REFTABLE_ENTRY_SIZE);
    table_clusters = (table_size * REFTABLE_ENTRY_SIZE) / s->cluster_size;

    if (table_size > QCOW_MAX_REFTABLE_SIZE) {
        return -EFBIG;
    }

    new_table = g_try_new0(uint64_t, table_size);

    assert(table_size > 0);
    if (new_table == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    /* Fill the new refcount table */
    if (table_size > s->max_refcount_table_index) {
        /* We're actually growing the reftable */
        memcpy(new_table, s->refcount_table,
               (s->max_refcount_table_index + 1) * REFTABLE_ENTRY_SIZE);
    } else {
        /* Improbable case: We're shrinking the reftable. However, the caller
         * has assured us that there is only empty space beyond @start_offset,
         * so we can simply drop all of the refblocks that won't fit into the
         * new reftable. */
        memcpy(new_table, s->refcount_table, table_size * REFTABLE_ENTRY_SIZE);
    }

    if (new_refblock_offset) {
        assert(new_refblock_index < total_refblock_count);
        new_table[new_refblock_index] = new_refblock_offset;
    }

    /* Count how many new refblocks we have to create */
    additional_refblock_count = 0;
    for (i = area_reftable_index; i < total_refblock_count; i++) {
        if (!new_table[i]) {
            additional_refblock_count++;
        }
    }

    table_offset = start_offset + additional_refblock_count * s->cluster_size;
    end_offset = table_offset + table_clusters * s->cluster_size;

    /* Fill the refcount blocks, and create new ones, if necessary */
    block_offset = start_offset;
    for (i = area_reftable_index; i < total_refblock_count; i++) {
        void *refblock_data;
        uint64_t first_offset_covered;

        /* Reuse an existing refblock if possible, create a new one otherwise */
        if (new_table[i]) {
            ret = qcow2_cache_get(bs, s->refcount_block_cache, new_table[i],
                                  &refblock_data);
            if (ret < 0) {
                goto fail;
            }
        } else {
            ret = qcow2_cache_get_empty(bs, s->refcount_block_cache,
                                        block_offset, &refblock_data);
            if (ret < 0) {
                goto fail;
            }
            memset(refblock_data, 0, s->cluster_size);
            qcow2_cache_entry_mark_dirty(s->refcount_block_cache,
                                         refblock_data);

            new_table[i] = block_offset;
            block_offset += s->cluster_size;
        }

        /* First host offset covered by this refblock */
        first_offset_covered = (uint64_t)i * s->refcount_block_size *
                               s->cluster_size;
        if (first_offset_covered < end_offset) {
            int j, end_index;

            /* Set the refcount of all of the new refcount structures to 1 */

            if (first_offset_covered < start_offset) {
                assert(i == area_reftable_index);
                j = (start_offset - first_offset_covered) / s->cluster_size;
                assert(j < s->refcount_block_size);
            } else {
                j = 0;
            }

            end_index = MIN((end_offset - first_offset_covered) /
                            s->cluster_size,
                            s->refcount_block_size);

            for (; j < end_index; j++) {
                /* The caller guaranteed us this space would be empty */
                assert(s->get_refcount(refblock_data, j) == 0);
                s->set_refcount(refblock_data, j, 1);
            }

            qcow2_cache_entry_mark_dirty(s->refcount_block_cache,
                                         refblock_data);
        }

        qcow2_cache_put(s->refcount_block_cache, &refblock_data);
    }

    assert(block_offset == table_offset);

    /* Write refcount blocks to disk */
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS);
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* Write refcount table to disk */
    for (i = 0; i < total_refblock_count; i++) {
        cpu_to_be64s(&new_table[i]);
    }

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE);
    ret = bdrv_pwrite_sync(bs->file, table_offset,
                           table_size * REFTABLE_ENTRY_SIZE, new_table, 0);
    if (ret < 0) {
        goto fail;
    }

    for (i = 0; i < total_refblock_count; i++) {
        be64_to_cpus(&new_table[i]);
    }

    /* Hook up the new refcount table in the qcow2 header */
    struct QEMU_PACKED {
        uint64_t d64;
        uint32_t d32;
    } data;
    data.d64 = cpu_to_be64(table_offset);
    data.d32 = cpu_to_be32(table_clusters);
    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE);
    ret = bdrv_pwrite_sync(bs->file,
                           offsetof(QCowHeader, refcount_table_offset),
                           sizeof(data), &data, 0);
    if (ret < 0) {
        goto fail;
    }

    /* And switch it in memory */
    uint64_t old_table_offset = s->refcount_table_offset;
    uint64_t old_table_size = s->refcount_table_size;

    g_free(s->refcount_table);
    s->refcount_table = new_table;
    s->refcount_table_size = table_size;
    s->refcount_table_offset = table_offset;
    update_max_refcount_table_index(s);

    /* Free old table. */
    qcow2_free_clusters(bs, old_table_offset,
                        old_table_size * REFTABLE_ENTRY_SIZE,
                        QCOW2_DISCARD_OTHER);

    return end_offset;

fail:
    g_free(new_table);
    return ret;
}
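
/* The ordering above is what makes growing the reftable crash-safe: the new
 * refcount blocks are flushed first, then the new table is written, and only
 * afterwards is the header pointer switched with a synchronous write, so an
 * interruption at any point leaves either the old or the new reftable fully
 * valid. */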

void qcow2_process_discards(BlockDriverState *bs, int ret)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2DiscardRegion *d, *next;

    QTAILQ_FOREACH_SAFE(d, &s->discards, next, next) {
        QTAILQ_REMOVE(&s->discards, d, next);

        /* Discard is optional, ignore the return value */
        if (ret >= 0) {
            int r2 = bdrv_pdiscard(bs->file, d->offset, d->bytes);
            if (r2 < 0) {
                trace_qcow2_process_discards_failed_region(d->offset, d->bytes,
                                                           r2);
            }
        }

        g_free(d);
    }
}

static void update_refcount_discard(BlockDriverState *bs,
                                    uint64_t offset, uint64_t length)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2DiscardRegion *d, *p, *next;

    QTAILQ_FOREACH(d, &s->discards, next) {
        uint64_t new_start = MIN(offset, d->offset);
        uint64_t new_end = MAX(offset + length, d->offset + d->bytes);

        if (new_end - new_start <= length + d->bytes) {
            /* There can't be any overlap, areas ending up here have no
             * references any more and therefore shouldn't get freed another
             * time. */
            assert(d->bytes + length == new_end - new_start);
            d->offset = new_start;
            d->bytes = new_end - new_start;
            goto found;
        }
    }

    d = g_malloc(sizeof(*d));
    *d = (Qcow2DiscardRegion) {
        .bs = bs,
        .offset = offset,
        .bytes = length,
    };
    QTAILQ_INSERT_TAIL(&s->discards, d, next);

found:
    /* Merge discard requests if they are adjacent now */
    QTAILQ_FOREACH_SAFE(p, &s->discards, next, next) {
        if (p == d
            || p->offset > d->offset + d->bytes
            || d->offset > p->offset + p->bytes)
        {
            continue;
        }

        /* Still no overlap possible */
        assert(p->offset == d->offset + d->bytes
               || d->offset == p->offset + p->bytes);

        QTAILQ_REMOVE(&s->discards, p, next);
        d->offset = MIN(d->offset, p->offset);
        d->bytes += p->bytes;
        g_free(p);
    }
}
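
/* For example, freeing [0x10000, 0x20000) and later [0x20000, 0x30000) first
 * extends the queued region in the loop above, and the pass after "found:"
 * merges any regions that have become adjacent, so one discard for
 * [0x10000, 0x30000) is issued later by qcow2_process_discards(). */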

/* XXX: cache several refcount block clusters ? */
/* @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added */
static int update_refcount(BlockDriverState *bs,
                           int64_t offset,
                           int64_t length,
                           uint64_t addend,
                           bool decrease,
                           enum qcow2_discard_type type)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t start, last, cluster_offset;
    void *refcount_block = NULL;
    int64_t old_table_index = -1;
    int ret;

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "update_refcount: offset=%" PRId64 " size=%" PRId64
            " addend=%s%" PRIu64 "\n", offset, length, decrease ? "-" : "",
            addend);
#endif
    if (length < 0) {
        return -EINVAL;
    } else if (length == 0) {
        return 0;
    }

    if (decrease) {
        qcow2_cache_set_dependency(bs, s->refcount_block_cache,
                                   s->l2_table_cache);
    }
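
    /* When refcounts are decreased, clusters may become free for reuse; the
     * dependency set up above makes sure the L2 tables reach the disk before
     * the refcount blocks that mark those clusters as free do. */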
    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + length - 1);
    for(cluster_offset = start; cluster_offset <= last;
        cluster_offset += s->cluster_size)
    {
        int block_index;
        uint64_t refcount;
        int64_t cluster_index = cluster_offset >> s->cluster_bits;
        int64_t table_index = cluster_index >> s->refcount_block_bits;

        /* Load the refcount block and allocate it if needed */
        if (table_index != old_table_index) {
            if (refcount_block) {
                qcow2_cache_put(s->refcount_block_cache, &refcount_block);
            }
            ret = alloc_refcount_block(bs, cluster_index, &refcount_block);
            /* If the caller needs to restart the search for free clusters,
             * try the same ones first to see if they're still free. */
            if (ret == -EAGAIN) {
                if (s->free_cluster_index > (start >> s->cluster_bits)) {
                    s->free_cluster_index = (start >> s->cluster_bits);
                }
            }
            if (ret < 0) {
                goto fail;
            }
        }
        old_table_index = table_index;

        qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refcount_block);

        /* we can update the count and save it */
        block_index = cluster_index & (s->refcount_block_size - 1);

        refcount = s->get_refcount(refcount_block, block_index);
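        /* Reject updates that would wrap: a decrease must not take the
         * refcount below zero, and an increase must neither overflow nor
         * exceed the maximum for the image's refcount width. */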
        if (decrease ? (refcount - addend > refcount)
                     : (refcount + addend < refcount ||
                        refcount + addend > s->refcount_max))
        {
            ret = -EINVAL;
            goto fail;
        }
        if (decrease) {
            refcount -= addend;
        } else {
            refcount += addend;
        }
        if (refcount == 0 && cluster_index < s->free_cluster_index) {
            s->free_cluster_index = cluster_index;
        }
        s->set_refcount(refcount_block, block_index, refcount);

        if (refcount == 0) {
            void *table;

            table = qcow2_cache_is_table_offset(s->refcount_block_cache,
                                                offset);
            if (table != NULL) {
                qcow2_cache_put(s->refcount_block_cache, &refcount_block);
                old_table_index = -1;
                qcow2_cache_discard(s->refcount_block_cache, table);
            }

            table = qcow2_cache_is_table_offset(s->l2_table_cache, offset);
            if (table != NULL) {
                qcow2_cache_discard(s->l2_table_cache, table);
            }

            if (s->discard_passthrough[type]) {
                update_refcount_discard(bs, cluster_offset, s->cluster_size);
            }
        }
    }

    ret = 0;
fail:
    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

    /* Write last changed block to disk */
    if (refcount_block) {
        qcow2_cache_put(s->refcount_block_cache, &refcount_block);
    }

    /*
     * Try to undo any updates if an error is returned (this may succeed in
     * some cases, like ENOSPC for allocating a new refcount block)
     */
    if (ret < 0) {
        int dummy;
        dummy = update_refcount(bs, offset, cluster_offset - offset, addend,
                                !decrease, QCOW2_DISCARD_NEVER);
        (void)dummy;
    }

    return ret;
}

/*
 * Increases or decreases the refcount of a given cluster.
 *
 * @addend is the absolute value of the addend; if @decrease is set, @addend
 * will be subtracted from the current refcount, otherwise it will be added.
 *
 * On success 0 is returned; on failure -errno is returned.
 */
int qcow2_update_cluster_refcount(BlockDriverState *bs,
                                  int64_t cluster_index,
                                  uint64_t addend, bool decrease,
                                  enum qcow2_discard_type type)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    ret = update_refcount(bs, cluster_index << s->cluster_bits, 1, addend,
                          decrease, type);
    if (ret < 0) {
        return ret;
    }

    return 0;
}


/*********************************************************/
/* cluster allocation functions */


/* return < 0 if error */
static int64_t alloc_clusters_noref(BlockDriverState *bs, uint64_t size,
                                    uint64_t max)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t i, nb_clusters, refcount;
    int ret;

    /* We can't allocate clusters if they may still be queued for discard. */
    if (s->cache_discards) {
        qcow2_process_discards(bs, 0);
    }

    nb_clusters = size_to_clusters(s, size);
retry:
    for(i = 0; i < nb_clusters; i++) {
        uint64_t next_cluster_index = s->free_cluster_index++;
        ret = qcow2_get_refcount(bs, next_cluster_index, &refcount);

        if (ret < 0) {
            return ret;
        } else if (refcount != 0) {
            goto retry;
        }
    }

    /* Make sure that all offsets in the "allocated" range are representable
     * in the requested max */
    if (s->free_cluster_index > 0 &&
        s->free_cluster_index - 1 > (max >> s->cluster_bits))
    {
        return -EFBIG;
    }

#ifdef DEBUG_ALLOC2
    fprintf(stderr, "alloc_clusters: size=%" PRId64 " -> %" PRId64 "\n",
            size,
            (s->free_cluster_index - nb_clusters) << s->cluster_bits);
#endif
    return (s->free_cluster_index - nb_clusters) << s->cluster_bits;
}
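
/* The scan above is a simple first-fit search: free_cluster_index advances
 * over the image and the run restarts whenever a non-zero refcount is found,
 * so the returned range of nb_clusters clusters is contiguous and entirely
 * unreferenced. */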

int64_t qcow2_alloc_clusters(BlockDriverState *bs, uint64_t size)
{
    int64_t offset;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC);
    do {
        offset = alloc_clusters_noref(bs, size, QCOW_MAX_CLUSTER_OFFSET);
        if (offset < 0) {
            return offset;
        }

        ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return offset;
}
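
/* The -EAGAIN loop matters here: update_refcount() may have had to allocate
 * new refcount metadata in exactly the clusters that were just picked, in
 * which case the whole allocation is retried with a fresh search. */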

int64_t coroutine_fn qcow2_alloc_clusters_at(BlockDriverState *bs, uint64_t offset,
                                             int64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t cluster_index, refcount;
    uint64_t i;
    int ret;

    assert(nb_clusters >= 0);
    if (nb_clusters == 0) {
        return 0;
    }

    do {
        /* Check how many clusters there are free */
        cluster_index = offset >> s->cluster_bits;
        for(i = 0; i < nb_clusters; i++) {
            ret = qcow2_get_refcount(bs, cluster_index++, &refcount);
            if (ret < 0) {
                return ret;
            } else if (refcount != 0) {
                break;
            }
        }

        /* And then allocate them */
        ret = update_refcount(bs, offset, i << s->cluster_bits, 1, false,
                              QCOW2_DISCARD_NEVER);
    } while (ret == -EAGAIN);

    if (ret < 0) {
        return ret;
    }

    return i;
}

/* only used to allocate compressed sectors. We try to allocate
   contiguous sectors. size must be <= cluster_size */
int64_t coroutine_fn qcow2_alloc_bytes(BlockDriverState *bs, int size)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;
    size_t free_in_cluster;
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
    assert(size > 0 && size <= s->cluster_size);
    assert(!s->free_byte_offset || offset_into_cluster(s, s->free_byte_offset));

    offset = s->free_byte_offset;

    if (offset) {
        uint64_t refcount;
        ret = qcow2_get_refcount(bs, offset >> s->cluster_bits, &refcount);
        if (ret < 0) {
            return ret;
        }

        if (refcount == s->refcount_max) {
            offset = 0;
        }
    }
|
|
|
|
|
|
|
|
free_in_cluster = s->cluster_size - offset_into_cluster(s, offset);
|
2015-06-24 08:05:25 +03:00
|
|
|
do {
|
|
|
|
if (!offset || free_in_cluster < size) {
|
2018-11-14 02:03:18 +03:00
|
|
|
int64_t new_cluster;
|
|
|
|
|
|
|
|
new_cluster = alloc_clusters_noref(bs, s->cluster_size,
|
|
|
|
MIN(s->cluster_offset_mask,
|
|
|
|
QCOW_MAX_CLUSTER_OFFSET));
|
2015-06-24 08:05:25 +03:00
|
|
|
if (new_cluster < 0) {
|
|
|
|
return new_cluster;
|
|
|
|
}
|
2015-02-06 17:39:16 +03:00
|
|
|
|
2017-11-03 17:18:52 +03:00
|
|
|
if (new_cluster == 0) {
|
|
|
|
qcow2_signal_corruption(bs, true, -1, -1, "Preventing invalid "
|
|
|
|
"allocation of compressed cluster "
|
|
|
|
"at offset 0");
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
2015-06-24 08:05:25 +03:00
|
|
|
if (!offset || ROUND_UP(offset, s->cluster_size) != new_cluster) {
|
|
|
|
offset = new_cluster;
|
2015-09-11 19:47:51 +03:00
|
|
|
free_in_cluster = s->cluster_size;
|
|
|
|
} else {
|
|
|
|
free_in_cluster += s->cluster_size;
|
2015-06-24 08:05:25 +03:00
|
|
|
}
|
2009-05-28 18:07:04 +04:00
|
|
|
}
|
2010-09-17 18:57:48 +04:00
|
|
|
|
2015-06-24 08:05:25 +03:00
|
|
|
assert(offset);
|
|
|
|
ret = update_refcount(bs, offset, size, 1, false, QCOW2_DISCARD_NEVER);
|
2015-09-11 19:47:51 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
offset = 0;
|
|
|
|
}
|
2015-06-24 08:05:25 +03:00
|
|
|
} while (ret == -EAGAIN);
|
2015-02-06 17:39:16 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The cluster refcount was incremented; refcount blocks must be flushed
|
|
|
|
* before the caller's L2 table updates. */
|
2013-03-04 18:02:32 +04:00
|
|
|
qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
|
2015-02-06 17:39:16 +03:00
|
|
|
|
|
|
|
s->free_byte_offset = offset + size;
|
|
|
|
if (!offset_into_cluster(s, s->free_byte_offset)) {
|
|
|
|
s->free_byte_offset = 0;
|
|
|
|
}
|
|
|
|
|
2009-05-28 18:07:04 +04:00
|
|
|
return offset;
|
|
|
|
}
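
/*
 * Frees the clusters covering the given byte range by decrementing their
 * refcounts.  Errors are only reported to stderr; the affected clusters
 * may be leaked in that case (see the TODO below).
 */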
void qcow2_free_clusters(BlockDriverState *bs,
                         int64_t offset, int64_t size,
                         enum qcow2_discard_type type)
{
    int ret;

    BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_FREE);
    ret = update_refcount(bs, offset, size, 1, true, type);
    if (ret < 0) {
        fprintf(stderr, "qcow2_free_clusters failed: %s\n", strerror(-ret));
        /* TODO Remember the clusters to free them later and avoid leaking */
    }
}

/*
 * Free a cluster using its L2 entry (handles clusters of all types, e.g.
 * normal cluster, compressed cluster, etc.)
 */
void qcow2_free_any_cluster(BlockDriverState *bs, uint64_t l2_entry,
                            enum qcow2_discard_type type)
{
    BDRVQcow2State *s = bs->opaque;
    QCow2ClusterType ctype = qcow2_get_cluster_type(bs, l2_entry);

    if (has_data_file(bs)) {
        if (s->discard_passthrough[type] &&
            (ctype == QCOW2_CLUSTER_NORMAL ||
             ctype == QCOW2_CLUSTER_ZERO_ALLOC))
        {
            bdrv_pdiscard(s->data_file, l2_entry & L2E_OFFSET_MASK,
                          s->cluster_size);
        }
        return;
    }

    switch (ctype) {
    case QCOW2_CLUSTER_COMPRESSED:
        {
            uint64_t coffset;
            int csize;

            qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
            qcow2_free_clusters(bs, coffset, csize, type);
        }
        break;
    case QCOW2_CLUSTER_NORMAL:
    case QCOW2_CLUSTER_ZERO_ALLOC:
        if (offset_into_cluster(s, l2_entry & L2E_OFFSET_MASK)) {
            qcow2_signal_corruption(bs, false, -1, -1,
                                    "Cannot free unaligned cluster %#llx",
                                    l2_entry & L2E_OFFSET_MASK);
        } else {
            qcow2_free_clusters(bs, l2_entry & L2E_OFFSET_MASK,
                                s->cluster_size, type);
        }
        break;
    case QCOW2_CLUSTER_ZERO_PLAIN:
    case QCOW2_CLUSTER_UNALLOCATED:
        break;
    default:
        abort();
    }
}
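
/*
 * Writes all dirty entries of the L2 table cache and (if refcounts are
 * tracked) the refcount block cache back to the image file.  The file
 * itself is not flushed; see qcow2_flush_caches() for that.
 */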
int qcow2_write_caches(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    ret = qcow2_cache_write(bs, s->l2_table_cache);
    if (ret < 0) {
        return ret;
    }

    if (qcow2_need_accurate_refcounts(s)) {
        ret = qcow2_cache_write(bs, s->refcount_block_cache);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}
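
/*
 * Writes back both metadata caches and then flushes the underlying
 * image file, making the metadata stable on disk.
 */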
int qcow2_flush_caches(BlockDriverState *bs)
{
    int ret = qcow2_write_caches(bs);
    if (ret < 0) {
        return ret;
    }

    return bdrv_flush(bs->file->bs);
}

/*********************************************************/
/* snapshots and image creation */


/* update the refcounts of snapshots and the copied flag */
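/*
 * @addend is applied to the refcount of every cluster reachable from the
 * L1 table at @l1_table_offset: +1 or -1 adjusts the counts (e.g. when a
 * snapshot starts or stops referencing these tables), while 0 leaves the
 * refcounts alone and only recomputes the QCOW_OFLAG_COPIED flags.
 */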
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
    int64_t l1_table_offset, int l1_size, int addend)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l1_table, *l2_slice, l2_offset, entry, l1_size2, refcount;
    bool l1_allocated = false;
    int64_t old_entry, old_l2_offset;
    unsigned slice, slice_size2, n_slices;
    int i, j, l1_modified = 0;
    int ret;

    assert(addend >= -1 && addend <= 1);

    l2_slice = NULL;
    l1_table = NULL;
    l1_size2 = l1_size * L1E_SIZE;
    slice_size2 = s->l2_slice_size * l2_entry_size(s);
    n_slices = s->cluster_size / slice_size2;

    s->cache_discards = true;

    /* WARNING: qcow2_snapshot_goto relies on this function not using the
     * l1_table_offset when it is the current s->l1_table_offset! Be careful
     * when changing this! */
    if (l1_table_offset != s->l1_table_offset) {
        l1_table = g_try_malloc0(l1_size2);
        if (l1_size2 && l1_table == NULL) {
            ret = -ENOMEM;
            goto fail;
        }
        l1_allocated = true;

        ret = bdrv_pread(bs->file, l1_table_offset, l1_size2, l1_table, 0);
        if (ret < 0) {
            goto fail;
        }

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    } else {
        assert(l1_size == s->l1_size);
        l1_table = s->l1_table;
        l1_allocated = false;
    }

    for (i = 0; i < l1_size; i++) {
        l2_offset = l1_table[i];
        if (l2_offset) {
            old_l2_offset = l2_offset;
            l2_offset &= L1E_OFFSET_MASK;

            if (offset_into_cluster(s, l2_offset)) {
                qcow2_signal_corruption(bs, true, -1, -1, "L2 table offset %#"
                                        PRIx64 " unaligned (L1 index: %#x)",
                                        l2_offset, i);
                ret = -EIO;
                goto fail;
            }

            for (slice = 0; slice < n_slices; slice++) {
                ret = qcow2_cache_get(bs, s->l2_table_cache,
                                      l2_offset + slice * slice_size2,
                                      (void **) &l2_slice);
                if (ret < 0) {
                    goto fail;
                }

                for (j = 0; j < s->l2_slice_size; j++) {
                    uint64_t cluster_index;
                    uint64_t offset;

                    entry = get_l2_entry(s, l2_slice, j);
                    old_entry = entry;
                    entry &= ~QCOW_OFLAG_COPIED;
                    offset = entry & L2E_OFFSET_MASK;

                    switch (qcow2_get_cluster_type(bs, entry)) {
                    case QCOW2_CLUSTER_COMPRESSED:
                        if (addend != 0) {
                            uint64_t coffset;
                            int csize;

                            qcow2_parse_compressed_l2_entry(bs, entry,
                                                            &coffset, &csize);
                            ret = update_refcount(
                                bs, coffset, csize,
                                abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                            if (ret < 0) {
                                goto fail;
                            }
                        }
                        /* compressed clusters are never modified */
                        refcount = 2;
                        break;

                    case QCOW2_CLUSTER_NORMAL:
                    case QCOW2_CLUSTER_ZERO_ALLOC:
                        if (offset_into_cluster(s, offset)) {
                            /* Here l2_index means table (not slice) index */
                            int l2_index = slice * s->l2_slice_size + j;
                            qcow2_signal_corruption(
                                bs, true, -1, -1, "Cluster "
                                "allocation offset %#" PRIx64
                                " unaligned (L2 offset: %#"
                                PRIx64 ", L2 index: %#x)",
                                offset, l2_offset, l2_index);
                            ret = -EIO;
                            goto fail;
                        }

                        cluster_index = offset >> s->cluster_bits;
                        assert(cluster_index);
                        if (addend != 0) {
                            ret = qcow2_update_cluster_refcount(
                                bs, cluster_index, abs(addend), addend < 0,
                                QCOW2_DISCARD_SNAPSHOT);
                            if (ret < 0) {
                                goto fail;
                            }
                        }

                        ret = qcow2_get_refcount(bs, cluster_index, &refcount);
                        if (ret < 0) {
                            goto fail;
                        }
                        break;

                    case QCOW2_CLUSTER_ZERO_PLAIN:
                    case QCOW2_CLUSTER_UNALLOCATED:
                        refcount = 0;
                        break;

                    default:
                        abort();
                    }

                    if (refcount == 1) {
                        entry |= QCOW_OFLAG_COPIED;
                    }
                    if (entry != old_entry) {
                        if (addend > 0) {
                            qcow2_cache_set_dependency(bs, s->l2_table_cache,
                                                       s->refcount_block_cache);
                        }
                        set_l2_entry(s, l2_slice, j, entry);
                        qcow2_cache_entry_mark_dirty(s->l2_table_cache,
                                                     l2_slice);
                    }
                }

                qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
            }

            if (addend != 0) {
                ret = qcow2_update_cluster_refcount(bs, l2_offset >>
                                                        s->cluster_bits,
                                                    abs(addend), addend < 0,
                                                    QCOW2_DISCARD_SNAPSHOT);
                if (ret < 0) {
                    goto fail;
                }
            }
            ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                     &refcount);
            if (ret < 0) {
                goto fail;
            } else if (refcount == 1) {
                l2_offset |= QCOW_OFLAG_COPIED;
            }
            if (l2_offset != old_l2_offset) {
                l1_table[i] = l2_offset;
                l1_modified = 1;
            }
        }
    }

    ret = bdrv_flush(bs);
fail:
    if (l2_slice) {
        qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
    }

    s->cache_discards = false;
    qcow2_process_discards(bs, ret);

    /* Update L1 only if it isn't deleted anyway (addend = -1) */
    if (ret == 0 && addend >= 0 && l1_modified) {
        for (i = 0; i < l1_size; i++) {
            cpu_to_be64s(&l1_table[i]);
        }

        ret = bdrv_pwrite_sync(bs->file, l1_table_offset, l1_size2, l1_table,
                               0);

        for (i = 0; i < l1_size; i++) {
            be64_to_cpus(&l1_table[i]);
        }
    }
    if (l1_allocated) {
        g_free(l1_table);
    }
    return ret;
}

/*********************************************************/
/* refcount checking functions */


static uint64_t refcount_array_byte_size(BDRVQcow2State *s, uint64_t entries)
{
    /* This assertion holds because there is no way we can address more than
     * 2^(64 - 9) clusters at once (with cluster size 512 = 2^9, and because
     * offsets have to be representable in bytes); due to every cluster
     * corresponding to one refcount entry, we are well below that limit */
    assert(entries < (UINT64_C(1) << (64 - 9)));

    /* Thanks to the assertion this will not overflow, because
     * s->refcount_order < 7.
     * (note: x << s->refcount_order == x * s->refcount_bits) */
    return DIV_ROUND_UP(entries << s->refcount_order, 8);
}
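
/*
 * Worked example: with refcount_order = 4 (i.e. 16-bit refcount entries),
 * refcount_array_byte_size(s, 3) = DIV_ROUND_UP(3 << 4, 8) = 6 bytes,
 * i.e. three 2-byte entries.  With refcount_order = 0 (1-bit entries),
 * 3 entries still round up to DIV_ROUND_UP(3, 8) = 1 byte.
 */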

/**
 * Reallocates *array so that it can hold new_size entries. *size must contain
 * the current number of entries in *array. If the reallocation fails, *array
 * and *size will not be modified and -errno will be returned. If the
 * reallocation is successful, *array will be set to the new buffer, *size
 * will be set to new_size and 0 will be returned. The size of the reallocated
 * refcount array buffer will be aligned to a cluster boundary, and the newly
 * allocated area will be zeroed.
 */
static int realloc_refcount_array(BDRVQcow2State *s, void **array,
                                  int64_t *size, int64_t new_size)
{
    int64_t old_byte_size, new_byte_size;
    void *new_ptr;

    /* Round to clusters so the array can be directly written to disk */
    old_byte_size = size_to_clusters(s, refcount_array_byte_size(s, *size))
                    * s->cluster_size;
    new_byte_size = size_to_clusters(s, refcount_array_byte_size(s, new_size))
                    * s->cluster_size;

    if (new_byte_size == old_byte_size) {
        *size = new_size;
        return 0;
    }

    assert(new_byte_size > 0);

    if (new_byte_size > SIZE_MAX) {
        return -ENOMEM;
    }

    new_ptr = g_try_realloc(*array, new_byte_size);
    if (!new_ptr) {
        return -ENOMEM;
    }

    if (new_byte_size > old_byte_size) {
        memset((char *)new_ptr + old_byte_size, 0,
               new_byte_size - old_byte_size);
    }

    *array = new_ptr;
    *size = new_size;

    return 0;
}
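
/*
 * Typical usage sketch: grow the array on demand while scanning clusters,
 * e.g.
 *
 *     if (k >= *refcount_table_size) {
 *         ret = realloc_refcount_array(s, refcount_table,
 *                                      refcount_table_size, k + 1);
 *         if (ret < 0) {
 *             ...
 *         }
 *     }
 *
 * which is exactly the pattern qcow2_inc_refcounts_imrt() below follows.
 */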

/*
 * Increases the refcount for a range of clusters in a given refcount table.
 * This is used to construct a temporary refcount table out of L1 and L2 tables
 * which can be compared to the refcount table saved in the image.
 *
 * Modifies the number of errors in res.
 */
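/* ("imrt" in the name is short for "in-memory refcount table".) */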
int qcow2_inc_refcounts_imrt(BlockDriverState *bs, BdrvCheckResult *res,
                             void **refcount_table,
                             int64_t *refcount_table_size,
                             int64_t offset, int64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t start, last, cluster_offset, k, refcount;
    int64_t file_len;
    int ret;

    if (size <= 0) {
        return 0;
    }

    file_len = bdrv_getlength(bs->file->bs);
    if (file_len < 0) {
        return file_len;
    }

    /*
     * Last cluster of qcow2 image may be semi-allocated, so it may be OK to
     * reference some space after file end but it should be less than one
     * cluster.
     */
    if (offset + size - file_len >= s->cluster_size) {
        fprintf(stderr, "ERROR: counting reference for region exceeding the "
                "end of the file by one cluster or more: offset 0x%" PRIx64
                " size 0x%" PRIx64 "\n", offset, size);
        res->corruptions++;
        return 0;
    }

    start = start_of_cluster(s, offset);
    last = start_of_cluster(s, offset + size - 1);
    for (cluster_offset = start; cluster_offset <= last;
         cluster_offset += s->cluster_size) {
        k = cluster_offset >> s->cluster_bits;
        if (k >= *refcount_table_size) {
            ret = realloc_refcount_array(s, refcount_table,
                                         refcount_table_size, k + 1);
            if (ret < 0) {
                res->check_errors++;
                return ret;
            }
        }

        refcount = s->get_refcount(*refcount_table, k);
        if (refcount == s->refcount_max) {
            fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
                    "\n", cluster_offset);
            fprintf(stderr, "Use qemu-img amend to increase the refcount entry "
                    "width or qemu-img convert to create a clean copy if the "
                    "image cannot be opened for writing\n");
            res->corruptions++;
            continue;
        }
        s->set_refcount(*refcount_table, k, refcount + 1);
    }

    return 0;
}

/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
    CHECK_FRAG_INFO = 0x2, /* update BlockFragInfo counters */
};

/*
 * Fix L2 entry by making it QCOW2_CLUSTER_ZERO_PLAIN (or making all its present
 * subclusters QCOW2_SUBCLUSTER_ZERO_PLAIN).
 *
 * This function decrements res->corruptions on success, so the caller is
 * responsible to increment res->corruptions prior to the call.
 *
 * On failure in-memory @l2_table may be modified.
 */
static int fix_l2_entry_by_zero(BlockDriverState *bs, BdrvCheckResult *res,
                                uint64_t l2_offset,
                                uint64_t *l2_table, int l2_index, bool active,
                                bool *metadata_overlap)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;
    int idx = l2_index * (l2_entry_size(s) / sizeof(uint64_t));
    uint64_t l2e_offset = l2_offset + (uint64_t)l2_index * l2_entry_size(s);
    int ign = active ? QCOW2_OL_ACTIVE_L2 : QCOW2_OL_INACTIVE_L2;

    if (has_subclusters(s)) {
        uint64_t l2_bitmap = get_l2_bitmap(s, l2_table, l2_index);

        /* Allocated subclusters become zero */
        l2_bitmap |= l2_bitmap << 32;
        l2_bitmap &= QCOW_L2_BITMAP_ALL_ZEROES;

        set_l2_bitmap(s, l2_table, l2_index, l2_bitmap);
        set_l2_entry(s, l2_table, l2_index, 0);
    } else {
        set_l2_entry(s, l2_table, l2_index, QCOW_OFLAG_ZERO);
    }

    ret = qcow2_pre_write_overlap_check(bs, ign, l2e_offset, l2_entry_size(s),
                                        false);
    if (metadata_overlap) {
        *metadata_overlap = ret < 0;
    }
    if (ret < 0) {
        fprintf(stderr, "ERROR: Overlap check failed\n");
        goto fail;
    }

    ret = bdrv_pwrite_sync(bs->file, l2e_offset, l2_entry_size(s),
                           &l2_table[idx], 0);
    if (ret < 0) {
        fprintf(stderr, "ERROR: Failed to overwrite L2 "
                "table entry: %s\n", strerror(-ret));
        goto fail;
    }

    res->corruptions--;
    res->corruptions_fixed++;
    return 0;

fail:
    res->check_errors++;
    return ret;
}

/*
 * Increases the refcount in the given refcount table for all clusters
 * referenced in the L2 table. While doing so, performs some checks on L2
 * entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size, int64_t l2_offset,
                              int flags, BdrvCheckMode fix, bool active)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t l2_entry, l2_bitmap;
    uint64_t next_contiguous_offset = 0;
    int i, ret;
    size_t l2_size_bytes = s->l2_size * l2_entry_size(s);
    g_autofree uint64_t *l2_table = g_malloc(l2_size_bytes);
    bool metadata_overlap;

    /* Read L2 table from disk */
    ret = bdrv_pread(bs->file, l2_offset, l2_size_bytes, l2_table, 0);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l2\n");
        res->check_errors++;
        return ret;
    }

    /* Do the actual checks */
    for (i = 0; i < s->l2_size; i++) {
        uint64_t coffset;
        int csize;
        QCow2ClusterType type;

        l2_entry = get_l2_entry(s, l2_table, i);
        l2_bitmap = get_l2_bitmap(s, l2_table, i);
        type = qcow2_get_cluster_type(bs, l2_entry);

        if (type != QCOW2_CLUSTER_COMPRESSED) {
            /* Check reserved bits of Standard Cluster Descriptor */
            if (l2_entry & L2E_STD_RESERVED_MASK) {
                fprintf(stderr, "ERROR found l2 entry with reserved bits set: "
                        "%" PRIx64 "\n", l2_entry);
                res->corruptions++;
            }
        }

        switch (type) {
        case QCOW2_CLUSTER_COMPRESSED:
            /* Compressed clusters don't have QCOW_OFLAG_COPIED */
            if (l2_entry & QCOW_OFLAG_COPIED) {
                fprintf(stderr, "ERROR: coffset=0x%" PRIx64 ": "
                        "copied flag must never be set for compressed "
                        "clusters\n", l2_entry & s->cluster_offset_mask);
                l2_entry &= ~QCOW_OFLAG_COPIED;
                res->corruptions++;
            }

            if (has_data_file(bs)) {
                fprintf(stderr, "ERROR compressed cluster %d with data file, "
                        "entry=0x%" PRIx64 "\n", i, l2_entry);
                res->corruptions++;
                break;
            }

            if (l2_bitmap) {
                fprintf(stderr, "ERROR compressed cluster %d with non-zero "
                        "subcluster allocation bitmap, entry=0x%" PRIx64 "\n",
                        i, l2_entry);
                res->corruptions++;
                break;
            }

            /* Mark cluster as used */
            qcow2_parse_compressed_l2_entry(bs, l2_entry, &coffset, &csize);
            ret = qcow2_inc_refcounts_imrt(
                bs, res, refcount_table, refcount_table_size, coffset, csize);
            if (ret < 0) {
                return ret;
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                res->bfi.compressed_clusters++;

                /*
                 * Compressed clusters are fragmented by nature. Since they
                 * take up sub-sector space but we only have sector granularity
                 * I/O we need to re-read the same sectors even for adjacent
                 * compressed clusters.
                 */
                res->bfi.fragmented_clusters++;
            }
            break;

        case QCOW2_CLUSTER_ZERO_ALLOC:
        case QCOW2_CLUSTER_NORMAL:
        {
            uint64_t offset = l2_entry & L2E_OFFSET_MASK;

            if ((l2_bitmap >> 32) & l2_bitmap) {
                res->corruptions++;
                fprintf(stderr, "ERROR offset=%" PRIx64 ": Allocated "
                        "cluster has corrupted subcluster allocation bitmap\n",
                        offset);
            }

            /* Correct offsets are cluster aligned */
            if (offset_into_cluster(s, offset)) {
                bool contains_data;
                res->corruptions++;

                if (has_subclusters(s)) {
                    contains_data = (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC);
                } else {
                    contains_data = !(l2_entry & QCOW_OFLAG_ZERO);
                }

                if (!contains_data) {
                    fprintf(stderr, "%s offset=%" PRIx64 ": Preallocated "
                            "cluster is not properly aligned; L2 entry "
                            "corrupted.\n",
                            fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR",
                            offset);
                    if (fix & BDRV_FIX_ERRORS) {
                        ret = fix_l2_entry_by_zero(bs, res, l2_offset,
                                                   l2_table, i, active,
                                                   &metadata_overlap);
                        if (metadata_overlap) {
                            /*
                             * Something is seriously wrong, so abort checking
                             * this L2 table.
                             */
                            return ret;
                        }

                        if (ret == 0) {
                            /*
                             * Skip marking the cluster as used
                             * (it is unused now).
                             */
                            continue;
                        }

                        /*
                         * Failed to fix.
                         * Do not abort, continue checking the rest of this
                         * L2 table's entries.
                         */
                    }
                } else {
                    fprintf(stderr, "ERROR offset=%" PRIx64 ": Data cluster is "
                            "not properly aligned; L2 entry corrupted.\n",
                            offset);
                }
            }

            if (flags & CHECK_FRAG_INFO) {
                res->bfi.allocated_clusters++;
                if (next_contiguous_offset &&
                    offset != next_contiguous_offset) {
                    res->bfi.fragmented_clusters++;
                }
                next_contiguous_offset = offset + s->cluster_size;
            }

            /* Mark cluster as used */
            if (!has_data_file(bs)) {
                ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table,
                                               refcount_table_size,
                                               offset, s->cluster_size);
                if (ret < 0) {
                    return ret;
                }
            }
            break;
        }

        case QCOW2_CLUSTER_ZERO_PLAIN:
            /* Impossible when image has subclusters */
            assert(!l2_bitmap);
            break;

        case QCOW2_CLUSTER_UNALLOCATED:
            if (l2_bitmap & QCOW_L2_BITMAP_ALL_ALLOC) {
                res->corruptions++;
                fprintf(stderr, "ERROR: Unallocated "
                        "cluster has non-zero subcluster allocation map\n");
            }
            break;

        default:
            abort();
        }
    }

    return 0;
}

/*
 * Increases the refcount for the L1 table, its L2 tables and all referenced
 * clusters in the given refcount table. While doing so, performs some checks
 * on L1 and L2 entries.
 *
 * Returns the number of errors found by the checks or -errno if an internal
 * error occurred.
 */
static int check_refcounts_l1(BlockDriverState *bs,
                              BdrvCheckResult *res,
                              void **refcount_table,
                              int64_t *refcount_table_size,
                              int64_t l1_table_offset, int l1_size,
                              int flags, BdrvCheckMode fix, bool active)
{
    BDRVQcow2State *s = bs->opaque;
    size_t l1_size_bytes = l1_size * L1E_SIZE;
    g_autofree uint64_t *l1_table = NULL;
    uint64_t l2_offset;
    int i, ret;

    if (!l1_size) {
        return 0;
    }

    /* Mark L1 table as used */
    ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, refcount_table_size,
                                   l1_table_offset, l1_size_bytes);
    if (ret < 0) {
        return ret;
    }

    l1_table = g_try_malloc(l1_size_bytes);
    if (l1_table == NULL) {
        res->check_errors++;
        return -ENOMEM;
    }

    /* Read L1 table entries from disk */
    ret = bdrv_pread(bs->file, l1_table_offset, l1_size_bytes, l1_table, 0);
    if (ret < 0) {
        fprintf(stderr, "ERROR: I/O error in check_refcounts_l1\n");
        res->check_errors++;
        return ret;
    }

    for (i = 0; i < l1_size; i++) {
        be64_to_cpus(&l1_table[i]);
    }

    /* Do the actual checks */
    for (i = 0; i < l1_size; i++) {
        if (!l1_table[i]) {
            continue;
        }

        if (l1_table[i] & L1E_RESERVED_MASK) {
            fprintf(stderr, "ERROR found L1 entry with reserved bits set: "
                    "%" PRIx64 "\n", l1_table[i]);
            res->corruptions++;
        }

        l2_offset = l1_table[i] & L1E_OFFSET_MASK;

        /* Mark L2 table as used */
        ret = qcow2_inc_refcounts_imrt(bs, res,
                                       refcount_table, refcount_table_size,
                                       l2_offset, s->cluster_size);
        if (ret < 0) {
            return ret;
        }

        /* L2 tables are cluster aligned */
        if (offset_into_cluster(s, l2_offset)) {
            fprintf(stderr, "ERROR l2_offset=%" PRIx64 ": Table is not "
                    "cluster aligned; L1 entry corrupted\n", l2_offset);
            res->corruptions++;
        }

        /* Process and check L2 entries */
        ret = check_refcounts_l2(bs, res, refcount_table,
                                 refcount_table_size, l2_offset, flags,
                                 fix, active);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/*
 * Checks the OFLAG_COPIED flag for all L1 and L2 entries.
 *
 * This function does not print an error message nor does it increment
 * check_errors if qcow2_get_refcount fails (this is because such an error will
 * have been already detected and sufficiently signaled by the calling function
 * (qcow2_check_refcounts) by the time this function is called).
 */
static int check_oflag_copied(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *l2_table = qemu_blockalign(bs, s->cluster_size);
    int ret;
    uint64_t refcount;
    int i, j;
    bool repair;

    if (fix & BDRV_FIX_ERRORS) {
        /* Always repair */
        repair = true;
    } else if (fix & BDRV_FIX_LEAKS) {
        /* Repair only if that seems safe: This function is always
         * called after the refcounts have been fixed, so the refcount
         * is accurate if that repair was successful */
        repair = !res->check_errors && !res->corruptions && !res->leaks;
    } else {
        repair = false;
    }

    for (i = 0; i < s->l1_size; i++) {
        uint64_t l1_entry = s->l1_table[i];
        uint64_t l2_offset = l1_entry & L1E_OFFSET_MASK;
        int l2_dirty = 0;

        if (!l2_offset) {
            continue;
        }

        ret = qcow2_get_refcount(bs, l2_offset >> s->cluster_bits,
                                 &refcount);
        if (ret < 0) {
            /* don't print message nor increment check_errors */
            continue;
        }
        if ((refcount == 1) != ((l1_entry & QCOW_OFLAG_COPIED) != 0)) {
            res->corruptions++;
            fprintf(stderr, "%s OFLAG_COPIED L2 cluster: l1_index=%d "
                    "l1_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                    repair ? "Repairing" : "ERROR", i, l1_entry, refcount);
            if (repair) {
                s->l1_table[i] = refcount == 1
                               ? l1_entry | QCOW_OFLAG_COPIED
                               : l1_entry & ~QCOW_OFLAG_COPIED;
                ret = qcow2_write_l1_entry(bs, i);
                if (ret < 0) {
                    res->check_errors++;
                    goto fail;
                }
                res->corruptions--;
                res->corruptions_fixed++;
            }
        }

        ret = bdrv_pread(bs->file, l2_offset, s->l2_size * l2_entry_size(s),
                         l2_table, 0);
        if (ret < 0) {
            fprintf(stderr, "ERROR: Could not read L2 table: %s\n",
                    strerror(-ret));
            res->check_errors++;
            goto fail;
        }

        for (j = 0; j < s->l2_size; j++) {
            uint64_t l2_entry = get_l2_entry(s, l2_table, j);
            uint64_t data_offset = l2_entry & L2E_OFFSET_MASK;
            QCow2ClusterType cluster_type = qcow2_get_cluster_type(bs, l2_entry);

            if (cluster_type == QCOW2_CLUSTER_NORMAL ||
                cluster_type == QCOW2_CLUSTER_ZERO_ALLOC) {
                if (has_data_file(bs)) {
                    refcount = 1;
                } else {
                    ret = qcow2_get_refcount(bs,
                                             data_offset >> s->cluster_bits,
                                             &refcount);
                    if (ret < 0) {
                        /* don't print message nor increment check_errors */
                        continue;
                    }
                }
                if ((refcount == 1) != ((l2_entry & QCOW_OFLAG_COPIED) != 0)) {
                    res->corruptions++;
                    fprintf(stderr, "%s OFLAG_COPIED data cluster: "
                            "l2_entry=%" PRIx64 " refcount=%" PRIu64 "\n",
                            repair ? "Repairing" : "ERROR", l2_entry, refcount);
                    if (repair) {
                        set_l2_entry(s, l2_table, j,
                                     refcount == 1 ?
                                     l2_entry | QCOW_OFLAG_COPIED :
                                     l2_entry & ~QCOW_OFLAG_COPIED);
                        l2_dirty++;
                    }
                }
            }
        }

        if (l2_dirty > 0) {
            ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_ACTIVE_L2,
                                                l2_offset, s->cluster_size,
                                                false);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table; metadata "
                        "overlap check failed: %s\n", strerror(-ret));
                res->check_errors++;
                goto fail;
            }

            ret = bdrv_pwrite(bs->file, l2_offset, s->cluster_size, l2_table,
                              0);
            if (ret < 0) {
                fprintf(stderr, "ERROR: Could not write L2 table: %s\n",
                        strerror(-ret));
                res->check_errors++;
                goto fail;
            }
            res->corruptions -= l2_dirty;
            res->corruptions_fixed += l2_dirty;
        }
    }

    ret = 0;

fail:
    qemu_vfree(l2_table);
    return ret;
}

/*
 * Checks consistency of refblocks and accounts for each refblock in
 * *refcount_table.
 */
static int check_refblocks(BlockDriverState *bs, BdrvCheckResult *res,
                           BdrvCheckMode fix, bool *rebuild,
                           void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i, size;
    int ret;

    for (i = 0; i < s->refcount_table_size; i++) {
        uint64_t offset, cluster;
        offset = s->refcount_table[i] & REFT_OFFSET_MASK;
        cluster = offset >> s->cluster_bits;

        if (s->refcount_table[i] & REFT_RESERVED_MASK) {
            fprintf(stderr, "ERROR refcount table entry %" PRId64 " has "
                    "reserved bits set\n", i);
            res->corruptions++;
            *rebuild = true;
            continue;
        }

        /* Refcount blocks are cluster aligned */
        if (offset_into_cluster(s, offset)) {
            fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
                    "cluster aligned; refcount table entry corrupted\n", i);
            res->corruptions++;
            *rebuild = true;
            continue;
        }

        if (cluster >= *nb_clusters) {
            res->corruptions++;
            fprintf(stderr, "%s refcount block %" PRId64 " is outside image\n",
                    fix & BDRV_FIX_ERRORS ? "Repairing" : "ERROR", i);

            if (fix & BDRV_FIX_ERRORS) {
                int64_t new_nb_clusters;
                Error *local_err = NULL;

                if (offset > INT64_MAX - s->cluster_size) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                ret = bdrv_truncate(bs->file, offset + s->cluster_size, false,
                                    PREALLOC_MODE_OFF, 0, &local_err);
                if (ret < 0) {
                    error_report_err(local_err);
                    goto resize_fail;
                }
                size = bdrv_getlength(bs->file->bs);
                if (size < 0) {
                    ret = size;
                    goto resize_fail;
                }

                new_nb_clusters = size_to_clusters(s, size);
                assert(new_nb_clusters >= *nb_clusters);

                ret = realloc_refcount_array(s, refcount_table,
                                             nb_clusters, new_nb_clusters);
                if (ret < 0) {
                    res->check_errors++;
                    return ret;
                }

                if (cluster >= *nb_clusters) {
                    ret = -EINVAL;
                    goto resize_fail;
                }

                res->corruptions--;
                res->corruptions_fixed++;
                ret = qcow2_inc_refcounts_imrt(bs, res,
                                               refcount_table, nb_clusters,
                                               offset, s->cluster_size);
                if (ret < 0) {
                    return ret;
                }
                /* No need to check whether the refcount is now greater than 1:
                 * This area was just allocated and zeroed, so it can only be
                 * exactly 1 after qcow2_inc_refcounts_imrt() */
                continue;

resize_fail:
                *rebuild = true;
                fprintf(stderr, "ERROR could not resize image: %s\n",
                        strerror(-ret));
            }
            continue;
        }

        if (offset != 0) {
            ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
                                           offset, s->cluster_size);
            if (ret < 0) {
                return ret;
            }
            if (s->get_refcount(*refcount_table, cluster) != 1) {
                fprintf(stderr, "ERROR refcount block %" PRId64
                        " refcount=%" PRIu64 "\n", i,
                        s->get_refcount(*refcount_table, cluster));
                res->corruptions++;
                *rebuild = true;
            }
        }
    }

    return 0;
}

/*
 * Calculates an in-memory refcount table.
 */
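/*
 * (The result counts every reference the metadata itself holds: the header
 * cluster, the active L1/L2 tables, the snapshot table and all snapshot
 * L1/L2 tables, and the refcount table clusters; refblocks are accounted
 * separately by check_refblocks().)
 */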
static int calculate_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                               BdrvCheckMode fix, bool *rebuild,
                               void **refcount_table, int64_t *nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i;
    QCowSnapshot *sn;
    int ret;

    if (!*refcount_table) {
        int64_t old_size = 0;
        ret = realloc_refcount_array(s, refcount_table,
                                     &old_size, *nb_clusters);
        if (ret < 0) {
            res->check_errors++;
            return ret;
        }
    }

    /* header */
    ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
                                   0, s->cluster_size);
    if (ret < 0) {
        return ret;
    }

    /* current L1 table */
    ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                             s->l1_table_offset, s->l1_size, CHECK_FRAG_INFO,
                             fix, true);
    if (ret < 0) {
        return ret;
    }

    /* snapshots */
    if (has_data_file(bs) && s->nb_snapshots) {
        fprintf(stderr, "ERROR %d snapshots in image with data file\n",
                s->nb_snapshots);
        res->corruptions++;
    }

    for (i = 0; i < s->nb_snapshots; i++) {
        sn = s->snapshots + i;
        if (offset_into_cluster(s, sn->l1_table_offset)) {
            fprintf(stderr, "ERROR snapshot %s (%s) l1_offset=%#" PRIx64 ": "
                    "L1 table is not cluster aligned; snapshot table entry "
                    "corrupted\n", sn->id_str, sn->name, sn->l1_table_offset);
            res->corruptions++;
            continue;
        }
        if (sn->l1_size > QCOW_MAX_L1_SIZE / L1E_SIZE) {
            fprintf(stderr, "ERROR snapshot %s (%s) l1_size=%#" PRIx32 ": "
                    "L1 table is too large; snapshot table entry corrupted\n",
                    sn->id_str, sn->name, sn->l1_size);
            res->corruptions++;
            continue;
        }
        ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
                                 sn->l1_table_offset, sn->l1_size, 0, fix,
                                 false);
        if (ret < 0) {
            return ret;
        }
    }
    ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
                                   s->snapshots_offset, s->snapshots_size);
    if (ret < 0) {
        return ret;
    }

    /* refcount data */
    ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
                                   s->refcount_table_offset,
                                   s->refcount_table_size *
                                   REFTABLE_ENTRY_SIZE);
    if (ret < 0) {
        return ret;
    }
|
2014-10-22 16:09:32 +04:00
|
|
|
|

    /* encryption */
    if (s->crypto_header.length) {
        ret = qcow2_inc_refcounts_imrt(bs, res, refcount_table, nb_clusters,
                                       s->crypto_header.offset,
                                       s->crypto_header.length);
        if (ret < 0) {
            return ret;
        }
    }

    /* bitmaps */
    ret = qcow2_check_bitmaps_refcounts(bs, res, refcount_table, nb_clusters);
    if (ret < 0) {
        return ret;
    }

    return check_refblocks(bs, res, fix, rebuild, refcount_table, nb_clusters);
}
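
/*
 * calculate_refcounts() visits the metadata in image-layout order: the
 * header cluster, the active L1/L2 tables, the snapshot L1 tables and the
 * snapshot table, the reftable, the LUKS header area and the bitmap
 * extension. check_refblocks() runs last because repairing a refblock that
 * lies outside the image may grow both the file and the in-memory refcount
 * table that the earlier steps have been filling.
 */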

/*
 * Compares the actual reference count for each cluster in the image against the
 * refcount as reported by the refcount structures on-disk.
 */
static void compare_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                              BdrvCheckMode fix, bool *rebuild,
                              int64_t *highest_cluster,
                              void *refcount_table, int64_t nb_clusters)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i;
    uint64_t refcount1, refcount2;
    int ret;

    for (i = 0, *highest_cluster = 0; i < nb_clusters; i++) {
        ret = qcow2_get_refcount(bs, i, &refcount1);
        if (ret < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-ret));
            res->check_errors++;
            continue;
        }

        refcount2 = s->get_refcount(refcount_table, i);

        if (refcount1 > 0 || refcount2 > 0) {
            *highest_cluster = i;
        }

        if (refcount1 != refcount2) {
            /* Check if we're allowed to fix the mismatch */
            int *num_fixed = NULL;
            if (refcount1 == 0) {
                *rebuild = true;
            } else if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
                num_fixed = &res->leaks_fixed;
            } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
                num_fixed = &res->corruptions_fixed;
            }
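
            /*
             * refcount1 is the refcount read from disk, refcount2 the one
             * recomputed into the in-memory table: refcount1 > refcount2
             * (e.g. 2 vs 1) is a leak, refcount1 < refcount2 a corruption,
             * and refcount1 == 0 for a used cluster means the on-disk
             * refcount structures cannot be trusted and must be rebuilt.
             */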
            fprintf(stderr, "%s cluster %" PRId64 " refcount=%" PRIu64
                    " reference=%" PRIu64 "\n",
                    num_fixed != NULL ? "Repairing" :
                    refcount1 < refcount2 ? "ERROR" :
                    "Leaked",
                    i, refcount1, refcount2);

            if (num_fixed) {
                ret = update_refcount(bs, i << s->cluster_bits, 1,
                                      refcount_diff(refcount1, refcount2),
                                      refcount1 > refcount2,
                                      QCOW2_DISCARD_ALWAYS);
                if (ret >= 0) {
                    (*num_fixed)++;
                    continue;
                }
            }

            /* And if we couldn't, print an error */
            if (refcount1 < refcount2) {
                res->corruptions++;
            } else {
                res->leaks++;
            }
        }
    }
}

/*
 * Allocates clusters using an in-memory refcount table (IMRT) in contrast to
 * the on-disk refcount structures.
 *
 * On input, *first_free_cluster tells where to start looking, and need not
 * actually be a free cluster; the returned offset will not be before that
 * cluster. On output, *first_free_cluster points to the first gap found, even
 * if that gap was too small to be used as the returned offset.
 *
 * Note that *first_free_cluster is a cluster index whereas the return value is
 * an offset.
 */
static int64_t alloc_clusters_imrt(BlockDriverState *bs,
                                   int cluster_count,
                                   void **refcount_table,
                                   int64_t *imrt_nb_clusters,
                                   int64_t *first_free_cluster)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t cluster = *first_free_cluster, i;
    bool first_gap = true;
    int contiguous_free_clusters;
    int ret;

    /* Starting at *first_free_cluster, find a range of at least cluster_count
     * continuously free clusters */
    for (contiguous_free_clusters = 0;
         cluster < *imrt_nb_clusters &&
         contiguous_free_clusters < cluster_count;
         cluster++)
    {
        if (!s->get_refcount(*refcount_table, cluster)) {
            contiguous_free_clusters++;
            if (first_gap) {
                /* If this is the first free cluster found, update
                 * *first_free_cluster accordingly */
                *first_free_cluster = cluster;
                first_gap = false;
            }
        } else if (contiguous_free_clusters) {
            contiguous_free_clusters = 0;
        }
    }

    /* If contiguous_free_clusters is greater than zero, it contains the number
     * of continuously free clusters until the current cluster; the first free
     * cluster in the current "gap" is therefore
     * cluster - contiguous_free_clusters */

    /* If no such range could be found, grow the in-memory refcount table
     * accordingly to append free clusters at the end of the image */
    if (contiguous_free_clusters < cluster_count) {
        /* contiguous_free_clusters clusters are already empty at the image end;
         * we need cluster_count clusters; therefore, we have to allocate
         * cluster_count - contiguous_free_clusters new clusters at the end of
         * the image (which is the current value of cluster; note that cluster
         * may exceed old_imrt_nb_clusters if *first_free_cluster pointed beyond
         * the image end) */
        ret = realloc_refcount_array(s, refcount_table, imrt_nb_clusters,
                                     cluster + cluster_count
                                     - contiguous_free_clusters);
        if (ret < 0) {
            return ret;
        }
    }

    /* Go back to the first free cluster */
    cluster -= contiguous_free_clusters;
    for (i = 0; i < cluster_count; i++) {
        s->set_refcount(*refcount_table, cluster + i, 1);
    }
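
    /* The new clusters now carry refcount 1 in the IMRT; e.g. with 64 KiB
     * clusters (cluster_bits = 16), returning cluster index 10 means host
     * offset 10 << 16 = 0xa0000. */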

    return cluster << s->cluster_bits;
}

/*
 * Helper function for rebuild_refcount_structure().
 *
 * Scan the range of clusters [first_cluster, end_cluster) for allocated
 * clusters and write all corresponding refblocks to disk. The refblock
 * and allocation data is taken from the in-memory refcount table
 * *refcount_table[] (of size *nb_clusters), which is basically one big
 * (unlimited size) refblock for the whole image.
 *
 * For these refblocks, clusters are allocated using said in-memory
 * refcount table. Care is taken that these allocations are reflected
 * in the refblocks written to disk.
 *
 * The refblocks' offsets are written into a reftable, which is
 * *on_disk_reftable_ptr[] (of size *on_disk_reftable_entries_ptr). If
 * that reftable is of insufficient size, it will be resized to fit.
 * This reftable is not written to disk.
 *
 * (If *on_disk_reftable_ptr is not NULL, the entries within are assumed
 * to point to existing valid refblocks that do not need to be allocated
 * again.)
 *
 * Return whether the on-disk reftable array was resized (true/false),
 * or -errno on error.
 */
static int rebuild_refcounts_write_refblocks(
        BlockDriverState *bs, void **refcount_table, int64_t *nb_clusters,
        int64_t first_cluster, int64_t end_cluster,
        uint64_t **on_disk_reftable_ptr, uint32_t *on_disk_reftable_entries_ptr,
        Error **errp
    )
{
    BDRVQcow2State *s = bs->opaque;
    int64_t cluster;
    int64_t refblock_offset, refblock_start, refblock_index;
    int64_t first_free_cluster = 0;
    uint64_t *on_disk_reftable = *on_disk_reftable_ptr;
    uint32_t on_disk_reftable_entries = *on_disk_reftable_entries_ptr;
    void *on_disk_refblock;
    bool reftable_grown = false;
    int ret;

    for (cluster = first_cluster; cluster < end_cluster; cluster++) {
        /* Check all clusters to find refblocks that contain non-zero entries */
        if (!s->get_refcount(*refcount_table, cluster)) {
            continue;
        }

        /*
         * This cluster is allocated, so we need to create a refblock
         * for it. The data we will write to disk is just the
         * respective slice from *refcount_table, so it will contain
         * accurate refcounts for all clusters belonging to this
         * refblock. After we have written it, we will therefore skip
         * all remaining clusters in this refblock.
         */

        refblock_index = cluster >> s->refcount_block_bits;
        refblock_start = refblock_index << s->refcount_block_bits;
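
        /*
         * E.g. with 64 KiB clusters and 16-bit refcount entries,
         * refcount_block_bits is 15, i.e. 32768 entries per refblock:
         * cluster 40000 then belongs to refblock 1, which covers
         * clusters 32768..65535.
         */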

        if (on_disk_reftable_entries > refblock_index &&
            on_disk_reftable[refblock_index])
        {
            /*
             * We can get here after a `goto write_refblocks`: We have a
             * reftable from a previous run, and the refblock is already
             * allocated. No need to allocate it again.
             */
            refblock_offset = on_disk_reftable[refblock_index];
        } else {
            int64_t refblock_cluster_index;

            /* Don't allocate a cluster in a refblock already written to disk */
            if (first_free_cluster < refblock_start) {
                first_free_cluster = refblock_start;
            }
            refblock_offset = alloc_clusters_imrt(bs, 1, refcount_table,
                                                  nb_clusters,
                                                  &first_free_cluster);
            if (refblock_offset < 0) {
                error_setg_errno(errp, -refblock_offset,
                                 "ERROR allocating refblock");
                return refblock_offset;
            }

            refblock_cluster_index = refblock_offset / s->cluster_size;
            if (refblock_cluster_index >= end_cluster) {
                /*
                 * We must write the refblock that holds this refblock's
                 * refcount
                 */
                end_cluster = refblock_cluster_index + 1;
            }

            if (on_disk_reftable_entries <= refblock_index) {
                on_disk_reftable_entries =
                    ROUND_UP((refblock_index + 1) * REFTABLE_ENTRY_SIZE,
                             s->cluster_size) / REFTABLE_ENTRY_SIZE;
                on_disk_reftable =
                    g_try_realloc(on_disk_reftable,
                                  on_disk_reftable_entries *
                                  REFTABLE_ENTRY_SIZE);
                if (!on_disk_reftable) {
                    error_setg(errp, "ERROR allocating reftable memory");
                    return -ENOMEM;
                }

                memset(on_disk_reftable + *on_disk_reftable_entries_ptr, 0,
                       (on_disk_reftable_entries -
                        *on_disk_reftable_entries_ptr) *
                       REFTABLE_ENTRY_SIZE);

                *on_disk_reftable_ptr = on_disk_reftable;
                *on_disk_reftable_entries_ptr = on_disk_reftable_entries;

                reftable_grown = true;
            } else {
                assert(on_disk_reftable);
            }
            on_disk_reftable[refblock_index] = refblock_offset;
        }

        /* Refblock is allocated, write it to disk */

        ret = qcow2_pre_write_overlap_check(bs, 0, refblock_offset,
                                            s->cluster_size, false);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "ERROR writing refblock");
            return ret;
        }

        /*
         * The refblock is simply a slice of *refcount_table.
         * Note that the size of *refcount_table is always aligned to
         * whole clusters, so the write operation will not result in
         * out-of-bounds accesses.
         */
        on_disk_refblock = (void *)((char *) *refcount_table +
                                    refblock_index * s->cluster_size);

        ret = bdrv_pwrite(bs->file, refblock_offset, s->cluster_size,
                          on_disk_refblock, 0);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "ERROR writing refblock");
            return ret;
        }

        /* This refblock is done, skip to its end */
        cluster = refblock_start + s->refcount_block_size - 1;
    }

    return reftable_grown;
}

/*
 * Creates a new refcount structure based solely on the in-memory information
 * given through *refcount_table (this in-memory information is basically just
 * the concatenation of all refblocks). All necessary allocations will be
 * reflected in that array.
 *
 * On success, the old refcount structure is leaked (it will be covered by the
 * new refcount structure).
 */
static int rebuild_refcount_structure(BlockDriverState *bs,
                                      BdrvCheckResult *res,
                                      void **refcount_table,
                                      int64_t *nb_clusters,
                                      Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t reftable_offset = -1;
    int64_t reftable_length = 0;
    int64_t reftable_clusters;
    int64_t refblock_index;
    uint32_t on_disk_reftable_entries = 0;
    uint64_t *on_disk_reftable = NULL;
    int ret = 0;
    int reftable_size_changed = 0;
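
    /*
     * Mirrors the adjacent refcount_table_offset and
     * refcount_table_clusters fields of QCowHeader, so that both can be
     * updated in the image header with a single write.
     */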
    struct {
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED reftable_offset_and_clusters;

    qcow2_cache_empty(bs, s->refcount_block_cache);

    /*
     * For each refblock containing entries, we try to allocate a
     * cluster (in the in-memory refcount table) and write its offset
     * into on_disk_reftable[]. We then write the whole refblock to
     * disk (as a slice of the in-memory refcount table).
     * This is done by rebuild_refcounts_write_refblocks().
     *
     * Once we have scanned all clusters, we try to find space for the
     * reftable. This will dirty the in-memory refcount table (i.e.
     * make it differ from the refblocks we have already written), so we
     * need to run rebuild_refcounts_write_refblocks() again for the
     * range of clusters where the reftable has been allocated.
     *
     * This second run might make the reftable grow again, in which case
     * we will need to allocate another space for it, which is why we
     * repeat all this until the reftable stops growing.
     *
     * (This loop will terminate, because with every cluster the
     * reftable grows, it can accommodate a multitude of more refcounts,
     * so that at some point this must be able to cover the reftable
     * and all refblocks describing it.)
     *
     * We then convert the reftable to big-endian and write it to disk.
     *
     * Note that we never free any reftable allocations. Doing so would
     * needlessly complicate the algorithm: The eventual second check
     * run we do will clean up all leaks we have caused.
     */

    reftable_size_changed =
        rebuild_refcounts_write_refblocks(bs, refcount_table, nb_clusters,
                                          0, *nb_clusters,
                                          &on_disk_reftable,
                                          &on_disk_reftable_entries, errp);
    if (reftable_size_changed < 0) {
        res->check_errors++;
        ret = reftable_size_changed;
        goto fail;
    }

    /*
     * There was no reftable before, so rebuild_refcounts_write_refblocks()
     * must have increased its size (from 0 to something).
     */
    assert(reftable_size_changed);

    do {
        int64_t reftable_start_cluster, reftable_end_cluster;
        int64_t first_free_cluster = 0;

        reftable_length = on_disk_reftable_entries * REFTABLE_ENTRY_SIZE;
        reftable_clusters = size_to_clusters(s, reftable_length);

        reftable_offset = alloc_clusters_imrt(bs, reftable_clusters,
                                              refcount_table, nb_clusters,
                                              &first_free_cluster);
        if (reftable_offset < 0) {
            error_setg_errno(errp, -reftable_offset,
                             "ERROR allocating reftable");
            res->check_errors++;
            ret = reftable_offset;
            goto fail;
        }

        /*
         * We need to update the affected refblocks, so re-run the
         * write_refblocks loop for the reftable's range of clusters.
         */
        assert(offset_into_cluster(s, reftable_offset) == 0);
        reftable_start_cluster = reftable_offset / s->cluster_size;
        reftable_end_cluster = reftable_start_cluster + reftable_clusters;
        reftable_size_changed =
            rebuild_refcounts_write_refblocks(bs, refcount_table, nb_clusters,
                                              reftable_start_cluster,
                                              reftable_end_cluster,
                                              &on_disk_reftable,
                                              &on_disk_reftable_entries, errp);
        if (reftable_size_changed < 0) {
            res->check_errors++;
            ret = reftable_size_changed;
            goto fail;
        }

        /*
         * If the reftable size has changed, we will need to find a new
         * allocation, repeating the loop.
         */
    } while (reftable_size_changed);

    /* The above loop must have run at least once */
    assert(reftable_offset >= 0);

    /*
     * All allocations are done, all refblocks are written, convert the
     * reftable to big-endian and write it to disk.
     */

    for (refblock_index = 0; refblock_index < on_disk_reftable_entries;
         refblock_index++)
    {
        cpu_to_be64s(&on_disk_reftable[refblock_index]);
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, reftable_offset, reftable_length,
                                        false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "ERROR writing reftable");
        goto fail;
    }

    assert(reftable_length < INT_MAX);
    ret = bdrv_pwrite(bs->file, reftable_offset, reftable_length,
                      on_disk_reftable, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "ERROR writing reftable");
        goto fail;
    }

    /* Enter new reftable into the image header */
    reftable_offset_and_clusters.reftable_offset = cpu_to_be64(reftable_offset);
    reftable_offset_and_clusters.reftable_clusters =
        cpu_to_be32(reftable_clusters);
    ret = bdrv_pwrite_sync(bs->file,
                           offsetof(QCowHeader, refcount_table_offset),
                           sizeof(reftable_offset_and_clusters),
                           &reftable_offset_and_clusters, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "ERROR setting reftable");
        goto fail;
    }

    for (refblock_index = 0; refblock_index < on_disk_reftable_entries;
         refblock_index++)
    {
        be64_to_cpus(&on_disk_reftable[refblock_index]);
    }
    s->refcount_table = on_disk_reftable;
    s->refcount_table_offset = reftable_offset;
    s->refcount_table_size = on_disk_reftable_entries;
    update_max_refcount_table_index(s);

    return 0;

fail:
    g_free(on_disk_reftable);
    return ret;
}

/*
 * Checks an image for refcount consistency.
 *
 * Returns 0 if no errors are found, the number of errors in case the image is
 * detected as corrupted, and -errno when an internal error occurred.
 */
int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
                          BdrvCheckMode fix)
{
    BDRVQcow2State *s = bs->opaque;
    BdrvCheckResult pre_compare_res;
    int64_t size, highest_cluster, nb_clusters;
    void *refcount_table = NULL;
    bool rebuild = false;
    int ret;

    size = bdrv_getlength(bs->file->bs);
    if (size < 0) {
        res->check_errors++;
        return size;
    }

    nb_clusters = size_to_clusters(s, size);
    if (nb_clusters > INT_MAX) {
        res->check_errors++;
        return -EFBIG;
    }

    res->bfi.total_clusters =
        size_to_clusters(s, bs->total_sectors * BDRV_SECTOR_SIZE);

    ret = calculate_refcounts(bs, res, fix, &rebuild, &refcount_table,
                              &nb_clusters);
    if (ret < 0) {
        goto fail;
    }

    /* In case we don't need to rebuild the refcount structure (but want to fix
     * something), this function is immediately called again, in which case the
     * result should be ignored */
    pre_compare_res = *res;
    compare_refcounts(bs, res, 0, &rebuild, &highest_cluster, refcount_table,
                      nb_clusters);

    if (rebuild && (fix & BDRV_FIX_ERRORS)) {
        BdrvCheckResult old_res = *res;
        int fresh_leaks = 0;
        Error *local_err = NULL;

        fprintf(stderr, "Rebuilding refcount structure\n");
        ret = rebuild_refcount_structure(bs, res, &refcount_table,
                                         &nb_clusters, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto fail;
        }

        res->corruptions = 0;
        res->leaks = 0;

        /* Because the old reftable has been exchanged for a new one the
         * references have to be recalculated */
        rebuild = false;
        memset(refcount_table, 0, refcount_array_byte_size(s, nb_clusters));
        ret = calculate_refcounts(bs, res, 0, &rebuild, &refcount_table,
                                  &nb_clusters);
        if (ret < 0) {
            goto fail;
        }

        if (fix & BDRV_FIX_LEAKS) {
            /* The old refcount structures are now leaked, fix it; the result
             * can be ignored, aside from leaks which were introduced by
             * rebuild_refcount_structure() that could not be fixed */
            BdrvCheckResult saved_res = *res;
            *res = (BdrvCheckResult){ 0 };

            compare_refcounts(bs, res, BDRV_FIX_LEAKS, &rebuild,
                              &highest_cluster, refcount_table, nb_clusters);
            if (rebuild) {
                fprintf(stderr, "ERROR rebuilt refcount structure is still "
                        "broken\n");
            }

            /* Any leaks accounted for here were introduced by
             * rebuild_refcount_structure() because that function has created a
             * new refcount structure from scratch */
            fresh_leaks = res->leaks;
            *res = saved_res;
        }

        if (res->corruptions < old_res.corruptions) {
            res->corruptions_fixed += old_res.corruptions - res->corruptions;
        }
        if (res->leaks < old_res.leaks) {
            res->leaks_fixed += old_res.leaks - res->leaks;
        }
        res->leaks += fresh_leaks;
    } else if (fix) {
        if (rebuild) {
            fprintf(stderr, "ERROR need to rebuild refcount structures\n");
            res->check_errors++;
            ret = -EIO;
            goto fail;
        }

        if (res->leaks || res->corruptions) {
            *res = pre_compare_res;
            compare_refcounts(bs, res, fix, &rebuild, &highest_cluster,
                              refcount_table, nb_clusters);
        }
    }

    /* check OFLAG_COPIED */
    ret = check_oflag_copied(bs, res, fix);
    if (ret < 0) {
        goto fail;
    }

    res->image_end_offset = (highest_cluster + 1) * s->cluster_size;
    ret = 0;

fail:
    g_free(refcount_table);

    return ret;
}

#define overlaps_with(ofs, sz) \
    ranges_overlap(offset, size, ofs, sz)

/*
 * Checks if the given offset into the image file is actually free to use by
 * looking for overlaps with important metadata sections (L1/L2 tables etc.),
 * i.e. a sanity check without relying on the refcount tables.
 *
 * The ign parameter specifies what checks not to perform (being a bitmask of
 * QCow2MetadataOverlap values), i.e., what sections to ignore.
 *
 * Returns:
 * - 0 if writing to this offset will not affect the mentioned metadata
 * - a positive QCow2MetadataOverlap value indicating one overlapping section
 * - a negative value (-errno) indicating an error while performing a check,
 *   e.g. when bdrv_pread failed on QCOW2_OL_INACTIVE_L2
 */
int qcow2_check_metadata_overlap(BlockDriverState *bs, int ign, int64_t offset,
                                 int64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    int chk = s->overlap_check & ~ign;
    int i, j;

    if (!size) {
        return 0;
    }

    if (chk & QCOW2_OL_MAIN_HEADER) {
        if (offset < s->cluster_size) {
            return QCOW2_OL_MAIN_HEADER;
        }
    }

    /* align range to test to cluster boundaries */
    size = ROUND_UP(offset_into_cluster(s, offset) + size, s->cluster_size);
    offset = start_of_cluster(s, offset);

    if ((chk & QCOW2_OL_ACTIVE_L1) && s->l1_size) {
        if (overlaps_with(s->l1_table_offset, s->l1_size * L1E_SIZE)) {
            return QCOW2_OL_ACTIVE_L1;
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_TABLE) && s->refcount_table_size) {
        if (overlaps_with(s->refcount_table_offset,
                          s->refcount_table_size * REFTABLE_ENTRY_SIZE)) {
            return QCOW2_OL_REFCOUNT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_SNAPSHOT_TABLE) && s->snapshots_size) {
        if (overlaps_with(s->snapshots_offset, s->snapshots_size)) {
            return QCOW2_OL_SNAPSHOT_TABLE;
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L1) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            if (s->snapshots[i].l1_size &&
                overlaps_with(s->snapshots[i].l1_table_offset,
                              s->snapshots[i].l1_size * L1E_SIZE)) {
                return QCOW2_OL_INACTIVE_L1;
            }
        }
    }

    if ((chk & QCOW2_OL_ACTIVE_L2) && s->l1_table) {
        for (i = 0; i < s->l1_size; i++) {
            if ((s->l1_table[i] & L1E_OFFSET_MASK) &&
                overlaps_with(s->l1_table[i] & L1E_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_ACTIVE_L2;
            }
        }
    }

    if ((chk & QCOW2_OL_REFCOUNT_BLOCK) && s->refcount_table) {
        unsigned last_entry = s->max_refcount_table_index;
        assert(last_entry < s->refcount_table_size);
        assert(last_entry + 1 == s->refcount_table_size ||
               (s->refcount_table[last_entry + 1] & REFT_OFFSET_MASK) == 0);
        for (i = 0; i <= last_entry; i++) {
            if ((s->refcount_table[i] & REFT_OFFSET_MASK) &&
                overlaps_with(s->refcount_table[i] & REFT_OFFSET_MASK,
                              s->cluster_size)) {
                return QCOW2_OL_REFCOUNT_BLOCK;
            }
        }
    }

    if ((chk & QCOW2_OL_INACTIVE_L2) && s->snapshots) {
        for (i = 0; i < s->nb_snapshots; i++) {
            uint64_t l1_ofs = s->snapshots[i].l1_table_offset;
            uint32_t l1_sz  = s->snapshots[i].l1_size;
            uint64_t l1_sz2 = l1_sz * L1E_SIZE;
            uint64_t *l1;
            int ret;

            ret = qcow2_validate_table(bs, l1_ofs, l1_sz, L1E_SIZE,
                                       QCOW_MAX_L1_SIZE, "", NULL);
            if (ret < 0) {
                return ret;
            }

            l1 = g_try_malloc(l1_sz2);

            if (l1_sz2 && l1 == NULL) {
                return -ENOMEM;
            }

            ret = bdrv_pread(bs->file, l1_ofs, l1_sz2, l1, 0);
            if (ret < 0) {
                g_free(l1);
                return ret;
            }

            for (j = 0; j < l1_sz; j++) {
                uint64_t l2_ofs = be64_to_cpu(l1[j]) & L1E_OFFSET_MASK;
                if (l2_ofs && overlaps_with(l2_ofs, s->cluster_size)) {
                    g_free(l1);
                    return QCOW2_OL_INACTIVE_L2;
                }
            }

            g_free(l1);
        }
    }

    if ((chk & QCOW2_OL_BITMAP_DIRECTORY) &&
        (s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS))
    {
        if (overlaps_with(s->bitmap_directory_offset,
                          s->bitmap_directory_size))
        {
            return QCOW2_OL_BITMAP_DIRECTORY;
        }
    }

    return 0;
}

static const char *metadata_ol_names[] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]      = "qcow2_header",
    [QCOW2_OL_ACTIVE_L1_BITNR]        = "active L1 table",
    [QCOW2_OL_ACTIVE_L2_BITNR]        = "active L2 table",
    [QCOW2_OL_REFCOUNT_TABLE_BITNR]   = "refcount table",
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR]   = "refcount block",
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR]   = "snapshot table",
    [QCOW2_OL_INACTIVE_L1_BITNR]      = "inactive L1 table",
    [QCOW2_OL_INACTIVE_L2_BITNR]      = "inactive L2 table",
    [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = "bitmap directory",
};
QEMU_BUILD_BUG_ON(QCOW2_OL_MAX_BITNR != ARRAY_SIZE(metadata_ol_names));
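
/*
 * qcow2_check_metadata_overlap() returns a single bit from the
 * QCow2MetadataOverlap mask; ctz32() in qcow2_pre_write_overlap_check()
 * turns it into the index of the matching name above.
 */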

/*
 * First performs a check for metadata overlaps (through
 * qcow2_check_metadata_overlap); if that fails with a negative value (error
 * while performing a check), that value is returned. If an impending overlap
 * is detected, the BDS will be made unusable, the qcow2 file marked corrupt
 * and -EIO returned.
 *
 * Returns 0 if there were neither overlaps nor errors while checking for
 * overlaps; or a negative value (-errno) on error.
 */
int qcow2_pre_write_overlap_check(BlockDriverState *bs, int ign, int64_t offset,
                                  int64_t size, bool data_file)
{
    int ret;

    if (data_file && has_data_file(bs)) {
        return 0;
    }

    ret = qcow2_check_metadata_overlap(bs, ign, offset, size);
    if (ret < 0) {
        return ret;
    } else if (ret > 0) {
        int metadata_ol_bitnr = ctz32(ret);
        assert(metadata_ol_bitnr < QCOW2_OL_MAX_BITNR);

        qcow2_signal_corruption(bs, true, offset, size, "Preventing invalid "
                                "write on metadata (overlaps with %s)",
                                metadata_ol_names[metadata_ol_bitnr]);
        return -EIO;
    }

    return 0;
}
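
/*
 * Metadata writers in this file call this immediately before the actual
 * write, e.g.
 *     qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size, false);
 * and only issue the bdrv_pwrite() if it returns 0.
 */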

/* A pointer to a function of this type is given to walk_over_reftable(). That
 * function will create refblocks and pass them to a RefblockFinishOp once they
 * are completed (@refblock). @refblock_empty is set if the refblock is
 * completely empty.
 *
 * Along with the refblock, a corresponding reftable entry is passed, in the
 * reftable @reftable (which may be reallocated) at @reftable_index.
 *
 * @allocated should be set to true if a new cluster has been allocated.
 */
typedef int (RefblockFinishOp)(BlockDriverState *bs, uint64_t **reftable,
                               uint64_t reftable_index, uint64_t *reftable_size,
                               void *refblock, bool refblock_empty,
                               bool *allocated, Error **errp);

/**
 * This "operation" for walk_over_reftable() allocates the refblock on disk (if
 * it is not empty) and inserts its offset into the new reftable. The size of
 * this new reftable is increased as required.
 */
static int alloc_refblock(BlockDriverState *bs, uint64_t **reftable,
                          uint64_t reftable_index, uint64_t *reftable_size,
                          void *refblock, bool refblock_empty, bool *allocated,
                          Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;

    if (!refblock_empty && reftable_index >= *reftable_size) {
        uint64_t *new_reftable;
        uint64_t new_reftable_size;

        new_reftable_size = ROUND_UP(reftable_index + 1,
                                     s->cluster_size / REFTABLE_ENTRY_SIZE);
        if (new_reftable_size > QCOW_MAX_REFTABLE_SIZE / REFTABLE_ENTRY_SIZE) {
            error_setg(errp,
                       "This operation would make the refcount table grow "
                       "beyond the maximum size supported by QEMU, aborting");
            return -ENOTSUP;
        }

        new_reftable = g_try_realloc(*reftable, new_reftable_size *
                                                REFTABLE_ENTRY_SIZE);
        if (!new_reftable) {
            error_setg(errp, "Failed to increase reftable buffer size");
            return -ENOMEM;
        }

        memset(new_reftable + *reftable_size, 0,
               (new_reftable_size - *reftable_size) * REFTABLE_ENTRY_SIZE);

        *reftable = new_reftable;
        *reftable_size = new_reftable_size;
    }

    if (!refblock_empty && !(*reftable)[reftable_index]) {
        offset = qcow2_alloc_clusters(bs, s->cluster_size);
        if (offset < 0) {
            error_setg_errno(errp, -offset, "Failed to allocate refblock");
            return offset;
        }
        (*reftable)[reftable_index] = offset;
        *allocated = true;
    }

    return 0;
}

/**
 * This "operation" for walk_over_reftable() writes the refblock to disk at the
 * offset specified by the new reftable's entry. It does not modify the new
 * reftable or change any refcounts.
 */
static int flush_refblock(BlockDriverState *bs, uint64_t **reftable,
                          uint64_t reftable_index, uint64_t *reftable_size,
                          void *refblock, bool refblock_empty, bool *allocated,
                          Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t offset;
    int ret;

    if (reftable_index < *reftable_size && (*reftable)[reftable_index]) {
        offset = (*reftable)[reftable_index];

        ret = qcow2_pre_write_overlap_check(bs, 0, offset, s->cluster_size,
                                            false);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Overlap check failed");
            return ret;
        }
|
|
|
ret = bdrv_pwrite(bs->file, offset, s->cluster_size, refblock, 0);
|
2015-07-27 18:51:37 +03:00
|
|
|
if (ret < 0) {
|
|
|
|
error_setg_errno(errp, -ret, "Failed to write refblock");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
assert(refblock_empty);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* This function walks over the existing reftable and every referenced refblock;
|
|
|
|
* if @new_set_refcount is non-NULL, it is called for every refcount entry to
|
|
|
|
* create an equal new entry in the passed @new_refblock. Once that
|
|
|
|
* @new_refblock is completely filled, @operation will be called.
|
|
|
|
*
|
|
|
|
* @status_cb and @cb_opaque are used for the amend operation's status callback.
|
|
|
|
* @index is the index of the walk_over_reftable() calls and @total is the total
|
|
|
|
* number of walk_over_reftable() calls per amend operation. Both are used for
|
|
|
|
* calculating the parameters for the status callback.
|
|
|
|
*
|
|
|
|
* @allocated is set to true if a new cluster has been allocated.
|
|
|
|
*/
|
|
|
|
static int walk_over_reftable(BlockDriverState *bs, uint64_t **new_reftable,
|
|
|
|
uint64_t *new_reftable_index,
|
|
|
|
uint64_t *new_reftable_size,
|
|
|
|
void *new_refblock, int new_refblock_size,
|
|
|
|
int new_refcount_bits,
|
|
|
|
RefblockFinishOp *operation, bool *allocated,
|
|
|
|
Qcow2SetRefcountFunc *new_set_refcount,
|
|
|
|
BlockDriverAmendStatusCB *status_cb,
|
|
|
|
void *cb_opaque, int index, int total,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
BDRVQcow2State *s = bs->opaque;
|
|
|
|
uint64_t reftable_index;
|
|
|
|
bool new_refblock_empty = true;
|
|
|
|
int refblock_index;
|
|
|
|
int new_refblock_index = 0;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
for (reftable_index = 0; reftable_index < s->refcount_table_size;
|
|
|
|
reftable_index++)
|
|
|
|
{
|
|
|
|
uint64_t refblock_offset = s->refcount_table[reftable_index]
|
|
|
|
& REFT_OFFSET_MASK;
|
|
|
|
|
|
|
|
status_cb(bs, (uint64_t)index * s->refcount_table_size + reftable_index,
|
|
|
|
(uint64_t)total * s->refcount_table_size, cb_opaque);
|
|
|
|
|
|
|
|
if (refblock_offset) {
|
|
|
|
void *refblock;
|
|
|
|
|
|
|
|
if (offset_into_cluster(s, refblock_offset)) {
|
|
|
|
qcow2_signal_corruption(bs, true, -1, -1, "Refblock offset %#"
|
|
|
|
PRIx64 " unaligned (reftable index: %#"
|
|
|
|
PRIx64 ")", refblock_offset,
|
|
|
|
reftable_index);
|
|
|
|
error_setg(errp,
|
|
|
|
"Image is corrupt (unaligned refblock offset)");
|
|
|
|
return -EIO;
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offset,
|
|
|
|
&refblock);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_setg_errno(errp, -ret, "Failed to retrieve refblock");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (refblock_index = 0; refblock_index < s->refcount_block_size;
|
|
|
|
refblock_index++)
|
|
|
|
{
|
|
|
|
uint64_t refcount;
|
|
|
|
|
|
|
|
if (new_refblock_index >= new_refblock_size) {
|
|
|
|
/* new_refblock is now complete */
|
|
|
|
ret = operation(bs, new_reftable, *new_reftable_index,
|
|
|
|
new_reftable_size, new_refblock,
|
|
|
|
new_refblock_empty, allocated, errp);
|
|
|
|
if (ret < 0) {
|
2018-02-05 17:33:07 +03:00
|
|
|
qcow2_cache_put(s->refcount_block_cache, &refblock);
|
2015-07-27 18:51:37 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
(*new_reftable_index)++;
|
|
|
|
new_refblock_index = 0;
|
|
|
|
new_refblock_empty = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
refcount = s->get_refcount(refblock, refblock_index);
|
|
|
|
if (new_refcount_bits < 64 && refcount >> new_refcount_bits) {
|
|
|
|
uint64_t offset;
|
|
|
|
|
2018-02-05 17:33:07 +03:00
|
|
|
qcow2_cache_put(s->refcount_block_cache, &refblock);
|
2015-07-27 18:51:37 +03:00
|
|
|
|
|
|
|
                    offset = ((reftable_index << s->refcount_block_bits)
                              + refblock_index) << s->cluster_bits;

                    error_setg(errp, "Cannot decrease refcount entry width to "
                               "%i bits: Cluster at offset %#" PRIx64 " has a "
                               "refcount of %" PRIu64, new_refcount_bits,
                               offset, refcount);
                    return -EINVAL;
                }

                if (new_set_refcount) {
                    new_set_refcount(new_refblock, new_refblock_index++,
                                     refcount);
                } else {
                    new_refblock_index++;
                }
                new_refblock_empty = new_refblock_empty && refcount == 0;
            }

            qcow2_cache_put(s->refcount_block_cache, &refblock);
        } else {
            /* No refblock means every refcount is 0 */
            for (refblock_index = 0; refblock_index < s->refcount_block_size;
                 refblock_index++)
            {
                if (new_refblock_index >= new_refblock_size) {
                    /* new_refblock is now complete */
                    ret = operation(bs, new_reftable, *new_reftable_index,
                                    new_reftable_size, new_refblock,
                                    new_refblock_empty, allocated, errp);
                    if (ret < 0) {
                        return ret;
                    }

                    (*new_reftable_index)++;
                    new_refblock_index = 0;
                    new_refblock_empty = true;
                }

                if (new_set_refcount) {
                    new_set_refcount(new_refblock, new_refblock_index++, 0);
                } else {
                    new_refblock_index++;
                }
            }
        }
    }

    if (new_refblock_index > 0) {
        /* Complete the potentially existing partially filled final refblock */
        if (new_set_refcount) {
            for (; new_refblock_index < new_refblock_size;
                 new_refblock_index++)
            {
                new_set_refcount(new_refblock, new_refblock_index, 0);
            }
        }

        ret = operation(bs, new_reftable, *new_reftable_index,
                        new_reftable_size, new_refblock, new_refblock_empty,
                        allocated, errp);
        if (ret < 0) {
            return ret;
        }

        (*new_reftable_index)++;
    }

    status_cb(bs, (uint64_t)(index + 1) * s->refcount_table_size,
              (uint64_t)total * s->refcount_table_size, cb_opaque);

    return 0;
}

int qcow2_change_refcount_order(BlockDriverState *bs, int refcount_order,
                                BlockDriverAmendStatusCB *status_cb,
                                void *cb_opaque, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    Qcow2GetRefcountFunc *new_get_refcount;
    Qcow2SetRefcountFunc *new_set_refcount;
    void *new_refblock = qemu_blockalign(bs->file->bs, s->cluster_size);
    uint64_t *new_reftable = NULL, new_reftable_size = 0;
    uint64_t *old_reftable, old_reftable_size, old_reftable_offset;
    uint64_t new_reftable_index = 0;
    uint64_t i;
    int64_t new_reftable_offset = 0, allocated_reftable_size = 0;
    int new_refblock_size, new_refcount_bits = 1 << refcount_order;
    int old_refcount_order;
    int walk_index = 0;
    int ret;
    bool new_allocation;

    assert(s->qcow_version >= 3);
    assert(refcount_order >= 0 && refcount_order <= 6);

    /* see qcow2_open() */
    new_refblock_size = 1 << (s->cluster_bits - (refcount_order - 3));
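    /*
     * Worked example (hypothetical parameters): with 64 KiB clusters
     * (cluster_bits == 16) and refcount_order == 4, each entry is
     * 2^4 = 16 bits = 2 bytes wide, so one refblock cluster holds
     * 1 << (16 - (4 - 3)) = 32768 entries.
     */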

    new_get_refcount = get_refcount_funcs[refcount_order];
    new_set_refcount = set_refcount_funcs[refcount_order];


    do {
        int total_walks;

        new_allocation = false;

        /* At least this walk and the final one that writes the refblocks are
         * still needed; normally this loop also runs at least twice, once to
         * create the allocations and once to verify that no new allocation
         * was required, which makes for a minimum of three walks in total */
        total_walks = MAX(walk_index + 2, 3);

        /* First, allocate the structures so they are present in the refcount
         * structures */
        ret = walk_over_reftable(bs, &new_reftable, &new_reftable_index,
                                 &new_reftable_size, NULL, new_refblock_size,
                                 new_refcount_bits, &alloc_refblock,
                                 &new_allocation, NULL, status_cb, cb_opaque,
                                 walk_index++, total_walks, errp);
        if (ret < 0) {
            goto done;
        }

        new_reftable_index = 0;
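
        /*
         * Allocating refblocks may itself have grown the refcount
         * structures, so the new reftable may need to be larger than
         * before; if anything was allocated in this pass, replace the
         * previous reftable allocation with one of the updated size.
         */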
        if (new_allocation) {
            if (new_reftable_offset) {
                qcow2_free_clusters(
                    bs, new_reftable_offset,
                    allocated_reftable_size * REFTABLE_ENTRY_SIZE,
                    QCOW2_DISCARD_NEVER);
            }

            new_reftable_offset = qcow2_alloc_clusters(bs, new_reftable_size *
                                                           REFTABLE_ENTRY_SIZE);
            if (new_reftable_offset < 0) {
                error_setg_errno(errp, -new_reftable_offset,
                                 "Failed to allocate the new reftable");
                ret = new_reftable_offset;
                goto done;
            }
            allocated_reftable_size = new_reftable_size;
        }
    } while (new_allocation);

    /* Second, write the new refblocks */
    ret = walk_over_reftable(bs, &new_reftable, &new_reftable_index,
                             &new_reftable_size, new_refblock,
                             new_refblock_size, new_refcount_bits,
                             &flush_refblock, &new_allocation, new_set_refcount,
                             status_cb, cb_opaque, walk_index, walk_index + 1,
                             errp);
    if (ret < 0) {
        goto done;
    }
    assert(!new_allocation);


    /* Write the new reftable */
    ret = qcow2_pre_write_overlap_check(bs, 0, new_reftable_offset,
                                        new_reftable_size * REFTABLE_ENTRY_SIZE,
                                        false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Overlap check failed");
        goto done;
    }
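
    /*
     * The reftable lives in host byte order in memory but is stored
     * big-endian on disk, so convert it before the write and convert it
     * back afterwards.
     */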
    for (i = 0; i < new_reftable_size; i++) {
        cpu_to_be64s(&new_reftable[i]);
    }

    ret = bdrv_pwrite(bs->file, new_reftable_offset,
                      new_reftable_size * REFTABLE_ENTRY_SIZE, new_reftable,
                      0);

    for (i = 0; i < new_reftable_size; i++) {
        be64_to_cpus(&new_reftable[i]);
    }

    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to write the new reftable");
        goto done;
    }

    /* Empty the refcount cache */
    ret = qcow2_cache_flush(bs, s->refcount_block_cache);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to flush the refblock cache");
        goto done;
    }

    /* Update the image header to point to the new reftable; this only updates
     * the fields which are relevant to qcow2_update_header(); other fields
     * such as s->refcount_table or s->refcount_bits stay stale for now
     * (because we have to restore everything if qcow2_update_header() fails) */
    old_refcount_order = s->refcount_order;
    old_reftable_size = s->refcount_table_size;
    old_reftable_offset = s->refcount_table_offset;

    s->refcount_order = refcount_order;
    s->refcount_table_size = new_reftable_size;
    s->refcount_table_offset = new_reftable_offset;

    ret = qcow2_update_header(bs);
    if (ret < 0) {
        s->refcount_order = old_refcount_order;
        s->refcount_table_size = old_reftable_size;
        s->refcount_table_offset = old_reftable_offset;
        error_setg_errno(errp, -ret, "Failed to update the qcow2 header");
        goto done;
    }

    /* Now update the rest of the in-memory information */
    old_reftable = s->refcount_table;
    s->refcount_table = new_reftable;
    update_max_refcount_table_index(s);

    s->refcount_bits = 1 << refcount_order;
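    /*
     * Compute 2^refcount_bits - 1 in two steps so the shift amount never
     * reaches 64 (which would be undefined behavior for 64-bit refcounts):
     * e.g. for refcount_bits == 16, 1 << 15 == 0x8000 and
     * 0x8000 + 0x7fff == 0xffff.
     */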
    s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
    s->refcount_max += s->refcount_max - 1;

    s->refcount_block_bits = s->cluster_bits - (refcount_order - 3);
    s->refcount_block_size = 1 << s->refcount_block_bits;

    s->get_refcount = new_get_refcount;
    s->set_refcount = new_set_refcount;

    /* For cleaning up all old refblocks and the old reftable below the "done"
     * label */
    new_reftable = old_reftable;
    new_reftable_size = old_reftable_size;
    new_reftable_offset = old_reftable_offset;

done:
    if (new_reftable) {
        /* On success, new_reftable actually points to the old reftable (and
         * new_reftable_size is the old reftable's size); but that is just
         * fine */
        for (i = 0; i < new_reftable_size; i++) {
            uint64_t offset = new_reftable[i] & REFT_OFFSET_MASK;
            if (offset) {
                qcow2_free_clusters(bs, offset, s->cluster_size,
                                    QCOW2_DISCARD_OTHER);
            }
        }
        g_free(new_reftable);

        if (new_reftable_offset > 0) {
            qcow2_free_clusters(bs, new_reftable_offset,
                                new_reftable_size * REFTABLE_ENTRY_SIZE,
                                QCOW2_DISCARD_OTHER);
        }
    }

    qemu_vfree(new_refblock);
    return ret;
}
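
/*
 * Return the offset of the refblock covering the cluster at @offset, or
 * -EIO (after signaling corruption) if no refblock covers it.
 */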
static int64_t get_refblock_offset(BlockDriverState *bs, uint64_t offset)
{
    BDRVQcow2State *s = bs->opaque;
    uint32_t index = offset_to_reftable_index(s, offset);
    int64_t covering_refblock_offset = 0;

    if (index < s->refcount_table_size) {
        covering_refblock_offset = s->refcount_table[index] & REFT_OFFSET_MASK;
    }
    if (!covering_refblock_offset) {
        qcow2_signal_corruption(bs, true, -1, -1, "Refblock at %#" PRIx64 " is "
                                "not covered by the refcount structures",
                                offset);
        return -EIO;
    }

    return covering_refblock_offset;
}
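
/*
 * Free the refblock cluster at @discard_block_offs: clear its (single)
 * reference in the refblock that covers it, drop any cached copy of the
 * discarded refblock, and queue the cluster for discard on the file.
 */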
static int coroutine_fn
qcow2_discard_refcount_block(BlockDriverState *bs, uint64_t discard_block_offs)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t refblock_offs;
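    /* The cluster being freed, and its entry slot within the covering
     * refblock (every refblock covers refcount_block_size consecutive
     * clusters) */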
    uint64_t cluster_index = discard_block_offs >> s->cluster_bits;
    uint32_t block_index = cluster_index & (s->refcount_block_size - 1);
    void *refblock;
    int ret;

    refblock_offs = get_refblock_offset(bs, discard_block_offs);
    if (refblock_offs < 0) {
        return refblock_offs;
    }

    assert(discard_block_offs != 0);

    ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offs,
                          &refblock);
    if (ret < 0) {
        return ret;
    }

    if (s->get_refcount(refblock, block_index) != 1) {
        qcow2_signal_corruption(bs, true, -1, -1, "Invalid refcount:"
                                " refblock offset %#" PRIx64
                                ", reftable index %u"
                                ", block offset %#" PRIx64
                                ", refcount %#" PRIx64,
                                refblock_offs,
                                offset_to_reftable_index(s, discard_block_offs),
                                discard_block_offs,
                                s->get_refcount(refblock, block_index));
        qcow2_cache_put(s->refcount_block_cache, &refblock);
        return -EINVAL;
    }
    s->set_refcount(refblock, block_index, 0);

    qcow2_cache_entry_mark_dirty(s->refcount_block_cache, refblock);

    qcow2_cache_put(s->refcount_block_cache, &refblock);
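
    /* Let the next cluster allocation scan start at (or before) the
     * cluster we have just freed */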
    if (cluster_index < s->free_cluster_index) {
        s->free_cluster_index = cluster_index;
    }

    refblock = qcow2_cache_is_table_offset(s->refcount_block_cache,
                                           discard_block_offs);
    if (refblock) {
        /* discard refblock from the cache if refblock is cached */
        qcow2_cache_discard(s->refcount_block_cache, refblock);
    }
    update_refcount_discard(bs, discard_block_offs, s->cluster_size);

    return 0;
}
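
/*
 * Drop all refblocks that reference no allocated cluster (ignoring a
 * refblock's own self-reference): their reftable entries are zeroed both
 * on disk and in memory, and the refblock clusters themselves are freed.
 */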
int coroutine_fn qcow2_shrink_reftable(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t *reftable_tmp =
        g_malloc(s->refcount_table_size * REFTABLE_ENTRY_SIZE);
    int i, ret;

    for (i = 0; i < s->refcount_table_size; i++) {
        int64_t refblock_offs = s->refcount_table[i] & REFT_OFFSET_MASK;
        void *refblock;
        bool unused_block;

        if (refblock_offs == 0) {
            reftable_tmp[i] = 0;
            continue;
        }
        ret = qcow2_cache_get(bs, s->refcount_block_cache, refblock_offs,
                              &refblock);
        if (ret < 0) {
            goto out;
        }

        /* If the refblock covers itself, it holds its own reference; mask
         * that single entry out so that only references to other clusters
         * can keep the block alive */
        if (i == offset_to_reftable_index(s, refblock_offs)) {
            uint64_t block_index = (refblock_offs >> s->cluster_bits) &
                                   (s->refcount_block_size - 1);
            uint64_t refcount = s->get_refcount(refblock, block_index);

            s->set_refcount(refblock, block_index, 0);

            unused_block = buffer_is_zero(refblock, s->cluster_size);

            s->set_refcount(refblock, block_index, refcount);
        } else {
            unused_block = buffer_is_zero(refblock, s->cluster_size);
        }
        qcow2_cache_put(s->refcount_block_cache, &refblock);

        reftable_tmp[i] = unused_block ? 0 : cpu_to_be64(s->refcount_table[i]);
    }
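
    /* reftable_tmp now holds the shrunk reftable in big-endian form,
     * ready to overwrite the current on-disk reftable in place */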
    ret = bdrv_co_pwrite_sync(bs->file, s->refcount_table_offset,
                              s->refcount_table_size * REFTABLE_ENTRY_SIZE,
                              reftable_tmp, 0);
    /*
     * If the write to the reftable failed, the image may contain a partially
     * overwritten reftable. In this case it is better to clear the reftable
     * in memory to avoid possible image corruption.
     */
    for (i = 0; i < s->refcount_table_size; i++) {
        if (s->refcount_table[i] && !reftable_tmp[i]) {
            if (ret == 0) {
                ret = qcow2_discard_refcount_block(bs, s->refcount_table[i] &
                                                       REFT_OFFSET_MASK);
            }
            s->refcount_table[i] = 0;
        }
    }

    if (!s->cache_discards) {
        qcow2_process_discards(bs, ret);
    }

out:
    g_free(reftable_tmp);
    return ret;
}
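
/*
 * Scan backwards from @size and return the index of the last cluster that
 * still has a non-zero refcount, or a negative errno on failure.
 */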
int64_t coroutine_fn qcow2_get_last_cluster(BlockDriverState *bs, int64_t size)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i;

    for (i = size_to_clusters(s, size) - 1; i >= 0; i--) {
        uint64_t refcount;
        int ret = qcow2_get_refcount(bs, i, &refcount);
        if (ret < 0) {
            fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
                    i, strerror(-ret));
            return ret;
        }
        if (refcount > 0) {
            return i;
        }
    }
    qcow2_signal_corruption(bs, true, -1, -1,
                            "There are no references in the refcount table.");
    return -EIO;
}
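
/*
 * Heuristically detect a metadata-preallocated image by comparing the
 * number of clusters referenced by the qcow2 refcounts with the number of
 * clusters the file system has actually allocated.  Returns > 0 if
 * preallocation is detected, 0 if not, and a negative errno on failure.
 */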
int coroutine_fn qcow2_detect_metadata_preallocation(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    int64_t i, end_cluster, cluster_count = 0, threshold;
    int64_t file_length, real_allocation, real_clusters;

    qemu_co_mutex_assert_locked(&s->lock);

    file_length = bdrv_co_getlength(bs->file->bs);
    if (file_length < 0) {
        return file_length;
    }

    real_allocation = bdrv_co_get_allocated_file_size(bs->file->bs);
    if (real_allocation < 0) {
        return real_allocation;
    }

    real_clusters = real_allocation / s->cluster_size;
    threshold = MAX(real_clusters * 10 / 9, real_clusters + 2);
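    /*
     * The threshold above marks the image as metadata-preallocated once the
     * refcounts reference noticeably more clusters than the file system has
     * actually allocated: at least ~11% more (the 10/9 factor), with a floor
     * of two extra clusters so that tiny images do not trigger false
     * positives.
     */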

    end_cluster = size_to_clusters(s, file_length);
    for (i = 0; i < end_cluster && cluster_count < threshold; i++) {
        uint64_t refcount;
        int ret = qcow2_get_refcount(bs, i, &refcount);
        if (ret < 0) {
            return ret;
        }
        cluster_count += !!refcount;
    }

    return cluster_count >= threshold;
}