#ifndef BLOCK_H
#define BLOCK_H

#include "block/aio.h"
#include "block/aio-wait.h"
#include "qemu/iov.h"
#include "qemu/coroutine.h"
#include "block/accounting.h"
#include "block/dirty-bitmap.h"
#include "block/blockjob.h"
#include "qemu/hbitmap.h"
#include "qemu/transactions.h"

/*
 * generated_co_wrapper
 *
 * Function specifier, which does nothing but mark functions to be
 * generated by scripts/block-coroutine-wrapper.py
 *
 * Read more in docs/devel/block-coroutine-wrapper.rst
 */
#define generated_co_wrapper
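
/*
 * For instance, later in this header bdrv_flush() is declared as
 *
 *     int generated_co_wrapper bdrv_flush(BlockDriverState *bs);
 *
 * alongside its coroutine version bdrv_co_flush().  The script generates a
 * synchronous bdrv_flush() that enters bdrv_co_flush() in a coroutine and
 * polls (see BDRV_POLL_WHILE below) until it completes, so the same
 * functionality is usable outside coroutine context.
 */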

/* block.c */
typedef struct BlockDriver BlockDriver;
typedef struct BdrvChild BdrvChild;
typedef struct BdrvChildClass BdrvChildClass;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
    /*
     * True if this block driver only supports compressed writes
     */
    bool needs_compressed_writes;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;

typedef enum {
    BDRV_REQ_COPY_ON_READ       = 0x1,
    BDRV_REQ_ZERO_WRITE         = 0x2,

    /*
     * The BDRV_REQ_MAY_UNMAP flag is used in write_zeroes requests to indicate
     * that the block driver should unmap (discard) blocks if it is guaranteed
     * that the result will read back as zeroes. The flag is only passed to the
     * driver if the block device is opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP          = 0x4,

    BDRV_REQ_FUA                = 0x10,
    BDRV_REQ_WRITE_COMPRESSED   = 0x20,

    /* Signifies that this write request will not change the visible disk
     * content. */
    BDRV_REQ_WRITE_UNCHANGED    = 0x40,

    /* Forces request serialisation. Use only with write requests. */
    BDRV_REQ_SERIALISING        = 0x80,

    /* Execute the request only if the operation can be offloaded or otherwise
     * be executed efficiently, but return an error instead of using a slow
     * fallback. */
    BDRV_REQ_NO_FALLBACK        = 0x100,

    /*
     * BDRV_REQ_PREFETCH makes sense only in the context of copy-on-read
     * (i.e., together with the BDRV_REQ_COPY_ON_READ flag or when a COR
     * filter is involved), in which case it signals that the COR operation
     * need not read the data into memory (qiov) but only ensure it is
     * copied to the top layer (i.e., that the COR operation is done).
     */
    BDRV_REQ_PREFETCH           = 0x200,

    /*
     * If we need to wait for other requests, just fail immediately. Used
     * only together with BDRV_REQ_SERIALISING.
     */
    BDRV_REQ_NO_WAIT            = 0x400,

    /* Mask of valid flags */
    BDRV_REQ_MASK               = 0x7ff,
} BdrvRequestFlags;
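
/*
 * Usage sketch (hypothetical call site, not part of this header): flags are
 * OR'ed into a single BdrvRequestFlags value, e.g. a write-zeroes request
 * that may discard blocks as long as they read back as zeroes:
 *
 *     ret = bdrv_co_pwrite_zeroes(child, offset, bytes,
 *                                 BDRV_REQ_MAY_UNMAP | BDRV_REQ_FUA);
 *
 * Any combination must stay within BDRV_REQ_MASK to be valid.
 */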

typedef struct BlockSizes {
    uint32_t phys;
    uint32_t log;
} BlockSizes;

typedef struct HDGeometry {
    uint32_t heads;
    uint32_t sectors;
    uint32_t cylinders;
} HDGeometry;

#define BDRV_O_NO_SHARE    0x0001 /* don't share permissions */
#define BDRV_O_RDWR        0x0002
#define BDRV_O_RESIZE      0x0004 /* request permission for resizing the node */
#define BDRV_O_SNAPSHOT    0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_TEMPORARY   0x0010 /* delete the file after use */
#define BDRV_O_NOCACHE     0x0020 /* do not use the host page cache */
#define BDRV_O_NATIVE_AIO  0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING  0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH    0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INACTIVE    0x0800  /* consistency hint for migration handoff */
#define BDRV_O_CHECK       0x1000  /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR  0x2000  /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP       0x4000  /* execute guest UNMAP/TRIM operations */
#define BDRV_O_PROTOCOL    0x8000  /* if no block driver is explicitly given:
                                      select an appropriate protocol driver,
                                      ignoring the format layer */
#define BDRV_O_NO_IO       0x10000 /* don't initialize for I/O */
#define BDRV_O_AUTO_RDONLY 0x20000 /* degrade to read-only if opening read-write fails */
#define BDRV_O_IO_URING    0x40000 /* use io_uring instead of the thread pool */

#define BDRV_O_CACHE_MASK  (BDRV_O_NOCACHE | BDRV_O_NO_FLUSH)
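
/*
 * Composition example (illustrative only): the open flags for a read-write
 * node that bypasses the host page cache (the classic cache=none mode)
 * would be built as
 *
 *     int flags = BDRV_O_RDWR | BDRV_O_NOCACHE;
 *
 * while BDRV_O_NO_FLUSH makes the node ignore flush requests entirely,
 * which is the dangerous half of the cache=unsafe mode.
 */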

/* Option names of options parsed by the block layer */

#define BDRV_OPT_CACHE_WB       "cache.writeback"
#define BDRV_OPT_CACHE_DIRECT   "cache.direct"
#define BDRV_OPT_CACHE_NO_FLUSH "cache.no-flush"
#define BDRV_OPT_READ_ONLY      "read-only"
#define BDRV_OPT_AUTO_READ_ONLY "auto-read-only"
#define BDRV_OPT_DISCARD        "discard"
#define BDRV_OPT_FORCE_SHARE    "force-share"
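
/*
 * These strings are the QDict keys used when opening a node.  A sketch of
 * a hypothetical options dictionary equivalent to cache=none,read-only
 * (error handling omitted):
 *
 *     QDict *opts = qdict_new();
 *     qdict_put_bool(opts, BDRV_OPT_CACHE_DIRECT, true);
 *     qdict_put_bool(opts, BDRV_OPT_READ_ONLY, true);
 *     bs = bdrv_open(filename, NULL, opts, 0, errp);
 */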

#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)

#define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                           INT_MAX >> BDRV_SECTOR_BITS)
#define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
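
/*
 * With 512-byte sectors (BDRV_SECTOR_BITS == 9), converting between the two
 * units is a simple shift, e.g.:
 *
 *     int64_t bytes   = nb_sectors << BDRV_SECTOR_BITS;  // sectors -> bytes
 *     int64_t sectors = bytes >> BDRV_SECTOR_BITS;       // bytes -> sectors
 *
 * On typical 64-bit hosts BDRV_REQUEST_MAX_BYTES therefore works out to
 * INT_MAX rounded down to a sector boundary (0x7ffffe00).
 */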

/*
 * We want to allow aligning requests and disk length up to any 32-bit
 * alignment without fear of overflow.
 * To achieve that, and at the same time use a round number as the maximum
 * disk size, define the maximum "length" (a limit for any offset/bytes
 * request and for the disk size) to be INT64_MAX aligned down to the
 * maximum alignment, i.e. the greatest multiple of 2^30 below INT64_MAX.
 */
#define BDRV_MAX_ALIGNMENT (1L << 30)
#define BDRV_MAX_LENGTH (QEMU_ALIGN_DOWN(INT64_MAX, BDRV_MAX_ALIGNMENT))
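
/*
 * Numerically: BDRV_MAX_ALIGNMENT is 2^30 and BDRV_MAX_LENGTH is
 * 0x7fffffffc0000000, i.e. 2^63 - 2^30.  Any request with
 * offset + bytes <= BDRV_MAX_LENGTH can thus be aligned up to a 2^30
 * boundary without overflowing int64_t.
 */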

/*
 * Allocation status flags for bdrv_block_status() and friends.
 *
 * Public flags:
 * BDRV_BLOCK_DATA: allocation for data at offset is tied to this layer
 * BDRV_BLOCK_ZERO: offset reads as zero
 * BDRV_BLOCK_OFFSET_VALID: an associated offset exists for accessing raw data
 * BDRV_BLOCK_ALLOCATED: the content of the block is determined by this
 *                       layer rather than any backing, set by block layer
 * BDRV_BLOCK_EOF: the returned pnum covers through end of file for this
 *                 layer, set by block layer
 *
 * Internal flags:
 * BDRV_BLOCK_RAW: for use by passthrough drivers, such as raw, to request
 *                 that the block layer recompute the answer from the returned
 *                 BDS; must be accompanied by just BDRV_BLOCK_OFFSET_VALID.
 * BDRV_BLOCK_RECURSE: request that the block layer will recursively search for
 *                     zeroes in file child of current block node inside
 *                     returned region. Only valid together with both
 *                     BDRV_BLOCK_DATA and BDRV_BLOCK_OFFSET_VALID. Should not
 *                     appear with BDRV_BLOCK_ZERO.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, the map parameter represents the
 * host offset within the returned BDS that is allocated for the
 * corresponding raw guest data.  However, whether that offset
 * actually contains data also depends on BDRV_BLOCK_DATA, as follows:
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, returned file is zero at offset
 *  t    f        t       sectors read as valid from file at offset
 *  f    t        t       sectors preallocated, read as zero, returned file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        returned file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         0x01
#define BDRV_BLOCK_ZERO         0x02
#define BDRV_BLOCK_OFFSET_VALID 0x04
#define BDRV_BLOCK_RAW          0x08
#define BDRV_BLOCK_ALLOCATED    0x10
#define BDRV_BLOCK_EOF          0x20
#define BDRV_BLOCK_RECURSE      0x40
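
/*
 * Interpretation sketch (hypothetical caller, not part of this header):
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *     int ret = bdrv_block_status(bs, offset, bytes, &pnum, &map, &file);
 *     if (ret < 0) {
 *         // error
 *     } else if (ret & BDRV_BLOCK_ZERO) {
 *         // the next pnum bytes read as zeroes
 *     } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID)) ==
 *                (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID)) {
 *         // data lives at byte offset 'map' within 'file'
 *     }
 */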

typedef QTAILQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

typedef struct BDRVReopenState {
    BlockDriverState *bs;
    int flags;
    BlockdevDetectZeroesOptions detect_zeroes;
    bool backing_missing;
    BlockDriverState *old_backing_bs; /* keep pointer for permissions update */
    BlockDriverState *old_file_bs; /* keep pointer for permissions update */
    QDict *options;
    QDict *explicit_options;
    void *opaque;
} BDRVReopenState;

/*
 * Block operation types
 */
typedef enum BlockOpType {
    BLOCK_OP_TYPE_BACKUP_SOURCE,
    BLOCK_OP_TYPE_BACKUP_TARGET,
    BLOCK_OP_TYPE_CHANGE,
    BLOCK_OP_TYPE_COMMIT_SOURCE,
    BLOCK_OP_TYPE_COMMIT_TARGET,
    BLOCK_OP_TYPE_DATAPLANE,
    BLOCK_OP_TYPE_DRIVE_DEL,
    BLOCK_OP_TYPE_EJECT,
    BLOCK_OP_TYPE_EXTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT,
    BLOCK_OP_TYPE_INTERNAL_SNAPSHOT_DELETE,
    BLOCK_OP_TYPE_MIRROR_SOURCE,
    BLOCK_OP_TYPE_MIRROR_TARGET,
    BLOCK_OP_TYPE_RESIZE,
    BLOCK_OP_TYPE_STREAM,
    BLOCK_OP_TYPE_REPLACE,
    BLOCK_OP_TYPE_MAX,
} BlockOpType;

/* Block node permission constants */
enum {
    /**
     * A user that has the "permission" of consistent reads is guaranteed that
     * their view of the contents of the block device is complete and
     * self-consistent, representing the contents of a disk at a specific
     * point.
     *
     * For most block devices (including their backing files) this is true, but
     * the property cannot be maintained in a few situations like for
     * intermediate nodes of a commit block job.
     */
    BLK_PERM_CONSISTENT_READ    = 0x01,

    /** This permission is required to change the visible disk contents. */
    BLK_PERM_WRITE              = 0x02,

    /**
     * This permission (which is weaker than BLK_PERM_WRITE) is both enough and
     * required for writes to the block node when the caller promises that
     * the visible disk content doesn't change.
     *
     * As the BLK_PERM_WRITE permission is strictly stronger, either is
     * sufficient to perform an unchanging write.
     */
    BLK_PERM_WRITE_UNCHANGED    = 0x04,

    /** This permission is required to change the size of a block node. */
    BLK_PERM_RESIZE             = 0x08,

    /**
     * This permission is required to change the node that this BdrvChild
     * points to.
     */
    BLK_PERM_GRAPH_MOD          = 0x10,

    BLK_PERM_ALL                = 0x1f,

    DEFAULT_PERM_PASSTHROUGH    = BLK_PERM_CONSISTENT_READ
                                  | BLK_PERM_WRITE
                                  | BLK_PERM_WRITE_UNCHANGED
                                  | BLK_PERM_RESIZE,

    DEFAULT_PERM_UNCHANGED      = BLK_PERM_ALL & ~DEFAULT_PERM_PASSTHROUGH,
};
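
/*
 * Example (illustrative): a parent that reads and writes guest-visible data
 * but never resizes the node might request
 *
 *     uint64_t perm   = BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
 *     uint64_t shared = BLK_PERM_ALL & ~BLK_PERM_WRITE;
 *
 * i.e. it needs consistent reads and writes for itself, and tolerates any
 * other user as long as that user does not also write.
 */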

/*
 * Flags that parent nodes assign to child nodes to specify what kind of
 * role(s) they take.
 *
 * At least one of DATA, METADATA, FILTERED, or COW must be set for
 * every child.
 */
enum BdrvChildRoleBits {
    /*
     * This child stores data.
     * Any node may have an arbitrary number of such children.
     */
    BDRV_CHILD_DATA         = (1 << 0),

    /*
     * This child stores metadata.
     * Any node may have an arbitrary number of metadata-storing
     * children.
     */
    BDRV_CHILD_METADATA     = (1 << 1),

    /*
     * A child that always presents exactly the same visible data as
     * the parent, e.g. by virtue of the parent forwarding all reads
     * and writes.
     * This flag is mutually exclusive with DATA, METADATA, and COW.
     * Any node may have at most one filtered child at a time.
     */
    BDRV_CHILD_FILTERED     = (1 << 2),

    /*
     * Child from which to read all data that isn't allocated in the
     * parent (i.e., the backing child); such data is copied to the
     * parent through COW (and optionally COR).
     * This field is mutually exclusive with DATA, METADATA, and
     * FILTERED.
     * Any node may have at most one such backing child at a time.
     */
    BDRV_CHILD_COW          = (1 << 3),

    /*
     * The primary child.  For most drivers, this is the child whose
     * filename applies best to the parent node.
     * Any node may have at most one primary child at a time.
     */
    BDRV_CHILD_PRIMARY      = (1 << 4),

    /* Useful combination of flags */
    BDRV_CHILD_IMAGE        = BDRV_CHILD_DATA
                              | BDRV_CHILD_METADATA
                              | BDRV_CHILD_PRIMARY,
};

/* Mask of BdrvChildRoleBits values */
typedef unsigned int BdrvChildRole;
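
/*
 * Example role combinations (illustrative): a format node such as qcow2
 * would typically attach its "file" child as BDRV_CHILD_IMAGE (data +
 * metadata + primary) and its backing file as BDRV_CHILD_COW, while a
 * filter driver would attach its single child as
 * BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY.
 */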

char *bdrv_perm_names(uint64_t perm);
uint64_t bdrv_qapi_perm_to_blk_perm(BlockPermission qapi_perm);

/* disk I/O throttling */
void bdrv_init(void);
void bdrv_init_with_whitelist(void);
bool bdrv_uses_whitelist(void);
int bdrv_is_whitelisted(BlockDriver *drv, bool read_only);
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix,
                                Error **errp);
BlockDriver *bdrv_find_format(const char *format_name);
int bdrv_create(BlockDriver *drv, const char* filename,
                QemuOpts *opts, Error **errp);
int bdrv_create_file(const char *filename, QemuOpts *opts, Error **errp);

BlockDriverState *bdrv_new(void);
int bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top,
                Error **errp);
int bdrv_replace_node(BlockDriverState *from, BlockDriverState *to,
                      Error **errp);
int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
                          Error **errp);
BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *node_options,
                                   int flags, Error **errp);
int bdrv_drop_filter(BlockDriverState *bs, Error **errp);

int bdrv_parse_aio(const char *mode, int *flags);
int bdrv_parse_cache_mode(const char *mode, int *flags, bool *writethrough);
int bdrv_parse_discard_flags(const char *mode, int *flags);
BdrvChild *bdrv_open_child(const char *filename,
                           QDict *options, const char *bdref_key,
                           BlockDriverState *parent,
                           const BdrvChildClass *child_class,
                           BdrvChildRole child_role,
                           bool allow_none, Error **errp);
BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp);
int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
                        Error **errp);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options,
                           const char *bdref_key, Error **errp);
BlockDriverState *bdrv_open(const char *filename, const char *reference,
                            QDict *options, int flags, Error **errp);
BlockDriverState *bdrv_new_open_driver_opts(BlockDriver *drv,
                                            const char *node_name,
                                            QDict *options, int flags,
                                            Error **errp);
BlockDriverState *bdrv_new_open_driver(BlockDriver *drv, const char *node_name,
                                       int flags, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, QDict *options,
                                    bool keep_old_opts);
void bdrv_reopen_queue_free(BlockReopenQueue *bs_queue);
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
int bdrv_reopen(BlockDriverState *bs, QDict *opts, bool keep_old_opts,
                Error **errp);
int bdrv_reopen_set_read_only(BlockDriverState *bs, bool read_only,
                              Error **errp);
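
/*
 * Reopen usage sketch (hypothetical caller): queue one or more nodes, then
 * apply all changes atomically:
 *
 *     BlockReopenQueue *queue;
 *     queue = bdrv_reopen_queue(NULL, bs, options, true);
 *     ret = bdrv_reopen_multiple(queue, errp);
 *
 * Either every node in the queue is reopened with its new options or none
 * is.  Callers are expected to keep the affected nodes drained around the
 * queue/commit sequence (bdrv_reopen() handles that internally).
 */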

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int64_t bytes, BdrvRequestFlags flags);
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags);
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes);
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
                int64_t bytes);
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int64_t bytes);
/*
 * Efficiently zero a region of the disk image.  Note that this is a regular
 * I/O request like read or write and should have a reasonable size.  This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
                                       int64_t bytes, BdrvRequestFlags flags);
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
                                          const char *backing_file);
void bdrv_refresh_filename(BlockDriverState *bs);

int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
                                  PreallocMode prealloc, BdrvRequestFlags flags,
                                  Error **errp);
int generated_co_wrapper
bdrv_truncate(BdrvChild *child, int64_t offset, bool exact,
              PreallocMode prealloc, BdrvRequestFlags flags, Error **errp);

int64_t bdrv_nb_sectors(BlockDriverState *bs);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
BlockMeasureInfo *bdrv_measure(BlockDriver *drv, QemuOpts *opts,
                               BlockDriverState *in_bs, Error **errp);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp);
int bdrv_commit(BlockDriverState *bs);
int bdrv_make_empty(BdrvChild *c, Error **errp);
int bdrv_change_backing_file(BlockDriverState *bs, const char *backing_file,
                             const char *backing_fmt, bool warn);
void bdrv_register(BlockDriver *bdrv);
int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
                           const char *backing_file_str);
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs);
BlockDriverState *bdrv_find_base(BlockDriverState *bs);
bool bdrv_is_backing_chain_frozen(BlockDriverState *bs, BlockDriverState *base,
                                  Error **errp);
int bdrv_freeze_backing_chain(BlockDriverState *bs, BlockDriverState *base,
                              Error **errp);
void bdrv_unfreeze_backing_chain(BlockDriverState *bs, BlockDriverState *base);
int coroutine_fn bdrv_co_delete_file(BlockDriverState *bs, Error **errp);
void coroutine_fn bdrv_co_delete_file_noerr(BlockDriverState *bs);


typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;
    int leaks_fixed;
    int64_t image_end_offset;
    BlockFragInfo bfi;
} BdrvCheckResult;

typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int generated_co_wrapper bdrv_check(BlockDriverState *bs, BdrvCheckResult *res,
                                    BdrvCheckMode fix);

/* The units of offset and total_work_size may be chosen arbitrarily by the
 * block driver; total_work_size may change during the course of the amendment
 * operation */
typedef void BlockDriverAmendStatusCB(BlockDriverState *bs, int64_t offset,
                                      int64_t total_work_size, void *opaque);
int bdrv_amend_options(BlockDriverState *bs_new, QemuOpts *opts,
                       BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
                       bool force,
                       Error **errp);

/* check if a named node can be replaced when doing drive-mirror */
BlockDriverState *check_to_replace_node(BlockDriverState *parent_bs,
                                        const char *node_name, Error **errp);

/* async block I/O */
void bdrv_aio_cancel(BlockAIOCB *acb);
void bdrv_aio_cancel_async(BlockAIOCB *acb);

/* sg packet commands */
int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf);

/* Invalidate any cached metadata used by image formats */
int generated_co_wrapper bdrv_invalidate_cache(BlockDriverState *bs,
                                               Error **errp);
void bdrv_invalidate_cache_all(Error **errp);
int bdrv_inactivate_all(void);

/* Ensure contents are flushed to disk. */
int generated_co_wrapper bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain(BlockDriverState *bs);
void coroutine_fn bdrv_co_drain(BlockDriverState *bs);
void bdrv_drain_all_begin(void);
void bdrv_drain_all_end(void);
void bdrv_drain_all(void);

#define BDRV_POLL_WHILE(bs, cond) ({                       \
    BlockDriverState *bs_ = (bs);                          \
    AIO_WAIT_WHILE(bdrv_get_aio_context(bs_),              \
                   cond); })
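
/*
 * Usage sketch (hypothetical): run the event loop until some coroutine has
 * finished, the pattern used by the synchronous generated_co_wrapper
 * functions:
 *
 *     bool done = false;
 *     // ...spawn a coroutine that sets done = true when it finishes...
 *     BDRV_POLL_WHILE(bs, !done);
 *
 * The macro keeps dispatching events for the AioContext of bs until the
 * condition becomes false.
 */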
|
2016-10-27 13:48:55 +03:00
|
|
|
|
2020-09-24 21:54:12 +03:00
|
|
|
int generated_co_wrapper bdrv_pdiscard(BdrvChild *child, int64_t offset,
|
|
|
|
int64_t bytes);
|
2019-04-23 15:57:05 +03:00
|
|
|
int bdrv_co_pdiscard(BdrvChild *child, int64_t offset, int64_t bytes);
|
2013-06-28 14:47:42 +04:00
|
|
|
int bdrv_has_zero_init_1(BlockDriverState *bs);
|
2010-04-14 19:30:35 +04:00
|
|
|
int bdrv_has_zero_init(BlockDriverState *bs);
|
2013-10-24 14:06:54 +04:00
|
|
|
bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
|
block: Convert bdrv_get_block_status() to bytes
We are gradually moving away from sector-based interfaces, towards
byte-based. In the common case, allocation is unlikely to ever use
values that are not naturally sector-aligned, but it is possible
that byte-based values will let us be more precise about allocation
at the end of an unaligned file that can do byte-based access.
Changing the name of the function from bdrv_get_block_status() to
bdrv_block_status() ensures that the compiler enforces that all
callers are updated. For now, the io.c layer still assert()s that
all callers are sector-aligned, but that can be relaxed when a later
patch implements byte-based block status in the drivers.
There was an inherent limitation in returning the offset via the
return value: we only have room for BDRV_BLOCK_OFFSET_MASK bits, which
means an offset can only be mapped for sector-aligned queries (or,
if we declare that non-aligned input is at the same relative position
modulo 512 of the answer), so the new interface also changes things to
return the offset via output through a parameter by reference rather
than mashed into the return value. We'll have some glue code that
munges between the two styles until we finish converting all uses.
For the most part this patch is just the addition of scaling at the
callers followed by inverse scaling at bdrv_block_status(), coupled
with the tweak in calling convention. But some code, particularly
bdrv_is_allocated(), gets a lot simpler because it no longer has to
mess with sectors.
For ease of review, bdrv_get_block_status_above() will be tackled
separately.
Signed-off-by: Eric Blake <eblake@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2017-10-12 06:47:03 +03:00
|
|
|
int bdrv_block_status(BlockDriverState *bs, int64_t offset,
|
|
|
|
int64_t bytes, int64_t *pnum, int64_t *map,
|
|
|
|
BlockDriverState **file);
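
/*
 * Illustrative sketch, not part of the original header: walking an image's
 * extents with bdrv_block_status().  *pnum receives the number of contiguous
 * bytes sharing the same status at @offset, so advancing by *pnum visits each
 * extent exactly once.  The function name is hypothetical; BDRV_BLOCK_DATA is
 * the status flag defined earlier in this header.
 */
static inline int example_count_data_bytes(BlockDriverState *bs,
                                           int64_t total_bytes,
                                           int64_t *data_bytes)
{
    int64_t offset = 0;

    *data_bytes = 0;
    while (offset < total_bytes) {
        BlockDriverState *file;
        int64_t pnum, map;
        int ret = bdrv_block_status(bs, offset, total_bytes - offset,
                                    &pnum, &map, &file);
        if (ret < 0) {
            return ret;               /* negative errno on failure */
        }
        if (ret & BDRV_BLOCK_DATA) {
            *data_bytes += pnum;      /* extent is backed by stored data */
        }
        offset += pnum;               /* jump to the next extent */
    }
    return 0;
}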
|
2017-10-12 06:47:08 +03:00
|
|
|
int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
|
|
|
|
int64_t offset, int64_t bytes, int64_t *pnum,
|
|
|
|
int64_t *map, BlockDriverState **file);
|
2017-07-07 15:44:57 +03:00
|
|
|
int bdrv_is_allocated(BlockDriverState *bs, int64_t offset, int64_t bytes,
|
|
|
|
int64_t *pnum);
|
2013-02-13 12:09:39 +04:00
|
|
|
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
|
2019-05-29 20:56:14 +03:00
|
|
|
bool include_base, int64_t offset, int64_t bytes,
|
|
|
|
int64_t *pnum);
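
/*
 * Illustrative sketch, not part of the original header: finding the first
 * allocated byte in the top layer with bdrv_is_allocated(), which returns
 * >0 if allocated, 0 if not, and <0 on error, with *pnum telling how far
 * that answer extends.  The function name and the convention of returning
 * @total_bytes when nothing is allocated are hypothetical.
 */
static inline int64_t example_first_allocated(BlockDriverState *bs,
                                              int64_t total_bytes)
{
    int64_t offset = 0;

    while (offset < total_bytes) {
        int64_t pnum;
        int ret = bdrv_is_allocated(bs, offset, total_bytes - offset, &pnum);
        if (ret < 0) {
            return ret;       /* negative errno on failure */
        }
        if (ret) {
            return offset;    /* start of the first allocated extent */
        }
        offset += pnum;       /* skip the unallocated extent */
    }
    return total_bytes;       /* fully unallocated */
}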
|
2020-10-26 19:58:27 +03:00
|
|
|
int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
|
|
|
|
int64_t bytes);
|
2007-11-11 05:51:17 +03:00
|
|
|
|
2016-06-24 01:37:26 +03:00
|
|
|
bool bdrv_is_read_only(BlockDriverState *bs);
|
2017-08-03 18:02:58 +03:00
|
|
|
int bdrv_can_set_read_only(BlockDriverState *bs, bool read_only,
|
|
|
|
bool ignore_allow_rdw, Error **errp);
|
2018-10-12 12:27:41 +03:00
|
|
|
int bdrv_apply_auto_read_only(BlockDriverState *bs, const char *errmsg,
|
|
|
|
Error **errp);
|
2018-06-06 22:37:00 +03:00
|
|
|
bool bdrv_is_writable(BlockDriverState *bs);
|
2016-06-24 01:37:26 +03:00
|
|
|
bool bdrv_is_sg(BlockDriverState *bs);
|
2015-10-19 18:53:11 +03:00
|
|
|
bool bdrv_is_inserted(BlockDriverState *bs);
|
2011-09-06 20:58:47 +04:00
|
|
|
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
|
2012-02-03 22:24:53 +04:00
|
|
|
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
|
2012-06-13 12:11:48 +04:00
|
|
|
const char *bdrv_get_format_name(BlockDriverState *bs);
|
2014-01-24 00:31:32 +04:00
|
|
|
BlockDriverState *bdrv_find_node(const char *node_name);
|
2020-01-20 11:50:49 +03:00
|
|
|
BlockDeviceInfoList *bdrv_named_nodes_list(bool flat, Error **errp);
|
2018-12-21 20:09:07 +03:00
|
|
|
XDbgBlockGraph *bdrv_get_xdbg_block_graph(Error **errp);
|
2014-01-24 00:31:35 +04:00
|
|
|
BlockDriverState *bdrv_lookup_bs(const char *device,
|
|
|
|
const char *node_name,
|
|
|
|
Error **errp);
|
2014-06-25 23:40:09 +04:00
|
|
|
bool bdrv_chain_contains(BlockDriverState *top, BlockDriverState *base);
|
2014-10-31 06:32:54 +03:00
|
|
|
BlockDriverState *bdrv_next_node(BlockDriverState *bs);
|
2018-03-28 19:29:18 +03:00
|
|
|
BlockDriverState *bdrv_next_all_states(BlockDriverState *bs);
|
2016-05-20 19:49:07 +03:00
|
|
|
|
|
|
|
typedef struct BdrvNextIterator {
|
|
|
|
enum {
|
|
|
|
BDRV_NEXT_BACKEND_ROOTS,
|
|
|
|
BDRV_NEXT_MONITOR_OWNED,
|
|
|
|
} phase;
|
|
|
|
BlockBackend *blk;
|
|
|
|
BlockDriverState *bs;
|
|
|
|
} BdrvNextIterator;
|
|
|
|
|
|
|
|
BlockDriverState *bdrv_first(BdrvNextIterator *it);
|
|
|
|
BlockDriverState *bdrv_next(BdrvNextIterator *it);
|
2017-11-10 20:25:45 +03:00
|
|
|
void bdrv_next_cleanup(BdrvNextIterator *it);
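
/*
 * Illustrative sketch, not part of the original header: iterating over all
 * top-level nodes.  Completing the loop releases the references taken
 * internally by bdrv_first()/bdrv_next(); bdrv_next_cleanup() is only
 * needed when leaving the loop early.  The function name is hypothetical.
 */
static inline unsigned example_count_nodes(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs;
    unsigned count = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        count++;    /* visits backend roots, then monitor-owned nodes */
    }
    return count;
}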
|
2016-05-20 19:49:07 +03:00
|
|
|
|
2016-03-16 21:54:41 +03:00
|
|
|
BlockDriverState *bdrv_next_monitor_owned(BlockDriverState *bs);
|
2019-06-12 23:57:15 +03:00
|
|
|
bool bdrv_supports_compressed_writes(BlockDriverState *bs);
|
2007-11-11 05:51:17 +03:00
|
|
|
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
|
2019-03-07 16:33:58 +03:00
|
|
|
void *opaque, bool read_only);
|
2014-10-31 06:32:55 +03:00
|
|
|
const char *bdrv_get_node_name(const BlockDriverState *bs);
|
2014-10-07 15:59:11 +04:00
|
|
|
const char *bdrv_get_device_name(const BlockDriverState *bs);
|
2015-04-08 12:29:18 +03:00
|
|
|
const char *bdrv_get_device_or_node_name(const BlockDriverState *bs);
|
2012-06-05 18:49:24 +04:00
|
|
|
int bdrv_get_flags(BlockDriverState *bs);
|
2007-11-11 05:51:17 +03:00
|
|
|
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
|
2019-02-08 18:06:06 +03:00
|
|
|
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs,
|
|
|
|
Error **errp);
|
2019-09-23 15:17:37 +03:00
|
|
|
BlockStatsSpecific *bdrv_get_specific_stats(BlockDriverState *bs);
|
2013-01-21 20:09:42 +04:00
|
|
|
void bdrv_round_to_clusters(BlockDriverState *bs,
|
2017-10-12 06:46:59 +03:00
|
|
|
int64_t offset, int64_t bytes,
|
2016-06-02 12:41:52 +03:00
|
|
|
int64_t *cluster_offset,
|
2017-10-12 06:46:59 +03:00
|
|
|
int64_t *cluster_bytes);
|
2007-11-11 05:51:17 +03:00
|
|
|
|
|
|
|
void bdrv_get_backing_filename(BlockDriverState *bs,
|
|
|
|
char *filename, int filename_size);
|
2019-02-01 22:29:15 +03:00
|
|
|
char *bdrv_get_full_backing_filename(BlockDriverState *bs, Error **errp);
|
2019-02-01 22:29:14 +03:00
|
|
|
char *bdrv_get_full_backing_filename_from_filename(const char *backed,
|
|
|
|
const char *backing,
|
|
|
|
Error **errp);
|
2019-02-01 22:29:18 +03:00
|
|
|
char *bdrv_dirname(BlockDriverState *bs, Error **errp);
|
2007-11-11 05:51:17 +03:00
|
|
|
|
2014-12-03 16:57:22 +03:00
|
|
|
int path_has_protocol(const char *path);
|
2007-11-11 05:51:17 +03:00
|
|
|
int path_is_absolute(const char *path);
|
2019-02-01 22:29:13 +03:00
|
|
|
char *path_combine(const char *base_path, const char *filename);
|
2007-11-11 05:51:17 +03:00
|
|
|
|
2020-09-24 21:54:14 +03:00
|
|
|
int generated_co_wrapper
|
|
|
|
bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
|
|
|
|
int generated_co_wrapper
|
|
|
|
bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
|
2009-07-11 01:11:57 +04:00
|
|
|
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
|
|
|
|
int64_t pos, int size);
|
2009-04-05 23:10:55 +04:00
|
|
|
|
2009-07-11 01:11:57 +04:00
|
|
|
int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
|
|
|
|
int64_t pos, int size);
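
/*
 * Illustrative sketch, not part of the original header: round-tripping a
 * small blob through an image's VM state area (supported e.g. by qcow2).
 * Both helpers return the number of bytes processed or a negative errno.
 * The function name is hypothetical.
 */
static inline int example_vmstate_roundtrip(BlockDriverState *bs)
{
    const uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];
    int ret;

    ret = bdrv_save_vmstate(bs, out, 0, sizeof(out));
    if (ret < 0) {
        return ret;
    }
    return bdrv_load_vmstate(bs, in, 0, sizeof(in));   /* read it back */
}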
|
2009-04-05 23:10:55 +04:00
|
|
|
|
2012-11-30 16:52:09 +04:00
|
|
|
void bdrv_img_create(const char *filename, const char *fmt,
|
|
|
|
const char *base_filename, const char *base_fmt,
|
2013-02-13 12:09:40 +04:00
|
|
|
char *options, uint64_t img_size, int flags,
|
2017-04-21 15:27:01 +03:00
|
|
|
bool quiet, Error **errp);
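
/*
 * Illustrative sketch, not part of the original header: creating a 1 GiB
 * qcow2 image, roughly what qemu-img create does internally.  The file name
 * is hypothetical; NULL is passed for the backing file, backing format and
 * extra options.
 */
static inline void example_create_image(Error **errp)
{
    bdrv_img_create("/tmp/example.qcow2", "qcow2",
                    NULL, NULL,              /* no backing file/format */
                    NULL,                    /* no "key=value" options */
                    (uint64_t)1 << 30,       /* 1 GiB virtual size */
                    0,                       /* flags */
                    true,                    /* quiet */
                    errp);
}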
|
2010-12-16 15:52:15 +03:00
|
|
|
|
2013-11-28 13:23:32 +04:00
|
|
|
/* Returns the alignment in bytes that is required so that no bounce buffer
|
|
|
|
 * is needed anywhere in the stack */
|
2015-05-12 17:30:55 +03:00
|
|
|
size_t bdrv_min_mem_align(BlockDriverState *bs);
|
|
|
|
/* Returns optimal alignment in bytes for bounce buffer */
|
2013-11-28 13:23:32 +04:00
|
|
|
size_t bdrv_opt_mem_align(BlockDriverState *bs);
|
2011-08-03 17:08:19 +04:00
|
|
|
void *qemu_blockalign(BlockDriverState *bs, size_t size);
|
2014-10-22 16:09:27 +04:00
|
|
|
void *qemu_blockalign0(BlockDriverState *bs, size_t size);
|
2014-05-20 14:24:05 +04:00
|
|
|
void *qemu_try_blockalign(BlockDriverState *bs, size_t size);
|
2014-10-22 16:09:27 +04:00
|
|
|
void *qemu_try_blockalign0(BlockDriverState *bs, size_t size);
|
2013-01-11 19:41:27 +04:00
|
|
|
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
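
/*
 * Illustrative sketch, not part of the original header: allocating a buffer
 * aligned for direct I/O on @bs.  qemu_blockalign() aborts on allocation
 * failure while the try-variants return NULL; either way the buffer is
 * released with qemu_vfree().  The function name is hypothetical.
 */
static inline void *example_alloc_io_buffer(BlockDriverState *bs, size_t size)
{
    /* Zero-initialised variant; qemu_try_blockalign() leaves it raw */
    void *buf = qemu_try_blockalign0(bs, size);

    if (!buf) {
        return NULL;    /* caller reports -ENOMEM or falls back */
    }
    return buf;
}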
|
2011-08-03 17:08:19 +04:00
|
|
|
|
2011-11-28 20:08:47 +04:00
|
|
|
void bdrv_enable_copy_on_read(BlockDriverState *bs);
|
|
|
|
void bdrv_disable_copy_on_read(BlockDriverState *bs);
|
|
|
|
|
2013-08-23 05:14:46 +04:00
|
|
|
void bdrv_ref(BlockDriverState *bs);
|
|
|
|
void bdrv_unref(BlockDriverState *bs);
|
2015-06-15 14:51:04 +03:00
|
|
|
void bdrv_unref_child(BlockDriverState *parent, BdrvChild *child);
|
2016-05-10 10:36:38 +03:00
|
|
|
BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
|
|
|
|
BlockDriverState *child_bs,
|
|
|
|
const char *child_name,
|
2020-05-13 14:05:13 +03:00
|
|
|
const BdrvChildClass *child_class,
|
2020-05-13 14:05:15 +03:00
|
|
|
BdrvChildRole child_role,
|
2016-12-21 00:21:17 +03:00
|
|
|
Error **errp);
|
2010-03-15 19:27:00 +03:00
|
|
|
|
2014-05-23 17:29:42 +04:00
|
|
|
bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp);
|
|
|
|
void bdrv_op_block(BlockDriverState *bs, BlockOpType op, Error *reason);
|
|
|
|
void bdrv_op_unblock(BlockDriverState *bs, BlockOpType op, Error *reason);
|
|
|
|
void bdrv_op_block_all(BlockDriverState *bs, Error *reason);
|
|
|
|
void bdrv_op_unblock_all(BlockDriverState *bs, Error *reason);
|
|
|
|
bool bdrv_op_blocker_is_empty(BlockDriverState *bs);
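
/*
 * Illustrative sketch, not part of the original header: temporarily blocking
 * resizes on @bs.  The Error object passed to bdrv_op_block() identifies the
 * blocker and must be passed again to unblock.  The function name and flow
 * are hypothetical.
 */
static inline bool example_block_resize(BlockDriverState *bs, Error *reason,
                                        Error **errp)
{
    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_RESIZE, errp)) {
        return false;                             /* already blocked */
    }
    bdrv_op_block(bs, BLOCK_OP_TYPE_RESIZE, reason);
    /* ... perform the activity that must not race with a resize ... */
    bdrv_op_unblock(bs, BLOCK_OP_TYPE_RESIZE, reason);
    return true;
}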
|
|
|
|
|
2015-06-16 15:19:22 +03:00
|
|
|
#define BLKDBG_EVENT(child, evt) \
|
|
|
|
do { \
|
|
|
|
if (child) { \
|
|
|
|
bdrv_debug_event(child->bs, evt); \
|
|
|
|
} \
|
|
|
|
} while (0)
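
/*
 * Illustrative sketch, not part of the original header: a format driver
 * fires an event on its file child right before submitting a read, so that
 * blkdebug can inject an error or suspend the request at that point.  The
 * function name is hypothetical; this belongs in driver code, where
 * BdrvChild is fully defined (block_int.h).
 */
static inline void example_signal_read(BdrvChild *file)
{
    BLKDBG_EVENT(file, BLKDBG_READ_AIO);
}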
|
|
|
|
|
2015-11-18 11:52:54 +03:00
|
|
|
void bdrv_debug_event(BlockDriverState *bs, BlkdebugEvent event);
|
2010-03-15 19:27:00 +03:00
|
|
|
|
2012-12-06 17:32:58 +04:00
|
|
|
int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
|
|
|
|
const char *tag);
|
2013-11-20 06:01:54 +04:00
|
|
|
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
|
2012-12-06 17:32:58 +04:00
|
|
|
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
|
|
|
|
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
|
|
|
|
|
2014-05-15 15:22:05 +04:00
|
|
|
/**
|
|
|
|
* bdrv_get_aio_context:
|
|
|
|
*
|
|
|
|
* Returns: the currently bound #AioContext
|
|
|
|
*/
|
|
|
|
AioContext *bdrv_get_aio_context(BlockDriverState *bs);
|
|
|
|
|
2020-10-05 18:58:53 +03:00
|
|
|
/**
|
|
|
|
* Move the current coroutine to the AioContext of @bs and return the old
|
|
|
|
* AioContext of the coroutine. Increase bs->in_flight so that draining @bs
|
|
|
|
 * will wait for the section to end, i.e. until the corresponding
|
|
|
|
* bdrv_co_leave().
|
|
|
|
*
|
|
|
|
* Consequently, you can't call drain inside a bdrv_co_enter/leave() section as
|
|
|
|
* this will deadlock.
|
|
|
|
*/
|
|
|
|
AioContext *coroutine_fn bdrv_co_enter(BlockDriverState *bs);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Ends a section started by bdrv_co_enter(). Move the current coroutine back
|
|
|
|
* to old_ctx and decrease bs->in_flight again.
|
|
|
|
*/
|
|
|
|
void coroutine_fn bdrv_co_leave(BlockDriverState *bs, AioContext *old_ctx);
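
/*
 * Illustrative sketch, not part of the original header: the enter/leave
 * bracket around work that must run in @bs's AioContext.  As noted above,
 * draining inside the bracket would deadlock.  The function name is
 * hypothetical.
 */
static inline void coroutine_fn example_co_in_bs_context(BlockDriverState *bs)
{
    AioContext *old_ctx = bdrv_co_enter(bs);

    /* ... operate on bs from its own AioContext; do not drain here ... */

    bdrv_co_leave(bs, old_ctx);    /* moves the coroutine back to old_ctx */
}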
|
|
|
|
|
2020-10-05 18:58:54 +03:00
|
|
|
/**
|
|
|
|
* Locks the AioContext of @bs if it's not the current AioContext. This avoids
|
|
|
|
* double locking which could lead to deadlocks: This is a coroutine_fn, so we
|
|
|
|
* know we already own the lock of the current AioContext.
|
|
|
|
*
|
|
|
|
* May only be called in the main thread.
|
|
|
|
*/
|
|
|
|
void coroutine_fn bdrv_co_lock(BlockDriverState *bs);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Unlocks the AioContext of @bs if it's not the current AioContext.
|
|
|
|
*/
|
|
|
|
void coroutine_fn bdrv_co_unlock(BlockDriverState *bs);
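
/*
 * Illustrative sketch, not part of the original header: main-thread
 * coroutine code bracketing access to @bs with bdrv_co_lock()/unlock(),
 * which are no-ops when @bs already lives in the current AioContext.
 * The function name is hypothetical.
 */
static inline void coroutine_fn example_co_locked_access(BlockDriverState *bs)
{
    bdrv_co_lock(bs);
    /* ... access bs under its AioContext lock ... */
    bdrv_co_unlock(bs);
}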
|
|
|
|
|
2017-04-10 15:09:25 +03:00
|
|
|
/**
|
|
|
|
* Transfer control to @co in the aio context of @bs
|
|
|
|
*/
|
|
|
|
void bdrv_coroutine_enter(BlockDriverState *bs, Coroutine *co);
|
|
|
|
|
2019-05-06 20:17:59 +03:00
|
|
|
void bdrv_set_aio_context_ignore(BlockDriverState *bs,
|
|
|
|
AioContext *new_context, GSList **ignore);
|
2019-05-06 20:17:56 +03:00
|
|
|
int bdrv_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
|
|
|
|
Error **errp);
|
|
|
|
int bdrv_child_try_set_aio_context(BlockDriverState *bs, AioContext *ctx,
|
|
|
|
BdrvChild *ignore_child, Error **errp);
|
|
|
|
bool bdrv_child_can_set_aio_context(BdrvChild *c, AioContext *ctx,
|
|
|
|
GSList **ignore, Error **errp);
|
|
|
|
bool bdrv_can_set_aio_context(BlockDriverState *bs, AioContext *ctx,
|
|
|
|
GSList **ignore, Error **errp);
|
2021-04-28 18:17:33 +03:00
|
|
|
AioContext *bdrv_child_get_parent_aio_context(BdrvChild *c);
|
2021-05-24 13:12:56 +03:00
|
|
|
AioContext *child_of_bds_get_parent_aio_context(BdrvChild *c);
|
2021-04-28 18:17:33 +03:00
|
|
|
|
2015-02-16 14:47:54 +03:00
|
|
|
int bdrv_probe_blocksizes(BlockDriverState *bs, BlockSizes *bsz);
|
|
|
|
int bdrv_probe_geometry(BlockDriverState *bs, HDGeometry *geo);
|
2014-05-08 18:34:37 +04:00
|
|
|
|
2014-07-04 14:04:33 +04:00
|
|
|
void bdrv_io_plug(BlockDriverState *bs);
|
|
|
|
void bdrv_io_unplug(BlockDriverState *bs);
|
|
|
|
|
2018-06-29 19:01:31 +03:00
|
|
|
/**
|
|
|
|
* bdrv_parent_drained_begin_single:
|
|
|
|
*
|
|
|
|
* Begin a quiesced section for the parent of @c. If @poll is true, wait for
|
|
|
|
* any pending activity to cease.
|
|
|
|
*/
|
|
|
|
void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll);
|
|
|
|
|
2019-07-19 12:26:09 +03:00
|
|
|
/**
|
|
|
|
* bdrv_parent_drained_end_single:
|
|
|
|
*
|
|
|
|
* End a quiesced section for the parent of @c.
|
2019-07-19 12:26:14 +03:00
|
|
|
*
|
|
|
|
* This polls @bs's AioContext until all scheduled sub-drained_ends
|
|
|
|
* have settled, which may result in graph changes.
|
2019-07-19 12:26:09 +03:00
|
|
|
*/
|
|
|
|
void bdrv_parent_drained_end_single(BdrvChild *c);
|
|
|
|
|
2018-03-22 16:11:20 +03:00
|
|
|
/**
|
|
|
|
* bdrv_drain_poll:
|
|
|
|
*
|
2018-03-23 14:40:41 +03:00
|
|
|
* Poll for pending requests in @bs, its parents (except for @ignore_parent),
|
2018-05-29 18:17:45 +03:00
|
|
|
 * and, if @recursive is true, its children as well (used for subtree drain).
|
|
|
|
*
|
|
|
|
* If @ignore_bds_parents is true, parents that are BlockDriverStates must
|
|
|
|
* ignore the drain request because they will be drained separately (used for
|
|
|
|
* drain_all).
|
2018-03-23 14:40:41 +03:00
|
|
|
*
|
|
|
|
* This is part of bdrv_drained_begin.
|
2018-03-22 16:11:20 +03:00
|
|
|
*/
|
2018-03-23 14:40:41 +03:00
|
|
|
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
|
2018-05-29 18:17:45 +03:00
|
|
|
BdrvChild *ignore_parent, bool ignore_bds_parents);
|
2018-03-22 16:11:20 +03:00
|
|
|
|
2015-10-23 06:08:09 +03:00
|
|
|
/**
|
|
|
|
* bdrv_drained_begin:
|
|
|
|
*
|
|
|
|
* Begin a quiesced section for exclusive access to the BDS, by disabling
|
2021-09-03 14:38:00 +03:00
|
|
|
 * external request sources, including the NBD server, block jobs, and device
 * models.
|
2015-10-23 06:08:09 +03:00
|
|
|
*
|
|
|
|
* This function can be recursive.
|
|
|
|
*/
|
|
|
|
void bdrv_drained_begin(BlockDriverState *bs);
|
|
|
|
|
2018-03-23 17:57:20 +03:00
|
|
|
/**
|
|
|
|
* bdrv_do_drained_begin_quiesce:
|
|
|
|
*
|
|
|
|
* Quiesces a BDS like bdrv_drained_begin(), but does not wait for already
|
|
|
|
* running requests to complete.
|
|
|
|
*/
|
|
|
|
void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
|
2018-05-29 18:17:45 +03:00
|
|
|
BdrvChild *parent, bool ignore_bds_parents);
|
2018-03-23 17:57:20 +03:00
|
|
|
|
2017-12-06 19:05:44 +03:00
|
|
|
/**
|
|
|
|
* Like bdrv_drained_begin, but recursively begins a quiesced section for
|
|
|
|
* exclusive access to all child nodes as well.
|
|
|
|
*/
|
|
|
|
void bdrv_subtree_drained_begin(BlockDriverState *bs);
|
|
|
|
|
2015-10-23 06:08:09 +03:00
|
|
|
/**
|
|
|
|
* bdrv_drained_end:
|
|
|
|
*
|
|
|
|
* End a quiescent section started by bdrv_drained_begin().
|
2019-07-19 12:26:14 +03:00
|
|
|
*
|
|
|
|
* This polls @bs's AioContext until all scheduled sub-drained_ends
|
|
|
|
* have settled. On one hand, that may result in graph changes. On
|
2019-07-22 16:30:54 +03:00
|
|
|
* the other, this requires that the caller either runs in the main
|
|
|
|
 * loop, or that all involved nodes (@bs and all of its parents) are
|
|
|
|
* in the caller's AioContext.
|
2015-10-23 06:08:09 +03:00
|
|
|
*/
|
|
|
|
void bdrv_drained_end(BlockDriverState *bs);
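
/*
 * Illustrative sketch, not part of the original header: the canonical
 * drained section around a graph manipulation.  While the bracket is open,
 * no new requests reach @bs and previously in-flight ones have completed.
 * The function name is hypothetical.
 */
static inline void example_drained_update(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    /* ... safely reconfigure bs or the graph around it ... */
    bdrv_drained_end(bs);
}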
|
|
|
|
|
2019-07-19 12:26:14 +03:00
|
|
|
/**
|
|
|
|
* bdrv_drained_end_no_poll:
|
|
|
|
*
|
|
|
|
* Same as bdrv_drained_end(), but do not poll for the subgraph to
|
|
|
|
* actually become unquiesced. Therefore, no graph changes will occur
|
|
|
|
* with this function.
|
|
|
|
*
|
|
|
|
* *drained_end_counter is incremented for every background operation
|
|
|
|
* that is scheduled, and will be decremented for every operation once
|
|
|
|
* it settles. The caller must poll until it reaches 0. The counter
|
|
|
|
* should be accessed using atomic operations only.
|
|
|
|
*/
|
|
|
|
void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter);
|
|
|
|
|
2017-12-06 19:05:44 +03:00
|
|
|
/**
|
|
|
|
* End a quiescent section started by bdrv_subtree_drained_begin().
|
|
|
|
*/
|
|
|
|
void bdrv_subtree_drained_end(BlockDriverState *bs);
|
|
|
|
|
2016-05-10 10:36:37 +03:00
|
|
|
void bdrv_add_child(BlockDriverState *parent, BlockDriverState *child,
|
|
|
|
Error **errp);
|
|
|
|
void bdrv_del_child(BlockDriverState *parent, BdrvChild *child, Error **errp);
|
|
|
|
|
2017-06-28 15:05:21 +03:00
|
|
|
bool bdrv_can_store_new_dirty_bitmap(BlockDriverState *bs, const char *name,
|
|
|
|
uint32_t granularity, Error **errp);
|
2018-01-16 09:08:56 +03:00
|
|
|
/**
|
|
|
|
*
|
|
|
|
* bdrv_register_buf/bdrv_unregister_buf:
|
|
|
|
*
|
|
|
|
* Register/unregister a buffer for I/O. For example, VFIO drivers are
|
|
|
|
 * interested in knowing the memory areas that will later be used for I/O, so
|
|
|
|
 * that they can prepare IOMMU mappings etc. in advance for better performance.
|
|
|
|
*/
|
|
|
|
void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size);
|
|
|
|
void bdrv_unregister_buf(BlockDriverState *bs, void *host);
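
/*
 * Illustrative sketch, not part of the original header: pre-registering a
 * long-lived I/O buffer so that interested drivers can set up IOMMU
 * mappings once rather than per request.  The function name is
 * hypothetical; pair it with bdrv_unregister_buf() and qemu_vfree().
 */
static inline void *example_register_buffer(BlockDriverState *bs, size_t size)
{
    void *buf = qemu_blockalign(bs, size);   /* aligned, aborts on ENOMEM */

    bdrv_register_buf(bs, buf, size);
    return buf;
}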
|
2018-06-01 12:26:39 +03:00
|
|
|
|
|
|
|
/**
|
|
|
|
*
|
|
|
|
* bdrv_co_copy_range:
|
|
|
|
*
|
|
|
|
* Do offloaded copy between two children. If the operation is not implemented
|
|
|
|
* by the driver, or if the backend storage doesn't support it, a negative
|
|
|
|
* error code will be returned.
|
|
|
|
*
|
|
|
|
 * Note: the block layer doesn't emulate or fall back to a bounce buffer
|
|
|
|
 * approach, because after the first error the caller usually shouldn't
|
|
|
|
 * attempt an offloaded copy again (e.g. by calling copy_file_range(2));
|
|
|
|
 * instead, the caller should fall back to a plain read+write path.
|
|
|
|
*
|
|
|
|
* @src: Source child to copy data from
|
|
|
|
* @src_offset: offset in @src image to read data
|
|
|
|
* @dst: Destination child to copy data to
|
|
|
|
* @dst_offset: offset in @dst image to write data
|
|
|
|
* @bytes: number of bytes to copy
|
2018-07-03 05:37:57 +03:00
|
|
|
* @flags: request flags. Supported flags:
|
2018-06-01 12:26:39 +03:00
|
|
|
* BDRV_REQ_ZERO_WRITE - treat the @src range as zero data and do zero
|
|
|
|
* write on @dst as if bdrv_co_pwrite_zeroes is
|
|
|
|
* called. Used to simplify caller code, or
|
|
|
|
* during BlockDriver.bdrv_co_copy_range_from()
|
|
|
|
* recursion.
|
2018-07-03 05:37:57 +03:00
|
|
|
* BDRV_REQ_NO_SERIALISING - do not serialize with other overlapping
|
|
|
|
* requests currently in flight.
|
2018-06-01 12:26:39 +03:00
|
|
|
*
|
|
|
|
* Returns: 0 if succeeded; negative error code if failed.
|
|
|
|
**/
|
2020-12-11 21:39:34 +03:00
|
|
|
int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
|
|
|
|
BdrvChild *dst, int64_t dst_offset,
|
|
|
|
int64_t bytes, BdrvRequestFlags read_flags,
|
2018-07-09 19:37:17 +03:00
|
|
|
BdrvRequestFlags write_flags);
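
/*
 * Illustrative sketch, not part of the original header: an offloaded copy
 * of @bytes from the start of @src to the start of @dst with no special
 * flags.  A negative return means the caller should fall back to an
 * ordinary read+write loop, as described above.  The function name is
 * hypothetical.
 */
static inline int coroutine_fn example_co_clone(BdrvChild *src,
                                                BdrvChild *dst,
                                                int64_t bytes)
{
    return bdrv_co_copy_range(src, 0, dst, 0, bytes, 0, 0);
}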
|
2021-02-05 19:37:11 +03:00
|
|
|
|
|
|
|
void bdrv_cancel_in_flight(BlockDriverState *bs);
|
|
|
|
|
2012-07-10 13:12:40 +04:00
|
|
|
#endif
|