qcow2: Clean up handle_alloc()
Things can be simplified a bit now. No semantic changes.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 83baa9a471
parent c37f4cd71d
@@ -886,7 +886,6 @@ static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
     unsigned int nb_clusters;
     int ret;
 
-    uint64_t alloc_offset;
     uint64_t alloc_cluster_offset;
 
     trace_qcow2_handle_alloc(qemu_coroutine_self(), guest_offset, *host_offset,
@@ -928,72 +927,69 @@ static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
         return 0;
     }
 
-    /* Calculate start and size of allocation */
-    alloc_offset = guest_offset;
-    alloc_cluster_offset = *host_offset;
-
     /* Allocate, if necessary at a given offset in the image file */
-    ret = do_alloc_cluster_offset(bs, alloc_offset, &alloc_cluster_offset,
+    alloc_cluster_offset = *host_offset;
+    ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
                                   &nb_clusters);
     if (ret < 0) {
         goto fail;
     }
 
-    /* save info needed for meta data update */
-    if (nb_clusters > 0) {
-        /*
-         * requested_sectors: Number of sectors from the start of the first
-         * newly allocated cluster to the end of the (possibly shortened
-         * before) write request.
-         *
-         * avail_sectors: Number of sectors from the start of the first
-         * newly allocated to the end of the last newly allocated cluster.
-         *
-         * nb_sectors: The number of sectors from the start of the first
-         * newly allocated cluster to the end of the aread that the write
-         * request actually writes to (excluding COW at the end)
-         */
-        int requested_sectors =
-            (*bytes + offset_into_cluster(s, guest_offset))
-            >> BDRV_SECTOR_BITS;
-        int avail_sectors = nb_clusters
-                            << (s->cluster_bits - BDRV_SECTOR_BITS);
-        int alloc_n_start = offset_into_cluster(s, guest_offset)
-                            >> BDRV_SECTOR_BITS;
-        int nb_sectors = MIN(requested_sectors, avail_sectors);
-
-        if (*host_offset == 0) {
-            *host_offset = alloc_cluster_offset;
-        }
-
-        *m = g_malloc0(sizeof(**m));
-
-        **m = (QCowL2Meta) {
-            .alloc_offset   = alloc_cluster_offset,
-            .offset         = alloc_offset & ~(s->cluster_size - 1),
-            .nb_clusters    = nb_clusters,
-            .nb_available   = nb_sectors,
-
-            .cow_start = {
-                .offset     = 0,
-                .nb_sectors = alloc_n_start,
-            },
-            .cow_end = {
-                .offset     = nb_sectors * BDRV_SECTOR_SIZE,
-                .nb_sectors = avail_sectors - nb_sectors,
-            },
-        };
-        qemu_co_queue_init(&(*m)->dependent_requests);
-        QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
-
-        *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
-                             - offset_into_cluster(s, guest_offset));
-        assert(*bytes != 0);
-    } else {
+    /* Can't extend contiguous allocation */
+    if (nb_clusters == 0) {
         *bytes = 0;
         return 0;
     }
 
+    /*
+     * Save info needed for meta data update.
+     *
+     * requested_sectors: Number of sectors from the start of the first
+     * newly allocated cluster to the end of the (possibly shortened
+     * before) write request.
+     *
+     * avail_sectors: Number of sectors from the start of the first
+     * newly allocated to the end of the last newly allocated cluster.
+     *
+     * nb_sectors: The number of sectors from the start of the first
+     * newly allocated cluster to the end of the area that the write
+     * request actually writes to (excluding COW at the end)
+     */
+    int requested_sectors =
+        (*bytes + offset_into_cluster(s, guest_offset))
+        >> BDRV_SECTOR_BITS;
+    int avail_sectors = nb_clusters
+                        << (s->cluster_bits - BDRV_SECTOR_BITS);
+    int alloc_n_start = offset_into_cluster(s, guest_offset)
+                        >> BDRV_SECTOR_BITS;
+    int nb_sectors = MIN(requested_sectors, avail_sectors);
+
+    *host_offset = alloc_cluster_offset;
+
+    *m = g_malloc0(sizeof(**m));
+
+    **m = (QCowL2Meta) {
+        .alloc_offset   = *host_offset,
+        .offset         = start_of_cluster(s, guest_offset),
+        .nb_clusters    = nb_clusters,
+        .nb_available   = nb_sectors,
+
+        .cow_start = {
+            .offset     = 0,
+            .nb_sectors = alloc_n_start,
+        },
+        .cow_end = {
+            .offset     = nb_sectors * BDRV_SECTOR_SIZE,
+            .nb_sectors = avail_sectors - nb_sectors,
+        },
+    };
+    qemu_co_queue_init(&(*m)->dependent_requests);
+    QLIST_INSERT_HEAD(&s->cluster_allocs, *m, next_in_flight);
+
+    *bytes = MIN(*bytes, (nb_sectors * BDRV_SECTOR_SIZE)
+                         - offset_into_cluster(s, guest_offset));
+    assert(*bytes != 0);
+
     return 1;
 
 fail:
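As an aside, the arithmetic that survives the cleanup (requested_sectors, avail_sectors, alloc_n_start, nb_sectors and the two COW regions) can be tried out in isolation. The following is only a standalone sketch that mirrors the formulas from the '+' lines above: the constants, the local offset_into_cluster()/start_of_cluster() helpers and the example numbers are illustrative stand-ins, not QEMU's actual definitions or data from a real image.

/* Hypothetical standalone sketch of the sector math in the new handle_alloc() path. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BDRV_SECTOR_BITS 9
#define BDRV_SECTOR_SIZE (1ULL << BDRV_SECTOR_BITS)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static const unsigned cluster_bits = 16;            /* assume 64 KiB clusters */
static const uint64_t cluster_size = 1ULL << 16;

static uint64_t offset_into_cluster(uint64_t offset) /* stand-in for QEMU helper */
{
    return offset & (cluster_size - 1);
}

static uint64_t start_of_cluster(uint64_t offset)    /* stand-in for QEMU helper */
{
    return offset & ~(cluster_size - 1);
}

int main(void)
{
    uint64_t guest_offset = 0x12345600;   /* example write offset */
    uint64_t bytes = 200 * 1024;          /* example write length */
    unsigned nb_clusters = 3;             /* clusters the allocator returned */

    /* Same formulas as the new code in handle_alloc() */
    int requested_sectors = (bytes + offset_into_cluster(guest_offset))
                            >> BDRV_SECTOR_BITS;
    int avail_sectors = nb_clusters << (cluster_bits - BDRV_SECTOR_BITS);
    int alloc_n_start = offset_into_cluster(guest_offset) >> BDRV_SECTOR_BITS;
    int nb_sectors = MIN(requested_sectors, avail_sectors);

    uint64_t shortened = MIN(bytes,
                             nb_sectors * BDRV_SECTOR_SIZE
                             - offset_into_cluster(guest_offset));

    printf("L2 entry covers guest offset 0x%" PRIx64 "\n",
           start_of_cluster(guest_offset));
    printf("cow_start: offset 0, %d sectors\n", alloc_n_start);
    printf("cow_end:   offset %d, %d sectors\n",
           (int)(nb_sectors * BDRV_SECTOR_SIZE), avail_sectors - nb_sectors);
    printf("request shortened to %" PRIu64 " bytes\n", shortened);
    return 0;
}

With the example values, the write is shorter at the front than the three allocated clusters at the back, so cow_start covers the sectors before the request and cow_end is empty, and *bytes is trimmed to what fits into the newly allocated clusters.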