iscsi: Switch iscsi_allocmap_update() to byte-based

We are gradually converting to byte-based interfaces, as they are
easier to reason about than sector-based.  Convert all uses of
the allocmap (no semantic change).  Callers that already had byte
values available become simpler, and callers that still scale sector
counts to bytes will be easier to switch fully to byte-based
interfaces in the future.
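
As an aside, not part of this patch: below is a minimal, self-contained
sketch of the cluster rounding that the byte-based helper performs.  The
cluster size and request are made-up example values; DIV_ROUND_UP uses the
same rounding as QEMU's helper of that name.

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        int64_t cluster_size = 64 * 1024;   /* hypothetical cluster size, bytes */
        int64_t offset = 70000;             /* hypothetical request start, bytes */
        int64_t bytes = 200000;             /* hypothetical request length, bytes */

        /* expand to entirely contain all affected clusters */
        int64_t cl_num_expanded = offset / cluster_size;
        int64_t nb_cls_expanded = DIV_ROUND_UP(offset + bytes, cluster_size)
                                  - cl_num_expanded;

        /* shrink to touch only completely contained clusters */
        int64_t cl_num_shrunk = DIV_ROUND_UP(offset, cluster_size);
        int64_t nb_cls_shrunk = (offset + bytes) / cluster_size - cl_num_shrunk;

        printf("expanded: cluster %" PRId64 ", count %" PRId64 "\n",
               cl_num_expanded, nb_cls_expanded);
        printf("shrunk:   cluster %" PRId64 ", count %" PRId64 "\n",
               cl_num_shrunk, nb_cls_shrunk);
        return 0;
    }

With these inputs the expanded range is clusters [1, 5) and the shrunk
range is clusters [2, 4); the patched function picks the expanded range
where covering extra clusters is harmless and the shrunk range where only
fully covered clusters may be touched.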

Signed-off-by: Eric Blake <eblake@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Author:    Eric Blake <eblake@redhat.com>, 2018-02-13 14:26:47 -0600
Committer: Kevin Wolf <kwolf@redhat.com>
Commit:    04a408fbff (parent: ba059e7b17)

@@ -458,24 +458,22 @@ static int iscsi_allocmap_init(IscsiLun *iscsilun, int open_flags)
 }
 
 static void
-iscsi_allocmap_update(IscsiLun *iscsilun, int64_t sector_num,
-                      int nb_sectors, bool allocated, bool valid)
+iscsi_allocmap_update(IscsiLun *iscsilun, int64_t offset,
+                      int64_t bytes, bool allocated, bool valid)
 {
     int64_t cl_num_expanded, nb_cls_expanded, cl_num_shrunk, nb_cls_shrunk;
-    int cluster_sectors = iscsilun->cluster_size >> BDRV_SECTOR_BITS;
 
     if (iscsilun->allocmap == NULL) {
         return;
     }
     /* expand to entirely contain all affected clusters */
-    assert(cluster_sectors);
-    cl_num_expanded = sector_num / cluster_sectors;
-    nb_cls_expanded = DIV_ROUND_UP(sector_num + nb_sectors,
-                                   cluster_sectors) - cl_num_expanded;
+    assert(iscsilun->cluster_size);
+    cl_num_expanded = offset / iscsilun->cluster_size;
+    nb_cls_expanded = DIV_ROUND_UP(offset + bytes,
+                                   iscsilun->cluster_size) - cl_num_expanded;
     /* shrink to touch only completely contained clusters */
-    cl_num_shrunk = DIV_ROUND_UP(sector_num, cluster_sectors);
-    nb_cls_shrunk = (sector_num + nb_sectors) / cluster_sectors
-                    - cl_num_shrunk;
+    cl_num_shrunk = DIV_ROUND_UP(offset, iscsilun->cluster_size);
+    nb_cls_shrunk = (offset + bytes) / iscsilun->cluster_size - cl_num_shrunk;
     if (allocated) {
         bitmap_set(iscsilun->allocmap, cl_num_expanded, nb_cls_expanded);
     } else {
@@ -498,26 +496,26 @@ iscsi_allocmap_update(IscsiLun *iscsilun, int64_t sector_num,
 }
 
 static void
-iscsi_allocmap_set_allocated(IscsiLun *iscsilun, int64_t sector_num,
-                             int nb_sectors)
+iscsi_allocmap_set_allocated(IscsiLun *iscsilun, int64_t offset,
+                             int64_t bytes)
 {
-    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, true, true);
+    iscsi_allocmap_update(iscsilun, offset, bytes, true, true);
 }
 
 static void
-iscsi_allocmap_set_unallocated(IscsiLun *iscsilun, int64_t sector_num,
-                               int nb_sectors)
+iscsi_allocmap_set_unallocated(IscsiLun *iscsilun, int64_t offset,
+                               int64_t bytes)
 {
     /* Note: if cache.direct=on the fifth argument to iscsi_allocmap_update
      * is ignored, so this will in effect be an iscsi_allocmap_set_invalid.
      */
-    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, false, true);
+    iscsi_allocmap_update(iscsilun, offset, bytes, false, true);
 }
 
-static void iscsi_allocmap_set_invalid(IscsiLun *iscsilun, int64_t sector_num,
-                                       int nb_sectors)
+static void iscsi_allocmap_set_invalid(IscsiLun *iscsilun, int64_t offset,
+                                       int64_t bytes)
 {
-    iscsi_allocmap_update(iscsilun, sector_num, nb_sectors, false, false);
+    iscsi_allocmap_update(iscsilun, offset, bytes, false, false);
 }
 
 static void iscsi_allocmap_invalidate(IscsiLun *iscsilun)
@@ -531,34 +529,30 @@ static void iscsi_allocmap_invalidate(IscsiLun *iscsilun)
 }
 
 static inline bool
-iscsi_allocmap_is_allocated(IscsiLun *iscsilun, int64_t sector_num,
-                            int nb_sectors)
+iscsi_allocmap_is_allocated(IscsiLun *iscsilun, int64_t offset,
+                            int64_t bytes)
 {
     unsigned long size;
     if (iscsilun->allocmap == NULL) {
         return true;
     }
     assert(iscsilun->cluster_size);
-    size = DIV_ROUND_UP(sector_num + nb_sectors,
-                        iscsilun->cluster_size >> BDRV_SECTOR_BITS);
+    size = DIV_ROUND_UP(offset + bytes, iscsilun->cluster_size);
     return !(find_next_bit(iscsilun->allocmap, size,
-                           sector_num * BDRV_SECTOR_SIZE /
-                           iscsilun->cluster_size) == size);
+                           offset / iscsilun->cluster_size) == size);
 }
 
 static inline bool iscsi_allocmap_is_valid(IscsiLun *iscsilun,
-                                           int64_t sector_num, int nb_sectors)
+                                           int64_t offset, int64_t bytes)
 {
     unsigned long size;
     if (iscsilun->allocmap_valid == NULL) {
         return false;
     }
     assert(iscsilun->cluster_size);
-    size = DIV_ROUND_UP(sector_num + nb_sectors,
-                        iscsilun->cluster_size >> BDRV_SECTOR_BITS);
+    size = DIV_ROUND_UP(offset + bytes, iscsilun->cluster_size);
     return (find_next_zero_bit(iscsilun->allocmap_valid, size,
-                               sector_num * BDRV_SECTOR_SIZE /
-                               iscsilun->cluster_size) == size);
+                               offset / iscsilun->cluster_size) == size);
 }
 
 static int coroutine_fn
@@ -640,14 +634,16 @@ retry:
     }
 
     if (iTask.status != SCSI_STATUS_GOOD) {
-        iscsi_allocmap_set_invalid(iscsilun, sector_num, nb_sectors);
+        iscsi_allocmap_set_invalid(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                   nb_sectors * BDRV_SECTOR_SIZE);
         error_report("iSCSI WRITE10/16 failed at lba %" PRIu64 ": %s", lba,
                      iTask.err_str);
         r = iTask.err_code;
         goto out_unlock;
     }
 
-    iscsi_allocmap_set_allocated(iscsilun, sector_num, nb_sectors);
+    iscsi_allocmap_set_allocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                 nb_sectors * BDRV_SECTOR_SIZE);
 
 out_unlock:
     qemu_mutex_unlock(&iscsilun->mutex);
@@ -747,9 +743,11 @@ retry:
     }
 
     if (ret & BDRV_BLOCK_ZERO) {
-        iscsi_allocmap_set_unallocated(iscsilun, sector_num, *pnum);
+        iscsi_allocmap_set_unallocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                       *pnum * BDRV_SECTOR_SIZE);
     } else {
-        iscsi_allocmap_set_allocated(iscsilun, sector_num, *pnum);
+        iscsi_allocmap_set_allocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                     *pnum * BDRV_SECTOR_SIZE);
     }
 
     if (*pnum > nb_sectors) {
@@ -789,15 +787,19 @@ static int coroutine_fn iscsi_co_readv(BlockDriverState *bs,
     /* if cache.direct is off and we have a valid entry in our allocation map
      * we can skip checking the block status and directly return zeroes if
      * the request falls within an unallocated area */
-    if (iscsi_allocmap_is_valid(iscsilun, sector_num, nb_sectors) &&
-        !iscsi_allocmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
+    if (iscsi_allocmap_is_valid(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                nb_sectors * BDRV_SECTOR_SIZE) &&
+        !iscsi_allocmap_is_allocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                     nb_sectors * BDRV_SECTOR_SIZE)) {
             qemu_iovec_memset(iov, 0, 0x00, iov->size);
             return 0;
     }
 
     if (nb_sectors >= ISCSI_CHECKALLOC_THRES &&
-        !iscsi_allocmap_is_valid(iscsilun, sector_num, nb_sectors) &&
-        !iscsi_allocmap_is_allocated(iscsilun, sector_num, nb_sectors)) {
+        !iscsi_allocmap_is_valid(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                 nb_sectors * BDRV_SECTOR_SIZE) &&
+        !iscsi_allocmap_is_allocated(iscsilun, sector_num * BDRV_SECTOR_SIZE,
+                                     nb_sectors * BDRV_SECTOR_SIZE)) {
         int pnum;
         BlockDriverState *file;
         /* check the block status from the beginning of the cluster
@@ -1160,8 +1162,7 @@ retry:
         goto retry;
     }
 
-    iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
-                               bytes >> BDRV_SECTOR_BITS);
+    iscsi_allocmap_set_invalid(iscsilun, offset, bytes);
 
     if (iTask.status == SCSI_STATUS_CHECK_CONDITION) {
         /* the target might fail with a check condition if it
@@ -1274,8 +1275,7 @@ retry:
     }
 
     if (iTask.status != SCSI_STATUS_GOOD) {
-        iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
-                                   bytes >> BDRV_SECTOR_BITS);
+        iscsi_allocmap_set_invalid(iscsilun, offset, bytes);
         error_report("iSCSI WRITESAME10/16 failed at lba %" PRIu64 ": %s",
                      lba, iTask.err_str);
         r = iTask.err_code;
@@ -1283,11 +1283,9 @@ retry:
     }
 
     if (flags & BDRV_REQ_MAY_UNMAP) {
-        iscsi_allocmap_set_invalid(iscsilun, offset >> BDRV_SECTOR_BITS,
-                                   bytes >> BDRV_SECTOR_BITS);
+        iscsi_allocmap_set_invalid(iscsilun, offset, bytes);
     } else {
-        iscsi_allocmap_set_allocated(iscsilun, offset >> BDRV_SECTOR_BITS,
-                                     bytes >> BDRV_SECTOR_BITS);
+        iscsi_allocmap_set_allocated(iscsilun, offset, bytes);
     }
 
 out_unlock: