block/nvme: Use unsigned integer for queue counter/size
We cannot have a negative queue count/size/index, so use an unsigned type. Rename 'nr_queues' as 'queue_count' to match the spec naming.

Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Message-id: 20201029093306.1063879-10-philmd@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
This commit is contained in:
parent
3214b0f094
commit
1b539bd6db
38
block/nvme.c
38
block/nvme.c
@@ -104,7 +104,7 @@ struct BDRVNVMeState {
|
||||
* [1..]: io queues.
|
||||
*/
|
||||
NVMeQueuePair **queues;
|
||||
int nr_queues;
|
||||
unsigned queue_count;
|
||||
size_t page_size;
|
||||
/* How many uint32_t elements does each doorbell entry take. */
|
||||
size_t doorbell_scale;
|
||||
@@ -161,7 +161,7 @@ static QemuOptsList runtime_opts = {
|
||||
};
|
||||
|
||||
static void nvme_init_queue(BDRVNVMeState *s, NVMeQueue *q,
|
||||
int nentries, int entry_bytes, Error **errp)
|
||||
unsigned nentries, size_t entry_bytes, Error **errp)
|
||||
{
|
||||
size_t bytes;
|
||||
int r;
|
||||
@@ -206,7 +206,7 @@ static void nvme_free_req_queue_cb(void *opaque)
|
||||
|
||||
static NVMeQueuePair *nvme_create_queue_pair(BDRVNVMeState *s,
|
||||
AioContext *aio_context,
|
||||
int idx, int size,
|
||||
unsigned idx, size_t size,
|
||||
Error **errp)
|
||||
{
|
||||
int i, r;
|
||||
@@ -623,7 +623,7 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
|
||||
bool progress = false;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < s->nr_queues; i++) {
|
||||
for (i = 0; i < s->queue_count; i++) {
|
||||
if (nvme_poll_queue(s->queues[i])) {
|
||||
progress = true;
|
||||
}
|
||||
@@ -644,10 +644,10 @@ static void nvme_handle_event(EventNotifier *n)
|
||||
static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
|
||||
{
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
int n = s->nr_queues;
|
||||
unsigned n = s->queue_count;
|
||||
NVMeQueuePair *q;
|
||||
NvmeCmd cmd;
|
||||
int queue_size = NVME_QUEUE_SIZE;
|
||||
unsigned queue_size = NVME_QUEUE_SIZE;
|
||||
|
||||
q = nvme_create_queue_pair(s, bdrv_get_aio_context(bs),
|
||||
n, queue_size, errp);
|
||||
@@ -661,7 +661,7 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
|
||||
.cdw11 = cpu_to_le32(0x3),
|
||||
};
|
||||
if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
|
||||
error_setg(errp, "Failed to create CQ io queue [%d]", n);
|
||||
error_setg(errp, "Failed to create CQ io queue [%u]", n);
|
||||
goto out_error;
|
||||
}
|
||||
cmd = (NvmeCmd) {
|
||||
@@ -671,12 +671,12 @@ static bool nvme_add_io_queue(BlockDriverState *bs, Error **errp)
|
||||
.cdw11 = cpu_to_le32(0x1 | (n << 16)),
|
||||
};
|
||||
if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
|
||||
error_setg(errp, "Failed to create SQ io queue [%d]", n);
|
||||
error_setg(errp, "Failed to create SQ io queue [%u]", n);
|
||||
goto out_error;
|
||||
}
|
||||
s->queues = g_renew(NVMeQueuePair *, s->queues, n + 1);
|
||||
s->queues[n] = q;
|
||||
s->nr_queues++;
|
||||
s->queue_count++;
|
||||
return true;
|
||||
out_error:
|
||||
nvme_free_queue_pair(q);
|
||||
@@ -785,7 +785,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
s->nr_queues = 1;
|
||||
s->queue_count = 1;
|
||||
QEMU_BUILD_BUG_ON(NVME_QUEUE_SIZE & 0xF000);
|
||||
regs->aqa = cpu_to_le32((NVME_QUEUE_SIZE << AQA_ACQS_SHIFT) |
|
||||
(NVME_QUEUE_SIZE << AQA_ASQS_SHIFT));
|
||||
@@ -895,10 +895,9 @@ static int nvme_enable_disable_write_cache(BlockDriverState *bs, bool enable,
|
||||
|
||||
static void nvme_close(BlockDriverState *bs)
|
||||
{
|
||||
int i;
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
|
||||
for (i = 0; i < s->nr_queues; ++i) {
|
||||
for (unsigned i = 0; i < s->queue_count; ++i) {
|
||||
nvme_free_queue_pair(s->queues[i]);
|
||||
}
|
||||
g_free(s->queues);
|
||||
@@ -1123,7 +1122,7 @@ static coroutine_fn int nvme_co_prw_aligned(BlockDriverState *bs,
|
||||
};
|
||||
|
||||
trace_nvme_prw_aligned(s, is_write, offset, bytes, flags, qiov->niov);
|
||||
assert(s->nr_queues > 1);
|
||||
assert(s->queue_count > 1);
|
||||
req = nvme_get_free_req(ioq);
|
||||
assert(req);
|
||||
|
||||
@@ -1233,7 +1232,7 @@ static coroutine_fn int nvme_co_flush(BlockDriverState *bs)
|
||||
.ret = -EINPROGRESS,
|
||||
};
|
||||
|
||||
assert(s->nr_queues > 1);
|
||||
assert(s->queue_count > 1);
|
||||
req = nvme_get_free_req(ioq);
|
||||
assert(req);
|
||||
nvme_submit_command(ioq, req, &cmd, nvme_rw_cb, &data);
|
||||
@@ -1285,7 +1284,7 @@ static coroutine_fn int nvme_co_pwrite_zeroes(BlockDriverState *bs,
|
||||
cmd.cdw12 = cpu_to_le32(cdw12);
|
||||
|
||||
trace_nvme_write_zeroes(s, offset, bytes, flags);
|
||||
assert(s->nr_queues > 1);
|
||||
assert(s->queue_count > 1);
|
||||
req = nvme_get_free_req(ioq);
|
||||
assert(req);
|
||||
|
||||
@@ -1328,7 +1327,7 @@ static int coroutine_fn nvme_co_pdiscard(BlockDriverState *bs,
|
||||
return -ENOTSUP;
|
||||
}
|
||||
|
||||
assert(s->nr_queues > 1);
|
||||
assert(s->queue_count > 1);
|
||||
|
||||
buf = qemu_try_memalign(s->page_size, s->page_size);
|
||||
if (!buf) {
|
||||
@@ -1408,7 +1407,7 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
|
||||
{
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
|
||||
for (int i = 0; i < s->nr_queues; i++) {
|
||||
for (unsigned i = 0; i < s->queue_count; i++) {
|
||||
NVMeQueuePair *q = s->queues[i];
|
||||
|
||||
qemu_bh_delete(q->completion_bh);
|
||||
@@ -1429,7 +1428,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
|
||||
aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
|
||||
false, nvme_handle_event, nvme_poll_cb);
|
||||
|
||||
for (int i = 0; i < s->nr_queues; i++) {
|
||||
for (unsigned i = 0; i < s->queue_count; i++) {
|
||||
NVMeQueuePair *q = s->queues[i];
|
||||
|
||||
q->completion_bh =
|
||||
@@ -1446,11 +1445,10 @@ static void nvme_aio_plug(BlockDriverState *bs)
|
||||
|
||||
static void nvme_aio_unplug(BlockDriverState *bs)
|
||||
{
|
||||
int i;
|
||||
BDRVNVMeState *s = bs->opaque;
|
||||
assert(s->plugged);
|
||||
s->plugged = false;
|
||||
for (i = INDEX_IO(0); i < s->nr_queues; i++) {
|
||||
for (unsigned i = INDEX_IO(0); i < s->queue_count; i++) {
|
||||
NVMeQueuePair *q = s->queues[i];
|
||||
qemu_mutex_lock(&q->lock);
|
||||
nvme_kick(q);
|
||||
|
@@ -136,13 +136,13 @@ qed_aio_write_main(void *s, void *acb, int ret, uint64_t offset, size_t len) "s
|
||||
# nvme.c
|
||||
nvme_controller_capability_raw(uint64_t value) "0x%08"PRIx64
|
||||
nvme_controller_capability(const char *desc, uint64_t value) "%s: %"PRIu64
|
||||
nvme_kick(void *s, int queue) "s %p queue %d"
|
||||
nvme_kick(void *s, unsigned q_index) "s %p q #%u"
|
||||
nvme_dma_flush_queue_wait(void *s) "s %p"
|
||||
nvme_error(int cmd_specific, int sq_head, int sqid, int cid, int status) "cmd_specific %d sq_head %d sqid %d cid %d status 0x%x"
|
||||
nvme_process_completion(void *s, int index, int inflight) "s %p queue %d inflight %d"
|
||||
nvme_process_completion_queue_plugged(void *s, int index) "s %p queue %d"
|
||||
nvme_complete_command(void *s, int index, int cid) "s %p queue %d cid %d"
|
||||
nvme_submit_command(void *s, int index, int cid) "s %p queue %d cid %d"
|
||||
nvme_process_completion(void *s, unsigned q_index, int inflight) "s %p q #%u inflight %d"
|
||||
nvme_process_completion_queue_plugged(void *s, unsigned q_index) "s %p q #%u"
|
||||
nvme_complete_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
|
||||
nvme_submit_command(void *s, unsigned q_index, int cid) "s %p q #%u cid %d"
|
||||
nvme_submit_command_raw(int c0, int c1, int c2, int c3, int c4, int c5, int c6, int c7) "%02x %02x %02x %02x %02x %02x %02x %02x"
|
||||
nvme_handle_event(void *s) "s %p"
|
||||
nvme_poll_queue(void *s, unsigned q_index) "s %p q #%u"
|
||||
|
Loading…
Reference in New Issue
Block a user