lockable: Replace locks with lock guard macros

Replace manual lock()/unlock() calls with lock guard macros
(QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
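
The pattern: QEMU_LOCK_GUARD(&mutex) locks immediately and unlocks
automatically when the enclosing scope is left, so early returns need
no cleanup label; WITH_QEMU_LOCK_GUARD(&mutex) { ... } holds the lock
only for the braced block. A minimal sketch of the before/after shape
(hypothetical mutex and data, not code from this patch):

    #include "qemu/osdep.h"
    #include "qemu/lockable.h"

    static QemuMutex mu;
    static int value;

    /* Before: every exit path must pair the lock with an unlock. */
    static int get_value_manual(void)
    {
        int v;

        qemu_mutex_lock(&mu);
        v = value;
        qemu_mutex_unlock(&mu);
        return v;
    }

    /* After: the guard releases mu when it goes out of scope. */
    static int get_value_guarded(void)
    {
        QEMU_LOCK_GUARD(&mu);
        return value;    /* mu is unlocked on return */
    }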

Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Message-id: 20200402065035.GA15477@simran-Inspiron-5558
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 08b689aa6b (parent 6e8a355de6)
Author: Simran Singhal, 2020-04-02 12:20:35 +05:30
Committer: Stefan Hajnoczi
3 changed files with 35 additions and 37 deletions

hw/hyperv/hyperv.c

@@ -15,6 +15,7 @@
 #include "sysemu/kvm.h"
 #include "qemu/bitops.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/queue.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
@@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     int ret;
     MsgHandler *mh;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(mh, &msg_handlers, link) {
         if (mh->conn_id == conn_id) {
             if (handler) {
@@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
                 g_free_rcu(mh, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
@@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
@@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     int ret;
     EventFlagHandler *handler;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(handler, &event_flag_handlers, link) {
         if (handler->conn_id == conn_id) {
             if (notifier) {
@@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
                 g_free_rcu(handler, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
@@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
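
The hyperv.c conversion also deletes the unlock: label: with the guard
in scope, the loop can return directly and the mutex is still released
on every path. A self-contained sketch of that shape (hypothetical
registry types, not the hyperv.c code):

    #include "qemu/osdep.h"
    #include "qemu/lockable.h"
    #include "qemu/queue.h"

    typedef struct Entry {
        uint32_t id;
        QLIST_ENTRY(Entry) link;
    } Entry;

    static QemuMutex registry_mutex;
    static QLIST_HEAD(, Entry) registry =
        QLIST_HEAD_INITIALIZER(registry);

    /* The direct return inside the loop is safe: the guard
     * releases registry_mutex on every exit path. */
    static int registry_find(uint32_t id)
    {
        Entry *e;

        QEMU_LOCK_GUARD(&registry_mutex);
        QLIST_FOREACH(e, &registry, link) {
            if (e->id == id) {
                return 0;
            }
        }
        return -ENOENT;
    }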

hw/rdma/rdma_backend.c

@@ -95,36 +95,36 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
     struct ibv_wc wc[2];
     RdmaProtectedGSList *cqe_ctx_list;
 
-    qemu_mutex_lock(&rdma_dev_res->lock);
-    do {
-        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
+    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
+        do {
+            ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 
-        trace_rdma_poll_cq(ne, ibcq);
+            trace_rdma_poll_cq(ne, ibcq);
 
-        for (i = 0; i < ne; i++) {
-            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            if (unlikely(!bctx)) {
-                rdma_error_report("No matching ctx for req %"PRId64,
-                                  wc[i].wr_id);
-                continue;
-            }
+            for (i = 0; i < ne; i++) {
+                bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                if (unlikely(!bctx)) {
+                    rdma_error_report("No matching ctx for req %"PRId64,
+                                      wc[i].wr_id);
+                    continue;
+                }
 
-            comp_handler(bctx->up_ctx, &wc[i]);
+                comp_handler(bctx->up_ctx, &wc[i]);
 
-            if (bctx->backend_qp) {
-                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
-            } else {
-                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
-            }
+                if (bctx->backend_qp) {
+                    cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
+                } else {
+                    cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
+                }
 
-            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
-            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
-            g_free(bctx);
-        }
-        total_ne += ne;
-    } while (ne > 0);
-    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
-    qemu_mutex_unlock(&rdma_dev_res->lock);
+                rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
+                rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
+                g_free(bctx);
+            }
+            total_ne += ne;
+        } while (ne > 0);
+        atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
+    }
 
     if (ne < 0) {
         rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);

hw/rdma/rdma_rm.c

@@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
 {
     trace_rdma_res_tbl_dealloc(tbl->name, handle);
 
-    qemu_mutex_lock(&tbl->lock);
+    QEMU_LOCK_GUARD(&tbl->lock);
 
     if (handle < tbl->tbl_sz) {
         clear_bit(handle, tbl->bitmap);
         tbl->used--;
     }
-
-    qemu_mutex_unlock(&tbl->lock);
 }
 
 int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
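
For background: QEMU declares these macros in include/qemu/lockable.h.
A guard of this kind can be built on the GCC/Clang cleanup attribute;
the standalone pthread sketch below illustrates the mechanism only (it
is not QEMU's implementation, which also handles multiple guards per
scope and several lock types):

    #include <pthread.h>

    static void unlock_cleanup(pthread_mutex_t **m)
    {
        pthread_mutex_unlock(*m);
    }

    /* Lock now; the cleanup handler unlocks automatically when the
     * guard variable goes out of scope. */
    #define LOCK_GUARD(m)                                           \
        __attribute__((cleanup(unlock_cleanup)))                    \
        pthread_mutex_t *lock_guard_ = (pthread_mutex_lock(m), (m))

    static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned table_used;

    static void table_dealloc(void)
    {
        LOCK_GUARD(&table_mutex);
        table_used--;
    }   /* table_mutex is released here */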