lockable: Replace locks with lock guard macros

Replace manual lock()/unlock() calls with lock guard macros
(QEMU_LOCK_GUARD/WITH_QEMU_LOCK_GUARD).
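
For readers new to these macros, a minimal before/after sketch of the pattern this
commit applies; the function, mutex, and helper names are invented for illustration,
not taken from the patch:

#include "qemu/osdep.h"
#include "qemu/lockable.h"

static QemuMutex table_lock;            /* hypothetical mutex, assume initialized elsewhere */
static int do_lookup(int key);          /* hypothetical helper */

/* Before: every exit path must remember to unlock. */
static int lookup_manual(int key)
{
    int ret;

    qemu_mutex_lock(&table_lock);
    ret = do_lookup(key);
    qemu_mutex_unlock(&table_lock);
    return ret;
}

/* After: the guard releases the mutex automatically when the
 * enclosing scope is left, on every exit path. */
static int lookup_guarded(int key)
{
    QEMU_LOCK_GUARD(&table_lock);
    return do_lookup(key);              /* unlock happens on return */
}

QEMU_LOCK_GUARD holds the mutex until the end of the enclosing scope; the
WITH_QEMU_LOCK_GUARD variant used later in this patch scopes it to an attached
block instead.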

Signed-off-by: Simran Singhal <singhalsimran0@gmail.com>
Reviewed-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Reviewed-by: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
Tested-by: Yuval Shaia <yuval.shaia.ml@gmail.com>
Message-id: 20200402065035.GA15477@simran-Inspiron-5558
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Simran Singhal, 2020-04-02 12:20:35 +05:30; committed by Stefan Hajnoczi
commit 08b689aa6b (parent 6e8a355de6)
3 changed files with 35 additions and 37 deletions

hw/hyperv/hyperv.c

@@ -15,6 +15,7 @@
 #include "sysemu/kvm.h"
 #include "qemu/bitops.h"
 #include "qemu/error-report.h"
+#include "qemu/lockable.h"
 #include "qemu/queue.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu_queue.h"
@@ -491,7 +492,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     int ret;
     MsgHandler *mh;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(mh, &msg_handlers, link) {
         if (mh->conn_id == conn_id) {
             if (handler) {
@@ -501,7 +502,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
                 g_free_rcu(mh, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -515,8 +516,7 @@ int hyperv_set_msg_handler(uint32_t conn_id, HvMsgHandler handler, void *data)
     } else {
         ret = -ENOENT;
     }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 
@@ -565,7 +565,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     int ret;
    EventFlagHandler *handler;
 
-    qemu_mutex_lock(&handlers_mutex);
+    QEMU_LOCK_GUARD(&handlers_mutex);
     QLIST_FOREACH(handler, &event_flag_handlers, link) {
         if (handler->conn_id == conn_id) {
             if (notifier) {
@@ -575,7 +575,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
                 g_free_rcu(handler, rcu);
                 ret = 0;
             }
-            goto unlock;
+            return ret;
         }
     }
 
@@ -588,8 +588,7 @@ static int set_event_flag_handler(uint32_t conn_id, EventNotifier *notifier)
     } else {
         ret = -ENOENT;
    }
-unlock:
-    qemu_mutex_unlock(&handlers_mutex);
+
     return ret;
 }
 

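A side benefit, visible in both functions above: since the guard unlocks on every
scope exit, the goto unlock cleanup label disappears and early exits become plain
returns. A sketch of the simplified shape, with a hypothetical lookup table standing
in for the real handler lists:

#include "qemu/osdep.h"
#include "qemu/lockable.h"

static QemuMutex handlers_mutex;                /* name as in the patch; assume initialized */
static struct { int conn_id; } handlers[16];    /* hypothetical table */
static int num_handlers;

/* An early return is safe under QEMU_LOCK_GUARD, so no cleanup
 * label is required. */
static int find_conn(int conn_id)
{
    QEMU_LOCK_GUARD(&handlers_mutex);

    for (int i = 0; i < num_handlers; i++) {
        if (handlers[i].conn_id == conn_id) {
            return 0;           /* guard unlocks here... */
        }
    }
    return -ENOENT;             /* ...and here */
}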
hw/rdma/rdma_backend.c

@@ -95,7 +95,7 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
     struct ibv_wc wc[2];
     RdmaProtectedGSList *cqe_ctx_list;
 
-    qemu_mutex_lock(&rdma_dev_res->lock);
+    WITH_QEMU_LOCK_GUARD(&rdma_dev_res->lock) {
     do {
         ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);
 
@@ -124,7 +124,7 @@ static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
         total_ne += ne;
     } while (ne > 0);
     atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
-    qemu_mutex_unlock(&rdma_dev_res->lock);
+    }
 
     if (ne < 0) {
         rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);

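The rdma_poll_cq() conversion uses the block-scoped variant: WITH_QEMU_LOCK_GUARD
holds the lock only for the braced block attached to it, which fits here because the
ibv_poll_cq error reporting after the loop should run with the lock already dropped.
A sketch with invented names:

#include "qemu/osdep.h"
#include "qemu/lockable.h"

static QemuMutex dev_lock;      /* hypothetical mutex, assume initialized */
static int pending;             /* hypothetical shared state */

static int drain_pending(void)
{
    int ne = 0;

    WITH_QEMU_LOCK_GUARD(&dev_lock) {
        ne = pending;           /* touched only under the lock */
        pending = 0;
    }                           /* mutex released here */

    if (ne < 0) {
        /* error handling runs outside the critical section */
    }
    return ne;
}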
hw/rdma/rdma_rm.c

@@ -147,14 +147,13 @@ static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
 {
     trace_rdma_res_tbl_dealloc(tbl->name, handle);
 
-    qemu_mutex_lock(&tbl->lock);
+    QEMU_LOCK_GUARD(&tbl->lock);
 
     if (handle < tbl->tbl_sz) {
         clear_bit(handle, tbl->bitmap);
         tbl->used--;
     }
 
-    qemu_mutex_unlock(&tbl->lock);
 }
 
 int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
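
Both macros live in qemu/lockable.h and are built on the compiler's cleanup
attribute (reached through glib's g_autoptr machinery): a hidden guard variable is
declared whose destructor unlocks the mutex when it goes out of scope. A
stripped-down illustration of that underlying technique, using pthreads and invented
names rather than QEMU's actual implementation:

#include <pthread.h>

typedef struct LockGuard {
    pthread_mutex_t *mutex;
} LockGuard;

static inline LockGuard lock_guard_enter(pthread_mutex_t *mutex)
{
    pthread_mutex_lock(mutex);              /* take the lock up front */
    return (LockGuard){ .mutex = mutex };
}

static inline void lock_guard_exit(LockGuard *guard)
{
    pthread_mutex_unlock(guard->mutex);     /* runs automatically at scope exit */
}

/* Two-level paste so __LINE__ expands, giving each guard a unique name. */
#define LG_GLUE_(a, b) a##b
#define LG_GLUE(a, b) LG_GLUE_(a, b)

#define LOCK_GUARD(m)                                   \
    __attribute__((cleanup(lock_guard_exit), unused))   \
    LockGuard LG_GLUE(lock_guard_, __LINE__) = lock_guard_enter(m)

QEMU's real guards add a QemuLockable indirection on top of this idea, so the same
macros also work with other lock types such as CoMutex and spinlocks.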