vhost: Check for queue full at vhost_svq_add

The series needs to expose vhost_svq_add with full functionality,
including the check for a full queue.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Eugenio Pérez 2022-07-20 08:59:32 +02:00 committed by Jason Wang
parent 98b5adef84
commit f20b70eb5a
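
The -ENOSPC path added below compares the element's descriptor count (elem->in_num + elem->out_num) against the free slots reported by vhost_svq_available_slots(), a helper that is not part of this diff. The following is a minimal standalone sketch of that kind of accounting, with made-up names (svq_sketch, shadow_avail_idx, shadow_used_idx) standing in for the real shadow virtqueue fields; it is an illustration, not the QEMU implementation.

/* Illustrative sketch only: the names below are stand-ins, not QEMU code. */
#include <stdbool.h>
#include <stdint.h>

struct svq_sketch {
    uint16_t num;              /* size of the shadow vring */
    uint16_t shadow_avail_idx; /* descriptors exposed to the device so far */
    uint16_t shadow_used_idx;  /* descriptors the device has returned */
};

/* Free slots are the ring size minus the descriptors still in flight. */
static uint16_t sketch_available_slots(const struct svq_sketch *svq)
{
    return svq->num - (uint16_t)(svq->shadow_avail_idx - svq->shadow_used_idx);
}

/* The shape of the new check: an element needs in_num + out_num slots. */
static bool sketch_has_room(const struct svq_sketch *svq, unsigned ndescs)
{
    return ndescs <= sketch_available_slots(svq);
}

With such a check in place, vhost_svq_add can refuse the element with -ENOSPC before touching the ring, leaving ownership of the element with the caller.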


@@ -233,21 +233,29 @@ static void vhost_svq_kick(VhostShadowVirtqueue *svq)
  * Add an element to a SVQ.
  *
  * The caller must check that there is enough slots for the new element. It
- * takes ownership of the element: In case of failure, it is free and the SVQ
- * is considered broken.
+ * takes ownership of the element: In case of failure not ENOSPC, it is free.
+ *
+ * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full
  */
-static bool vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
+static int vhost_svq_add(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
 {
     unsigned qemu_head;
-    bool ok = vhost_svq_add_split(svq, elem, &qemu_head);
+    unsigned ndescs = elem->in_num + elem->out_num;
+    bool ok;
+
+    if (unlikely(ndescs > vhost_svq_available_slots(svq))) {
+        return -ENOSPC;
+    }
+
+    ok = vhost_svq_add_split(svq, elem, &qemu_head);
     if (unlikely(!ok)) {
         g_free(elem);
-        return false;
+        return -EINVAL;
     }
 
     svq->ring_id_maps[qemu_head] = elem;
     vhost_svq_kick(svq);
-    return true;
+    return 0;
 }
 
 /**
@@ -274,7 +282,7 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
 
     while (true) {
         VirtQueueElement *elem;
-        bool ok;
+        int r;
 
         if (svq->next_guest_avail_elem) {
             elem = g_steal_pointer(&svq->next_guest_avail_elem);
@@ -286,25 +294,24 @@ static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
             break;
         }
 
-        if (elem->out_num + elem->in_num > vhost_svq_available_slots(svq)) {
-            /*
-             * This condition is possible since a contiguous buffer in GPA
-             * does not imply a contiguous buffer in qemu's VA
-             * scatter-gather segments. If that happens, the buffer exposed
-             * to the device needs to be a chain of descriptors at this
-             * moment.
-             *
-             * SVQ cannot hold more available buffers if we are here:
-             * queue the current guest descriptor and ignore further kicks
-             * until some elements are used.
-             */
-            svq->next_guest_avail_elem = elem;
-            return;
-        }
-
-        ok = vhost_svq_add(svq, elem);
-        if (unlikely(!ok)) {
-            /* VQ is broken, just return and ignore any other kicks */
+        r = vhost_svq_add(svq, elem);
+        if (unlikely(r != 0)) {
+            if (r == -ENOSPC) {
+                /*
+                 * This condition is possible since a contiguous buffer in
+                 * GPA does not imply a contiguous buffer in qemu's VA
+                 * scatter-gather segments. If that happens, the buffer
+                 * exposed to the device needs to be a chain of descriptors
+                 * at this moment.
+                 *
+                 * SVQ cannot hold more available buffers if we are here:
+                 * queue the current guest descriptor and ignore kicks
+                 * until some elements are used.
+                 */
+                svq->next_guest_avail_elem = elem;
+            }
+
+            /* VQ is full or broken, just return and ignore kicks */
             return;
         }
     }
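
Since vhost_svq_add now reports -ENOSPC and -EINVAL instead of a bare bool, a caller outside the guest kick path (the kind of user this series is building toward) can tell a transiently full ring from a broken element. The sketch below is hypothetical, not code from the series: sketch_requeue_later() is a made-up retry hook, and it only illustrates the ownership rules stated in the new doc comment.

/*
 * Hypothetical caller, assuming the QEMU declarations of
 * VhostShadowVirtqueue, VirtQueueElement and vhost_svq_add(), plus
 * <errno.h> for ENOSPC, are in scope. sketch_requeue_later() is invented.
 */
static void sketch_inject(VhostShadowVirtqueue *svq, VirtQueueElement *elem)
{
    int r = vhost_svq_add(svq, elem);

    switch (r) {
    case 0:
        /* Queued and kicked; the SVQ now owns elem. */
        break;
    case -ENOSPC:
        /* Ring full: elem was not freed, keep it and retry later. */
        sketch_requeue_later(svq, elem);
        break;
    default:
        /* -EINVAL: elem was invalid and vhost_svq_add already freed it. */
        break;
    }
}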