vhost: Track descriptor chain in private at SVQ
The device could have access to modify them, and it definitely will have
access when we implement packed vq. Harden SVQ by maintaining a private
copy of the descriptor chain. Other fields, like buffer addresses, are
already maintained separately.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20220512175747.142058-2-eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit 495fe3a787
parent aa69abe6e8
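
To make the intent easier to see outside the diff, here is a minimal, self-contained sketch of the same idea; it is not QEMU code, and the ToySvq/ToyDesc/toy_* names are invented for illustration. It demonstrates the property the patch establishes: the next pointers in the device-visible ring are write-only from the driver's side, and every chain walk (allocation and recycling) goes through a private desc_next[] copy, so a device that corrupts the shared ring cannot corrupt the free list.

/*
 * Toy model of the hardening idea, not QEMU code: all ToySvq/toy_* names
 * are made up for illustration. The device-visible ring can be modified
 * by the device, so descriptors are chained and recycled exclusively
 * through a private desc_next[] copy; the shared ring's next fields are
 * only ever written, never read back.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct ToyDesc {
    uint64_t addr;
    uint32_t len;
    uint16_t flags;
    uint16_t next;       /* shared with the (untrusted) device */
} ToyDesc;

typedef struct ToySvq {
    ToyDesc *ring;       /* device-visible descriptor ring */
    uint16_t *desc_next; /* private copy of the chain, device never sees it */
    uint16_t free_head;
    uint16_t num;
} ToySvq;

static void toy_svq_init(ToySvq *svq, uint16_t num)
{
    svq->num = num;
    svq->ring = calloc(num, sizeof(*svq->ring));
    svq->desc_next = calloc(num, sizeof(*svq->desc_next));
    svq->free_head = 0;
    for (uint16_t i = 0; i + 1 < num; i++) {
        svq->desc_next[i] = i + 1;   /* private free chain: 0 -> 1 -> ... */
    }
}

/* Take n descriptors from the free list; publish next to the device but
 * advance only through the private copy. */
static uint16_t toy_svq_alloc_chain(ToySvq *svq, uint16_t n)
{
    uint16_t head = svq->free_head, i = head;

    for (uint16_t k = 0; k < n; k++) {
        svq->ring[i].next = svq->desc_next[i];  /* write-only towards device */
        i = svq->desc_next[i];                  /* walk the trusted copy */
    }
    svq->free_head = i;
    return head;
}

/* Return an n-descriptor chain starting at id to the free list,
 * again walking only the private copy, never ring[].next. */
static void toy_svq_push_free(ToySvq *svq, uint16_t id, uint16_t n)
{
    uint16_t last = id;

    for (uint16_t k = 1; k < n; k++) {
        last = svq->desc_next[last];
    }
    svq->desc_next[last] = svq->free_head;
    svq->free_head = id;
}

int main(void)
{
    ToySvq svq;

    toy_svq_init(&svq, 8);
    uint16_t head = toy_svq_alloc_chain(&svq, 3);

    /* A misbehaving device scribbles over the shared chain... */
    svq.ring[head].next = 0xffff;

    /* ...but recycling relies only on the private copy, so the free list
     * stays consistent. */
    toy_svq_push_free(&svq, head, 3);
    printf("free_head=%u\n", (unsigned)svq.free_head);

    free(svq.ring);
    free(svq.desc_next);
    return 0;
}

Compiled standalone (e.g. with gcc -std=c99), the corrupted ring[head].next has no effect on free_head, which is the guarantee the patch gives SVQ's split virtqueue and will also need once packed virtqueues are implemented.
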
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -138,6 +138,7 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
     for (n = 0; n < num; n++) {
         if (more_descs || (n + 1 < num)) {
             descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT);
+            descs[i].next = cpu_to_le16(svq->desc_next[i]);
         } else {
             descs[i].flags = flags;
         }
@@ -145,10 +146,10 @@ static void vhost_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg,
         descs[i].len = cpu_to_le32(iovec[n].iov_len);
 
         last = i;
-        i = cpu_to_le16(descs[i].next);
+        i = cpu_to_le16(svq->desc_next[i]);
     }
 
-    svq->free_head = le16_to_cpu(descs[last].next);
+    svq->free_head = le16_to_cpu(svq->desc_next[last]);
 }
 
 static bool vhost_svq_add_split(VhostShadowVirtqueue *svq,
@@ -336,7 +337,6 @@ static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
 static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
                                            uint32_t *len)
 {
-    vring_desc_t *descs = svq->vring.desc;
     const vring_used_t *used = svq->vring.used;
     vring_used_elem_t used_elem;
     uint16_t last_used;
@@ -365,7 +365,7 @@ static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
         return NULL;
     }
 
-    descs[used_elem.id].next = svq->free_head;
+    svq->desc_next[used_elem.id] = svq->free_head;
     svq->free_head = used_elem.id;
 
     *len = used_elem.len;
@@ -540,8 +540,9 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
     svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
     memset(svq->vring.used, 0, device_size);
     svq->ring_id_maps = g_new0(VirtQueueElement *, svq->vring.num);
+    svq->desc_next = g_new0(uint16_t, svq->vring.num);
     for (unsigned i = 0; i < svq->vring.num - 1; i++) {
-        svq->vring.desc[i].next = cpu_to_le16(i + 1);
+        svq->desc_next[i] = cpu_to_le16(i + 1);
     }
 }
 
@@ -574,6 +575,7 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
         virtqueue_detach_element(svq->vq, next_avail_elem, 0);
     }
     svq->vq = NULL;
+    g_free(svq->desc_next);
     g_free(svq->ring_id_maps);
     qemu_vfree(svq->vring.desc);
     qemu_vfree(svq->vring.used);
--- a/hw/virtio/vhost-shadow-virtqueue.h
+++ b/hw/virtio/vhost-shadow-virtqueue.h
@@ -53,6 +53,12 @@ typedef struct VhostShadowVirtqueue {
     /* Next VirtQueue element that guest made available */
    VirtQueueElement *next_guest_avail_elem;
 
+    /*
+     * Backup next field for each descriptor so we can recover securely, not
+     * needing to trust the device access.
+     */
+    uint16_t *desc_next;
+
     /* Next head to expose to the device */
     uint16_t shadow_avail_idx;
 