/*
 * vhost shadow virtqueue
 *
 * SPDX-FileCopyrightText: Red Hat, Inc. 2021
 * SPDX-FileContributor: Author: Eugenio Pérez <eperezma@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef VHOST_SHADOW_VIRTQUEUE_H
#define VHOST_SHADOW_VIRTQUEUE_H

#include "qemu/event_notifier.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"
#include "hw/virtio/vhost-iova-tree.h"

typedef struct SVQDescState {
    VirtQueueElement *elem;

    /*
     * Number of descriptors exposed to the device. May or may not match
     * the guest's.
     */
    unsigned int ndescs;
} SVQDescState;

typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;

/**
 * Callback to handle an avail buffer.
 *
 * @svq: Shadow virtqueue
 * @elem: Element placed in the queue by the guest
 * @vq_callback_opaque: Opaque pointer that the caller passed to
 *                      vhost_svq_new() as ops_opaque
 *
 * Returns 0 if the vq is running as expected.
 *
 * Note that ownership of elem is transferred to the callback.
 */
typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq,
                                      VirtQueueElement *elem,
                                      void *vq_callback_opaque);

typedef struct VhostShadowVirtqueueOps {
    VirtQueueAvailCallback avail_handler;
} VhostShadowVirtqueueOps;
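
/*
 * Illustrative sketch, not part of the SVQ API: a minimal avail_handler that
 * a caller could register through VhostShadowVirtqueueOps.  It forwards the
 * guest's buffers to the device unmodified, busy-polls until the device uses
 * them, and then returns the element to the guest.  All names below are
 * hypothetical; a real handler (e.g. the vhost-vdpa control virtqueue one)
 * adds its own processing and error handling.
 *
 *     static int example_avail_handler(VhostShadowVirtqueue *svq,
 *                                      VirtQueueElement *elem, void *opaque)
 *     {
 *         size_t written;
 *         int r;
 *
 *         // Expose the guest's buffers through the shadow vring
 *         r = vhost_svq_add(svq, elem->out_sg, elem->out_num,
 *                           elem->in_sg, elem->in_num, elem);
 *         if (r != 0) {
 *             // Could not add: return the element unused (a real handler
 *             // might queue it and retry later instead)
 *             vhost_svq_push_elem(svq, elem, 0);
 *             g_free(elem);
 *             return r;
 *         }
 *
 *         // Wait for the device to use the buffers, then hand them back
 *         written = vhost_svq_poll(svq);
 *         vhost_svq_push_elem(svq, elem, written);
 *         g_free(elem);
 *         return 0;
 *     }
 *
 *     static const VhostShadowVirtqueueOps example_ops = {
 *         .avail_handler = example_avail_handler,
 *     };
 */
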
/* Shadow virtqueue to relay notifications */
typedef struct VhostShadowVirtqueue {
    /* Shadow vring */
    struct vring vring;

    /* Shadow kick notifier, sent to vhost */
    EventNotifier hdev_kick;
    /* Shadow call notifier, sent to vhost */
    EventNotifier hdev_call;

    /*
     * Borrowed virtqueue's guest-to-host notifier. Borrowing it into this
     * event notifier lets us recover the VhostShadowVirtqueue from the event
     * loop easily; if we used the VirtQueue's own notifier, there would be no
     * easy way to retrieve the VhostShadowVirtqueue.
     *
     * The shadow virtqueue must not clean it up, or we would lose the
     * VirtQueue's notifier.
     */
    EventNotifier svq_kick;

    /* Guest's call notifier, where the SVQ calls the guest. */
    EventNotifier svq_call;

    /* Virtio queue shadowing */
    VirtQueue *vq;

    /* Virtio device */
    VirtIODevice *vdev;

    /* IOVA mapping */
    VhostIOVATree *iova_tree;

    /* SVQ vring descriptors state */
    SVQDescState *desc_state;

    /* Next VirtQueue element that the guest made available */
    VirtQueueElement *next_guest_avail_elem;

    /*
     * Backup of the next field for each descriptor, so we can recover
     * safely without having to trust the device's accesses.
     */
    uint16_t *desc_next;

    /* Caller callbacks */
    const VhostShadowVirtqueueOps *ops;

    /* Caller callbacks opaque */
    void *ops_opaque;

    /* Next head to expose to the device */
    uint16_t shadow_avail_idx;

    /* Next free descriptor */
    uint16_t free_head;

    /* Last seen used idx */
    uint16_t shadow_used_idx;

    /* Next head to consume from the device */
    uint16_t last_used_idx;
} VhostShadowVirtqueue;

bool vhost_svq_valid_features(uint64_t features, Error **errp);

void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
                         const VirtQueueElement *elem, uint32_t len);
int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
                  size_t out_num, const struct iovec *in_sg, size_t in_num,
                  VirtQueueElement *elem);
size_t vhost_svq_poll(VhostShadowVirtqueue *svq);

void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
                              struct vhost_vring_addr *addr);
size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
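
/*
 * Illustrative sketch, not part of the SVQ API: inside a vhost backend's
 * queue-setup code, the helpers above can be combined to learn where the
 * shadow rings live and how big they are before exposing them to the device
 * (this mirrors in spirit what hw/virtio/vhost-vdpa.c does).  Variable names
 * are hypothetical.
 *
 *     struct vhost_vring_addr addr;
 *     size_t driver_size, device_size;
 *
 *     vhost_svq_get_vring_addr(svq, &addr);
 *     driver_size = vhost_svq_driver_area_size(svq);  // descriptor + avail
 *     device_size = vhost_svq_device_area_size(svq);  // used ring
 *
 *     // Map [addr.desc_user_addr, +driver_size) read-only for the device,
 *     // map [addr.used_user_addr, +device_size) writable, and then tell the
 *     // device about the (possibly IOVA-translated) addresses through the
 *     // backend's set_vring_addr operation.
 */
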
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
                     VirtQueue *vq, VhostIOVATree *iova_tree);
void vhost_svq_stop(VhostShadowVirtqueue *svq);

VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
                                    void *ops_opaque);

void vhost_svq_free(gpointer vq);
G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free);
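
/*
 * Illustrative sketch, not part of the SVQ API: a rough lifecycle of one
 * shadow virtqueue as driven by a vhost backend.  The variables (vdev, vq,
 * iova_tree, guest_kick_fd, guest_call_fd) are hypothetical placeholders for
 * state the backend already owns; the precise wiring and ordering live in
 * the backend (e.g. hw/virtio/vhost-vdpa.c).
 *
 *     // NULL ops: default behaviour, guest buffers are forwarded as-is
 *     VhostShadowVirtqueue *svq = vhost_svq_new(NULL, NULL);
 *
 *     // Attach to the virtio device/queue and allocate the shadow rings
 *     vhost_svq_start(svq, vdev, vq, iova_tree);
 *
 *     // Relay notifications: the guest's kick/call fds go to the SVQ, while
 *     // the device is pointed at svq->hdev_kick / svq->hdev_call instead
 *     vhost_svq_set_svq_kick_fd(svq, guest_kick_fd);
 *     vhost_svq_set_svq_call_fd(svq, guest_call_fd);
 *
 *     // ... queue runs; guest kicks and device calls are relayed ...
 *
 *     vhost_svq_stop(svq);
 *     vhost_svq_free(svq);
 */
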
#endif