vhost: backend masking support
Support backend guest notifier masking in vhost-net: create an eventfd at device init; while the guest notifier is masked, make vhost signal that eventfd instead of injecting an interrupt into the guest.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent 24f4fe345c
commit f56a12475f
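In short, vhost signals guest interrupts through whatever eventfd was last installed with the VHOST_SET_VRING_CALL ioctl, so this patch keeps a second, per-virtqueue "masked" eventfd around and masking becomes a matter of which fd is installed. A minimal sketch of that ioctl against the kernel vhost API (the helper name is made up; not code from this patch):

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>    /* struct vhost_vring_file, VHOST_SET_VRING_CALL */

/* Hypothetical helper (illustration only): point virtqueue `index` of the
 * vhost device at `call_fd`.  From now on vhost signals this eventfd
 * instead of the previously installed one. */
static int set_call_eventfd(int vhost_fd, unsigned int index, int call_fd)
{
    struct vhost_vring_file file = { .index = index, .fd = call_fd };
    return ioctl(vhost_fd, VHOST_SET_VRING_CALL, &file) ? -errno : 0;
}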
hw/vhost.c | 95
@@ -612,7 +612,7 @@ static void vhost_log_stop(MemoryListener *listener,
     /* FIXME: implement */
 }
 
-static int vhost_virtqueue_init(struct vhost_dev *dev,
+static int vhost_virtqueue_start(struct vhost_dev *dev,
                                 struct VirtIODevice *vdev,
                                 struct vhost_virtqueue *vq,
                                 unsigned idx)
@@ -681,16 +681,11 @@ static int vhost_virtqueue_init(struct vhost_dev *dev,
         goto fail_kick;
     }
 
-    file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
-    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
-    if (r) {
-        r = -errno;
-        goto fail_call;
-    }
+    /* Clear and discard previous events if any. */
+    event_notifier_test_and_clear(&vq->masked_notifier);
 
     return 0;
 
-fail_call:
 fail_kick:
 fail_alloc:
     cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
@@ -708,7 +703,7 @@ fail_alloc_desc:
     return r;
 }
 
-static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
+static void vhost_virtqueue_stop(struct vhost_dev *dev,
                                     struct VirtIODevice *vdev,
                                     struct vhost_virtqueue *vq,
                                     unsigned idx)
@@ -746,11 +741,39 @@ static void vhost_eventfd_del(MemoryListener *listener,
 {
 }
 
+static int vhost_virtqueue_init(struct vhost_dev *dev,
+                                struct vhost_virtqueue *vq, int n)
+{
+    struct vhost_vring_file file = {
+        .index = n,
+    };
+    int r = event_notifier_init(&vq->masked_notifier, 0);
+    if (r < 0) {
+        return r;
+    }
+
+    file.fd = event_notifier_get_fd(&vq->masked_notifier);
+    r = ioctl(dev->control, VHOST_SET_VRING_CALL, &file);
+    if (r) {
+        r = -errno;
+        goto fail_call;
+    }
+    return 0;
+fail_call:
+    event_notifier_cleanup(&vq->masked_notifier);
+    return r;
+}
+
+static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq)
+{
+    event_notifier_cleanup(&vq->masked_notifier);
+}
+
 int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
                    bool force)
 {
     uint64_t features;
-    int r;
+    int i, r;
     if (devfd >= 0) {
         hdev->control = devfd;
     } else {
@@ -768,6 +791,13 @@ int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
     if (r < 0) {
         goto fail;
     }
+
+    for (i = 0; i < hdev->nvqs; ++i) {
+        r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
+        if (r < 0) {
+            goto fail_vq;
+        }
+    }
     hdev->features = features;
 
     hdev->memory_listener = (MemoryListener) {
@@ -795,6 +825,10 @@ int vhost_dev_init(struct vhost_dev *hdev, int devfd, const char *devpath,
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     hdev->force = force;
     return 0;
+fail_vq:
+    while (--i >= 0) {
+        vhost_virtqueue_cleanup(hdev->vqs + i);
+    }
 fail:
     r = -errno;
     close(hdev->control);
@@ -803,6 +837,10 @@ fail:
 
 void vhost_dev_cleanup(struct vhost_dev *hdev)
 {
+    int i;
+    for (i = 0; i < hdev->nvqs; ++i) {
+        vhost_virtqueue_cleanup(hdev->vqs + i);
+    }
     memory_listener_unregister(&hdev->memory_listener);
     g_free(hdev->mem);
     g_free(hdev->mem_sections);
@@ -869,6 +907,37 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
     }
 }
 
+/* Test and clear event pending status.
+ * Should be called after unmask to avoid losing events.
+ */
+bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
+{
+    struct vhost_virtqueue *vq = hdev->vqs + n;
+    assert(hdev->started);
+    return event_notifier_test_and_clear(&vq->masked_notifier);
+}
+
+/* Mask/unmask events from this vq. */
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
+                          bool mask)
+{
+    struct VirtQueue *vvq = virtio_get_queue(vdev, n);
+    int r;
+
+    assert(hdev->started);
+
+    struct vhost_vring_file file = {
+        .index = n,
+    };
+    if (mask) {
+        file.fd = event_notifier_get_fd(&hdev->vqs[n].masked_notifier);
+    } else {
+        file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
+    }
+    r = ioctl(hdev->control, VHOST_SET_VRING_CALL, &file);
+    assert(r >= 0);
+}
+
 /* Host notifiers must be enabled at this point. */
 int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 {
@@ -900,7 +969,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
         goto fail_mem;
     }
     for (i = 0; i < hdev->nvqs; ++i) {
-        r = vhost_virtqueue_init(hdev,
+        r = vhost_virtqueue_start(hdev,
                                  vdev,
                                  hdev->vqs + i,
                                  i);
@@ -925,7 +994,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
 fail_log:
 fail_vq:
     while (--i >= 0) {
-        vhost_virtqueue_cleanup(hdev,
+        vhost_virtqueue_stop(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
@@ -946,7 +1015,7 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
     int i, r;
 
     for (i = 0; i < hdev->nvqs; ++i) {
-        vhost_virtqueue_cleanup(hdev,
+        vhost_virtqueue_stop(hdev,
                                 vdev,
                                 hdev->vqs + i,
                                 i);
hw/vhost.h | 10
@@ -18,6 +18,7 @@ struct vhost_virtqueue {
     void *ring;
     unsigned long long ring_phys;
     unsigned ring_size;
+    EventNotifier masked_notifier;
 };
 
 typedef unsigned long vhost_log_chunk_t;
@@ -53,4 +54,13 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
 int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
 
+/* Test and clear masked event pending status.
+ * Should be called after unmask to avoid losing events.
+ */
+bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n);
+
+/* Mask/unmask events from this vq.
+ */
+void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
+                          bool mask);
 #endif
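The header comments encode an ordering requirement: check pending status only after unmasking, otherwise an event that vhost signals between the two steps can be lost. A minimal sketch of the expected caller sequence (hypothetical helper, not part of this patch; the real caller lives in the virtio transport code and assumes hw/vhost.h and hw/virtio.h):

/* Hypothetical unmask path (illustration only): re-route vhost back to the
 * guest notifier first, then drain anything latched in masked_notifier
 * while we were masked and re-raise it towards the guest. */
static void example_guest_notifier_unmask(struct vhost_dev *hdev,
                                          VirtIODevice *vdev, int n)
{
    vhost_virtqueue_mask(hdev, vdev, n, false);   /* unmask first */
    if (vhost_virtqueue_pending(hdev, n)) {       /* then test-and-clear */
        /* re-inject the event the guest would otherwise miss */
        event_notifier_set(
            virtio_queue_get_guest_notifier(virtio_get_queue(vdev, n)));
    }
}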
hw/vhost_net.c

@@ -109,6 +109,9 @@ struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
         (1 << VHOST_NET_F_VIRTIO_NET_HDR);
     net->backend = r;
 
+    net->dev.nvqs = 2;
+    net->dev.vqs = net->vqs;
+
     r = vhost_dev_init(&net->dev, devfd, "/dev/vhost-net", force);
     if (r < 0) {
         goto fail;
@@ -143,9 +146,6 @@ int vhost_net_start(struct vhost_net *net,
     struct vhost_vring_file file = { };
     int r;
 
-    net->dev.nvqs = 2;
-    net->dev.vqs = net->vqs;
-
     r = vhost_dev_enable_notifiers(&net->dev, dev);
     if (r < 0) {
         goto fail_notifiers;
@@ -200,6 +200,17 @@ void vhost_net_cleanup(struct vhost_net *net)
     vhost_dev_cleanup(&net->dev);
     g_free(net);
 }
+
+bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
+{
+    return vhost_virtqueue_pending(&net->dev, idx);
+}
+
+void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
+                              int idx, bool mask)
+{
+    vhost_virtqueue_mask(&net->dev, dev, idx, mask);
+}
 #else
 struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
                                  bool force)
@@ -234,4 +245,14 @@ unsigned vhost_net_get_features(struct vhost_net *net, unsigned features)
 void vhost_net_ack_features(struct vhost_net *net, unsigned features)
 {
 }
+
+bool vhost_net_virtqueue_pending(VHostNetState *net, int idx)
+{
+    return -ENOSYS;
+}
+
+void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
+                              int idx, bool mask)
+{
+}
 #endif
hw/vhost_net.h

@@ -17,4 +17,7 @@ void vhost_net_cleanup(VHostNetState *net);
 unsigned vhost_net_get_features(VHostNetState *net, unsigned features);
 void vhost_net_ack_features(VHostNetState *net, unsigned features);
 
+bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
+void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
+                              int idx, bool mask);
 #endif
hw/virtio-net.c

@@ -1010,6 +1010,22 @@ static NetClientInfo net_virtio_info = {
     .link_status_changed = virtio_net_set_link_status,
 };
 
+static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+    assert(n->vhost_started);
+    return vhost_net_virtqueue_pending(tap_get_vhost_net(n->nic->nc.peer), idx);
+}
+
+static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
+                                           bool mask)
+{
+    VirtIONet *n = to_virtio_net(vdev);
+    assert(n->vhost_started);
+    vhost_net_virtqueue_mask(tap_get_vhost_net(n->nic->nc.peer),
+                             vdev, idx, mask);
+}
+
 VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                               virtio_net_conf *net)
 {
@@ -1026,6 +1042,8 @@ VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
     n->vdev.bad_features = virtio_net_bad_features;
     n->vdev.reset = virtio_net_reset;
     n->vdev.set_status = virtio_net_set_status;
+    n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
+    n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
     n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
 
     if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
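For orientation (not part of this commit): the new VirtIODevice hooks are meant to be driven by the virtio transport when the guest masks or unmasks the interrupt vector backing a queue. Roughly, the transport side would look like the following hypothetical sketch; the function names and the exact call sites are assumptions, only the guest_notifier_mask/guest_notifier_pending hooks come from this patch:

/* Hypothetical transport-side handlers (illustration only).
 * When the guest masks the vector, ask the backend to latch events in its
 * masked notifier; on unmask, switch back and replay anything that arrived
 * in between. */
static void example_vq_vector_mask(VirtIODevice *vdev, int queue_no)
{
    if (vdev->guest_notifier_mask) {
        vdev->guest_notifier_mask(vdev, queue_no, true);
    }
}

static void example_vq_vector_unmask(VirtIODevice *vdev, int queue_no)
{
    if (vdev->guest_notifier_mask) {
        vdev->guest_notifier_mask(vdev, queue_no, false);
        if (vdev->guest_notifier_pending &&
            vdev->guest_notifier_pending(vdev, queue_no)) {
            /* re-raise the notification the guest would otherwise miss */
            event_notifier_set(virtio_queue_get_guest_notifier(
                virtio_get_queue(vdev, queue_no)));
        }
    }
}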