vhost_net: configure all host notifiers in a single MR transaction

This allows the vhost_net device which has multiple virtqueues to batch
the setup of all its host notifiers. This significantly reduces the
vhost_net device starting and stopping time, e.g. the time spent
on enabling notifiers reduced from 630ms to 75ms and the time spent on
disabling notifiers reduced from 441ms to 45ms for a VM with 192 vCPUs
and 15 vhost-user-net devices (64 vqs per device) in our case.

Signed-off-by: zuoboqun <zuoboqun@baidu.com>
Message-Id: <20240816070835.8309-1-zuoboqun@baidu.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
This commit is contained in:
zuoboqun 2024-08-16 15:08:35 +08:00 committed by Michael S. Tsirkin
parent a6896ebc8f
commit 6166799f69
3 changed files with 150 additions and 15 deletions

View File

@ -162,6 +162,135 @@ void vhost_net_save_acked_features(NetClientState *nc)
#endif #endif
} }
/*
 * Tear down the host notifiers of the first @nvhosts vhost devices
 * backing @dev.  Data queue pairs occupy ncs[0..data_queue_pairs-1];
 * any further vhost (the control virtqueue) is reached through
 * ncs[n->max_queue_pairs].
 */
static void vhost_net_disable_notifiers_nvhosts(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int nvhosts)
{
    VirtIONet *n = VIRTIO_NET(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    NetClientState *peer;
    struct vhost_dev *hdev;
    int ret, idx, vq;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (idx = 0; idx < nvhosts; idx++) {
        peer = qemu_get_peer(ncs, idx < data_queue_pairs ?
                                  idx : n->max_queue_pairs);
        hdev = &get_vhost_net(peer)->dev;

        for (vq = 0; vq < hdev->nvqs; vq++) {
            ret = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
                                               hdev->vq_index + vq,
                                               false);
            if (ret < 0) {
                error_report("vhost %d VQ %d notifier cleanup failed: %d",
                             idx, vq, -ret);
            }
            assert(ret >= 0);
        }
    }

    /*
     * The transaction expects the ioeventfds to be open when it
     * commits. Do it now, before the cleanup loop.
     */
    memory_region_transaction_commit();

    /* Second pass: the eventfds may only be closed after the commit. */
    for (idx = 0; idx < nvhosts; idx++) {
        peer = qemu_get_peer(ncs, idx < data_queue_pairs ?
                                  idx : n->max_queue_pairs);
        hdev = &get_vhost_net(peer)->dev;

        for (vq = 0; vq < hdev->nvqs; vq++) {
            virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus),
                                             hdev->vq_index + vq);
        }
        /* Drop the ioeventfd reference taken when notifiers were enabled. */
        virtio_device_release_ioeventfd(dev);
    }
}
/*
 * Stop processing guest IO notifications in qemu.
 * Start processing them in vhost in kernel.
 *
 * Installs a host notifier (ioeventfd) for every virtqueue of every
 * vhost backing @dev: @data_queue_pairs data vhosts plus, when @cvq is
 * non-zero, the control-queue vhost at ncs index n->max_queue_pairs.
 *
 * Returns 0 on success, or a negative error after rolling back every
 * notifier that was already set up.
 */
static int vhost_net_enable_notifiers(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int cvq)
{
    VirtIONet *n = VIRTIO_NET(dev);
    BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(dev)));
    int nvhosts = data_queue_pairs + cvq;
    struct vhost_net *net;
    struct vhost_dev *hdev;
    int r, i, j;
    NetClientState *peer;

    /*
     * Batch all the host notifiers in a single transaction to avoid
     * quadratic time complexity in address_space_update_ioeventfds().
     */
    memory_region_transaction_begin();

    for (i = 0; i < nvhosts; i++) {
        /* Data queue pairs first; the control vq peer sits past them. */
        if (i < data_queue_pairs) {
            peer = qemu_get_peer(ncs, i);
        } else {
            peer = qemu_get_peer(ncs, n->max_queue_pairs);
        }

        net = get_vhost_net(peer);
        hdev = &net->dev;
        /*
         * We will pass the notifiers to the kernel, make sure that QEMU
         * doesn't interfere.
         */
        r = virtio_device_grab_ioeventfd(dev);
        if (r < 0) {
            error_report("binding does not support host notifiers");
            /* Commit before unwinding: rollback re-enters a transaction. */
            memory_region_transaction_commit();
            goto fail_nvhosts;
        }

        for (j = 0; j < hdev->nvqs; j++) {
            r = virtio_bus_set_host_notifier(VIRTIO_BUS(qbus),
                                             hdev->vq_index + j,
                                             true);
            if (r < 0) {
                error_report("vhost %d VQ %d notifier binding failed: %d",
                             i, j, -r);
                memory_region_transaction_commit();
                /* Undo the j notifiers of this vhost already installed. */
                vhost_dev_disable_notifiers_nvqs(hdev, dev, j);
                goto fail_nvhosts;
            }
        }
    }

    memory_region_transaction_commit();

    return 0;
fail_nvhosts:
    /* Undo the i fully-configured vhosts that precede the failing one. */
    vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs, i);
    return r;
}
/*
 * Undo vhost_net_enable_notifiers(): hand notifier processing back to
 * qemu for every vhost backing @dev — all data queue pairs plus the
 * control virtqueue, if present.
 */
static void vhost_net_disable_notifiers(VirtIODevice *dev,
                NetClientState *ncs, int data_queue_pairs, int cvq)
{
    int total_vhosts = data_queue_pairs + cvq;

    vhost_net_disable_notifiers_nvhosts(dev, ncs, data_queue_pairs,
                                        total_vhosts);
}
static int vhost_net_get_fd(NetClientState *backend)
{
    switch (backend->info->type) {
@ -272,11 +401,6 @@ static int vhost_net_start_one(struct vhost_net *net,
} }
} }
r = vhost_dev_enable_notifiers(&net->dev, dev);
if (r < 0) {
goto fail_notifiers;
}
    r = vhost_dev_start(&net->dev, dev, false);
    if (r < 0) {
        goto fail_start;
@ -328,8 +452,6 @@ fail:
} }
vhost_dev_stop(&net->dev, dev, false); vhost_dev_stop(&net->dev, dev, false);
fail_start: fail_start:
vhost_dev_disable_notifiers(&net->dev, dev);
fail_notifiers:
return r; return r;
} }
@ -351,7 +473,6 @@ static void vhost_net_stop_one(struct vhost_net *net,
if (net->nc->info->stop) { if (net->nc->info->stop) {
net->nc->info->stop(net->nc); net->nc->info->stop(net->nc);
} }
vhost_dev_disable_notifiers(&net->dev, dev);
} }
int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
@ -396,10 +517,16 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
} }
} }
r = vhost_net_enable_notifiers(dev, ncs, data_queue_pairs, cvq);
if (r < 0) {
error_report("Error enabling host notifiers: %d", -r);
goto err;
}
r = k->set_guest_notifiers(qbus->parent, total_notifiers, true); r = k->set_guest_notifiers(qbus->parent, total_notifiers, true);
if (r < 0) { if (r < 0) {
error_report("Error binding guest notifier: %d", -r); error_report("Error binding guest notifier: %d", -r);
goto err; goto err_host_notifiers;
} }
for (i = 0; i < nvhosts; i++) { for (i = 0; i < nvhosts; i++) {
@ -414,19 +541,19 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
r = vhost_set_vring_enable(peer, peer->vring_enable); r = vhost_set_vring_enable(peer, peer->vring_enable);
if (r < 0) { if (r < 0) {
goto err_start; goto err_guest_notifiers;
} }
} }
r = vhost_net_start_one(get_vhost_net(peer), dev); r = vhost_net_start_one(get_vhost_net(peer), dev);
if (r < 0) { if (r < 0) {
goto err_start; goto err_guest_notifiers;
} }
} }
return 0; return 0;
err_start: err_guest_notifiers:
while (--i >= 0) { while (--i >= 0) {
peer = qemu_get_peer(ncs, i < data_queue_pairs ? peer = qemu_get_peer(ncs, i < data_queue_pairs ?
i : n->max_queue_pairs); i : n->max_queue_pairs);
@ -437,6 +564,8 @@ err_start:
fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e); fprintf(stderr, "vhost guest notifier cleanup failed: %d\n", e);
fflush(stderr); fflush(stderr);
} }
err_host_notifiers:
vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
err: err:
return r; return r;
} }
@ -468,6 +597,8 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs,
fflush(stderr); fflush(stderr);
} }
assert(r >= 0); assert(r >= 0);
vhost_net_disable_notifiers(dev, ncs, data_queue_pairs, cvq);
} }
void vhost_net_cleanup(struct vhost_net *net) void vhost_net_cleanup(struct vhost_net *net)

View File

@ -1682,9 +1682,9 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
memset(hdev, 0, sizeof(struct vhost_dev)); memset(hdev, 0, sizeof(struct vhost_dev));
} }
static void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev, void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
VirtIODevice *vdev, VirtIODevice *vdev,
unsigned int nvqs) unsigned int nvqs)
{ {
BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
int i, r; int i, r;

View File

@ -171,6 +171,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
*/ */
void vhost_dev_cleanup(struct vhost_dev *hdev); void vhost_dev_cleanup(struct vhost_dev *hdev);
void vhost_dev_disable_notifiers_nvqs(struct vhost_dev *hdev,
VirtIODevice *vdev,
unsigned int nvqs);
/** /**
* vhost_dev_enable_notifiers() - enable event notifiers * vhost_dev_enable_notifiers() - enable event notifiers
* @hdev: common vhost_dev structure * @hdev: common vhost_dev structure