vdpa: move vhost_vdpa_set_vring_ready to the caller

Doing it this way allows CVQ to be enabled before the dataplane vqs, so
that state such as MQ or the MAC address is restored properly in the
case of a migration.

The patch does this by defining a ->load NetClientInfo callback for the
dataplane queues too.  Ideally this would be a separate patch, but the
function is already static, so that patch would only add an empty
vhost_vdpa_net_data_load stub.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Message-Id: <20230822085330.3978829-5-eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Eugenio Pérez 2023-08-22 10:53:29 +02:00 committed by Michael S. Tsirkin
parent f3fada598c
commit 6c4825476a
3 changed files with 46 additions and 25 deletions
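
Before the diffs, a standalone sketch (editorial, not part of the patch) of the
enable ordering the new ->load callbacks establish when a control virtqueue is
present.  The names data_load(), cvq_load() and the printf-based
set_vring_ready() are illustrative stand-ins for vhost_vdpa_net_data_load(),
vhost_vdpa_net_cvq_load() and vhost_vdpa_set_vring_ready(); the queue layout in
main() (two data queue pairs plus a CVQ) is an assumed example.

/*
 * Standalone illustration, not QEMU code: models the vring-enable order
 * that the ->load callbacks below establish.
 */
#include <stdbool.h>
#include <stdio.h>

static void set_vring_ready(int idx)
{
    printf("enable vring %d\n", idx);
}

/* Dataplane ->load: with a CVQ present it does nothing, so the CVQ's
 * ->load can restore MAC/MQ/... before any dataplane vring is enabled. */
static void data_load(int vq_index, int nvqs, bool has_cvq)
{
    if (has_cvq) {
        return;
    }
    for (int i = 0; i < nvqs; ++i) {
        set_vring_ready(vq_index + i);
    }
}

/* CVQ ->load: enable the CVQ first, replay the device state through it,
 * then enable every dataplane vring below the CVQ index. */
static void cvq_load(int cvq_index)
{
    set_vring_ready(cvq_index);
    printf("restore MAC/MQ/offloads/RX/VLAN via CVQ\n");
    for (int i = 0; i < cvq_index; ++i) {
        set_vring_ready(i);
    }
}

int main(void)
{
    /* Assumed layout: two data queue pairs (vrings 0..3) plus a CVQ at vring 4. */
    data_load(0, 2, true);
    data_load(2, 2, true);
    cvq_load(4);
    return 0;
}

With that layout it enables vring 4 (the CVQ) first, restores the device state
through it, and only then enables vrings 0-3, which is the ordering the commit
message describes.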

hw/virtio/vdpa-dev.c

@@ -255,6 +255,9 @@ static int vhost_vdpa_device_start(VirtIODevice *vdev, Error **errp)
         error_setg_errno(errp, -ret, "Error starting vhost");
         goto err_guest_notifiers;
     }
+    for (i = 0; i < s->dev.nvqs; ++i) {
+        vhost_vdpa_set_vring_ready(&s->vdpa, i);
+    }
     s->started = true;
 
     /*

hw/virtio/vhost-vdpa.c

@@ -1303,9 +1303,6 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
         if (unlikely(!ok)) {
             return -1;
         }
-        for (int i = 0; i < dev->nvqs; ++i) {
-            vhost_vdpa_set_vring_ready(v, dev->vq_index + i);
-        }
     } else {
         vhost_vdpa_suspend(dev);
         vhost_vdpa_svqs_stop(dev);

net/vhost-vdpa.c

@@ -375,6 +375,22 @@ static int vhost_vdpa_net_data_start(NetClientState *nc)
     return 0;
 }
 
+static int vhost_vdpa_net_data_load(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+    bool has_cvq = v->dev->vq_index_end % 2;
+
+    if (has_cvq) {
+        return 0;
+    }
+
+    for (int i = 0; i < v->dev->nvqs; ++i) {
+        vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
+    }
+    return 0;
+}
+
 static void vhost_vdpa_net_client_stop(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
@@ -397,6 +413,7 @@ static NetClientInfo net_vhost_vdpa_info = {
         .size = sizeof(VhostVDPAState),
         .receive = vhost_vdpa_receive,
         .start = vhost_vdpa_net_data_start,
+        .load = vhost_vdpa_net_data_load,
         .stop = vhost_vdpa_net_client_stop,
         .cleanup = vhost_vdpa_cleanup,
         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
@@ -1022,30 +1039,34 @@ static int vhost_vdpa_net_cvq_load(NetClientState *nc)
 
     assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 
-    if (!v->shadow_vqs_enabled) {
-        return 0;
+    vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
+
+    if (v->shadow_vqs_enabled) {
+        n = VIRTIO_NET(v->dev->vdev);
+        r = vhost_vdpa_net_load_mac(s, n);
+        if (unlikely(r < 0)) {
+            return r;
+        }
+        r = vhost_vdpa_net_load_mq(s, n);
+        if (unlikely(r)) {
+            return r;
+        }
+        r = vhost_vdpa_net_load_offloads(s, n);
+        if (unlikely(r)) {
+            return r;
+        }
+        r = vhost_vdpa_net_load_rx(s, n);
+        if (unlikely(r)) {
+            return r;
+        }
+        r = vhost_vdpa_net_load_vlan(s, n);
+        if (unlikely(r)) {
+            return r;
+        }
     }
 
-    n = VIRTIO_NET(v->dev->vdev);
-    r = vhost_vdpa_net_load_mac(s, n);
-    if (unlikely(r < 0)) {
-        return r;
-    }
-    r = vhost_vdpa_net_load_mq(s, n);
-    if (unlikely(r)) {
-        return r;
-    }
-    r = vhost_vdpa_net_load_offloads(s, n);
-    if (unlikely(r)) {
-        return r;
-    }
-    r = vhost_vdpa_net_load_rx(s, n);
-    if (unlikely(r)) {
-        return r;
-    }
-    r = vhost_vdpa_net_load_vlan(s, n);
-    if (unlikely(r)) {
-        return r;
+    for (int i = 0; i < v->dev->vq_index; ++i) {
+        vhost_vdpa_set_vring_ready(v, i);
     }
 
     return 0;