vdpa net: move iova tree creation from init to start

Only create iova_tree if and when it is needed.

Cleanup is still tied to the client that owns the last VQ, but this change
allows the two cleanup functions to be merged.

Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20230303172445.1089785-2-eperezma@redhat.com>
Tested-by: Lei Yang <leiyang@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Author: Eugenio Pérez <eperezma@redhat.com>
Date: 2023-03-03 18:24:32 +01:00
Committer: Michael S. Tsirkin
Commit: 00ef422e9f
Parent: 2133e07c4c
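
For orientation, the ownership scheme this patch introduces can be modeled in
a few lines of standalone C. The toy types below stand in for the QEMU
structs, and the assumption that vhost_net drives the NetClientInfo
.start/.stop callbacks once per client is inferred from context, not shown in
this diff:

    /* Toy model: the tree is created by the first queue pair's .start,
     * shared by every later client, and deleted by the .stop of the
     * client that owns the device's last virtqueue. */
    #include <assert.h>
    #include <stdlib.h>

    typedef struct IOVATree { int unused; } IOVATree;

    typedef struct {
        int index;           /* queue pair index, 0 for the first client */
        int vq_index, nvqs;  /* virtqueue range owned by this client */
        IOVATree *iova_tree;
    } Client;

    static void client_start(Client *c, Client *first)
    {
        /* index 0 allocates; later clients borrow the same pointer */
        c->iova_tree = (c->index == 0) ? calloc(1, sizeof(IOVATree))
                                       : first->iova_tree;
    }

    static void client_stop(Client *c, int vq_index_end)
    {
        /* only the owner of the last vq frees, mirroring g_clear_pointer() */
        if (c->vq_index + c->nvqs == vq_index_end) {
            free(c->iova_tree);
            c->iova_tree = NULL;
        }
    }

    int main(void)
    {
        Client qp0 = { .index = 0, .vq_index = 0, .nvqs = 2 };
        Client qp1 = { .index = 1, .vq_index = 2, .nvqs = 2 };

        client_start(&qp0, &qp0);
        client_start(&qp1, &qp0);
        assert(qp0.iova_tree && qp0.iova_tree == qp1.iova_tree);

        client_stop(&qp0, 4);   /* not the last vq: tree stays alive */
        client_stop(&qp1, 4);   /* last vq: single point of deletion */
        return 0;
    }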


@@ -178,13 +178,9 @@ err_init:
 static void vhost_vdpa_cleanup(NetClientState *nc)
 {
     VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
-    struct vhost_dev *dev = &s->vhost_net->dev;
 
     qemu_vfree(s->cvq_cmd_out_buffer);
     qemu_vfree(s->status);
-    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
-        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
-    }
     if (s->vhost_net) {
         vhost_net_cleanup(s->vhost_net);
         g_free(s->vhost_net);
@@ -234,10 +230,64 @@ static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
     return size;
 }
 
+/** From any vdpa net client, get the netclient of the first queue pair */
+static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
+{
+    NICState *nic = qemu_get_nic(s->nc.peer);
+    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
+
+    return DO_UPCAST(VhostVDPAState, nc, nc0);
+}
+
+static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
+{
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+
+    if (v->shadow_vqs_enabled) {
+        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+                                           v->iova_range.last);
+    }
+}
+
+static int vhost_vdpa_net_data_start(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_vdpa *v = &s->vhost_vdpa;
+
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    if (v->index == 0) {
+        vhost_vdpa_net_data_start_first(s);
+        return 0;
+    }
+
+    if (v->shadow_vqs_enabled) {
+        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
+        v->iova_tree = s0->vhost_vdpa.iova_tree;
+    }
+
+    return 0;
+}
+
+static void vhost_vdpa_net_client_stop(NetClientState *nc)
+{
+    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+    struct vhost_dev *dev;
+
+    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
+
+    dev = s->vhost_vdpa.dev;
+    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
+        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+    }
+}
+
 static NetClientInfo net_vhost_vdpa_info = {
         .type = NET_CLIENT_DRIVER_VHOST_VDPA,
         .size = sizeof(VhostVDPAState),
         .receive = vhost_vdpa_receive,
+        .start = vhost_vdpa_net_data_start,
+        .stop = vhost_vdpa_net_client_stop,
         .cleanup = vhost_vdpa_cleanup,
         .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
         .has_ufo = vhost_vdpa_has_ufo,
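
The dev->vq_index + dev->nvqs == dev->vq_index_end test above selects exactly
one client per device. Assuming the layout set up by net_init_vhost_vdpa()
below (two virtqueues per data queue pair, then a single control VQ, with
vq_index_end spanning them all), the arithmetic checks out:

    #include <stdbool.h>
    #include <stdio.h>

    /* True only for the client whose vq range ends the device's range. */
    static bool owns_last_vq(int vq_index, int nvqs, int vq_index_end)
    {
        return vq_index + nvqs == vq_index_end;
    }

    int main(void)
    {
        /* 2 data queue pairs + CVQ: vq_index_end = 2 * 2 + 1 = 5 */
        printf("qp0: %d\n", owns_last_vq(0, 2, 5));  /* 0 */
        printf("qp1: %d\n", owns_last_vq(2, 2, 5));  /* 0 */
        printf("cvq: %d\n", owns_last_vq(4, 1, 5));  /* 1: deletes the tree */
        return 0;
    }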
@@ -351,7 +401,7 @@ dma_map_err:
 
 static int vhost_vdpa_net_cvq_start(NetClientState *nc)
 {
-    VhostVDPAState *s;
+    VhostVDPAState *s, *s0;
     struct vhost_vdpa *v;
     uint64_t backend_features;
     int64_t cvq_group;
@@ -415,8 +465,6 @@ static int vhost_vdpa_net_cvq_start(NetClientState *nc)
         return r;
     }
 
-    v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
-                                       v->iova_range.last);
     v->shadow_vqs_enabled = true;
     s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
 
@@ -425,6 +473,27 @@ out:
         return 0;
     }
 
+    s0 = vhost_vdpa_net_first_nc_vdpa(s);
+    if (s0->vhost_vdpa.iova_tree) {
+        /*
+         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree for
+         * simplicity, whether CVQ shares ASID with the guest or not, because:
+         * - The memory listener needs access to guest memory addresses allocated
+         *   in the IOVA tree.
+         * - There should be plenty of IOVA address space for both ASIDs not to
+         *   worry about collisions between them.  Guest translations are still
+         *   validated with virtio virtqueue_pop so there is no risk for the
+         *   guest to access memory that it shouldn't.
+         *
+         * Allocating an IOVA tree per ASID is doable but it complicates the
+         * code and it is not worth it for the moment.
+         */
+        v->iova_tree = s0->vhost_vdpa.iova_tree;
+    } else {
+        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+                                           v->iova_range.last);
+    }
+
     r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                                vhost_vdpa_net_cvq_cmd_page_len(), false);
     if (unlikely(r < 0)) {
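
The comment above argues that one tree can serve both ASIDs: collisions are
impossible by construction when every mapping draws its IOVA from the same
allocator. A toy bump allocator makes the point (the real VhostIOVATree does
range-based allocation, so this is only a sketch):

    #include <inttypes.h>
    #include <stdio.h>

    /* Bump allocator standing in for the shared VhostIOVATree. */
    typedef struct { uint64_t next, last; } Tree;

    static uint64_t tree_alloc(Tree *t, uint64_t size)
    {
        uint64_t iova = t->next;
        if (iova + size - 1 > t->last) {
            return UINT64_MAX;  /* out of IOVA space */
        }
        t->next += size;
        return iova;
    }

    int main(void)
    {
        Tree shared = { .next = 0x1000, .last = (1ULL << 36) - 1 };

        /* guest memory mapped under the data ASID... */
        uint64_t guest = tree_alloc(&shared, 1ULL << 30);
        /* ...and shadow CVQ buffers under VHOST_VDPA_NET_CVQ_ASID... */
        uint64_t cvq = tree_alloc(&shared, 4096);

        /* ...are disjoint because both came from the same free list */
        printf("guest @ 0x%" PRIx64 ", cvq @ 0x%" PRIx64 "\n", guest, cvq);
        return 0;
    }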
@@ -449,15 +518,9 @@ static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
     if (s->vhost_vdpa.shadow_vqs_enabled) {
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
         vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
-        if (!s->always_svq) {
-            /*
-             * If only the CVQ is shadowed we can delete this safely.
-             * If all the VQs are shadows this will be needed by the time the
-             * device is started again to register SVQ vrings and similar.
-             */
-            g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
-        }
     }
+
+    vhost_vdpa_net_client_stop(nc);
 }
 
 static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
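
Note that vhost_vdpa_net_cvq_stop() no longer special-cases s->always_svq:
delegating to vhost_vdpa_net_client_stop() gives the same behavior, since its
last-VQ check keeps the tree alive exactly as long as an earlier shadowed
client still needs it.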
@@ -667,8 +730,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
                                        int nvqs,
                                        bool is_datapath,
                                        bool svq,
-                                       struct vhost_vdpa_iova_range iova_range,
-                                       VhostIOVATree *iova_tree)
+                                       struct vhost_vdpa_iova_range iova_range)
 {
     NetClientState *nc = NULL;
     VhostVDPAState *s;
@@ -690,7 +752,6 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
     s->vhost_vdpa.shadow_vqs_enabled = svq;
     s->vhost_vdpa.iova_range = iova_range;
     s->vhost_vdpa.shadow_data = svq;
-    s->vhost_vdpa.iova_tree = iova_tree;
     if (!is_datapath) {
         s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
                                               vhost_vdpa_net_cvq_cmd_page_len());
@@ -760,7 +821,6 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     uint64_t features;
     int vdpa_device_fd;
     g_autofree NetClientState **ncs = NULL;
-    g_autoptr(VhostIOVATree) iova_tree = NULL;
     struct vhost_vdpa_iova_range iova_range;
     NetClientState *nc;
     int queue_pairs, r, i = 0, has_cvq = 0;
@@ -812,12 +872,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
         goto err;
     }
 
-    if (opts->x_svq) {
-        if (!vhost_vdpa_net_valid_svq_features(features, errp)) {
-            goto err_svq;
-        }
-
-        iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last);
+    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
+        goto err;
     }
 
     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
@@ -825,7 +881,7 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     for (i = 0; i < queue_pairs; i++) {
         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                      vdpa_device_fd, i, 2, true, opts->x_svq,
-                                     iova_range, iova_tree);
+                                     iova_range);
         if (!ncs[i])
             goto err;
     }
@@ -833,13 +889,11 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
     if (has_cvq) {
         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
                                  vdpa_device_fd, i, 1, false,
-                                 opts->x_svq, iova_range, iova_tree);
+                                 opts->x_svq, iova_range);
         if (!nc)
             goto err;
     }
 
-    /* iova_tree ownership belongs to last NetClientState */
-    g_steal_pointer(&iova_tree);
-
     return 0;
 
 err:
@@ -849,7 +903,6 @@ err:
         }
     }
 
-err_svq:
     qemu_close(vdpa_device_fd);
     return -1;
 }
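
For reference, these start/stop paths are exercised when a vhost-vdpa netdev
is brought up; with the experimental x-svq option the data VQs are shadowed as
well as the CVQ, so the IOVA tree is created at data start. A typical
invocation (the device node and topology here are placeholders):

    qemu-system-x86_64 ... \
        -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0,x-svq=on \
        -device virtio-net-pci,netdev=vdpa0,mq=on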