virtio/vhost-user: dynamically assign VhostUserHostNotifiers

At a couple of hundred bytes per notifier, statically allocating one
for every potential queue is very wasteful, as most devices only have
a few queues. Instead of a static array, assign notifiers dynamically
on first use and track them in a GPtrArray.

[AJB: it's hard to trigger the vhost notifier code, presumably
because it requires a KVM guest with an appropriate backend]

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20220321153037.3622127-14-alex.bennee@linaro.org>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Author: Alex Bennée <alex.bennee@linaro.org>  2022-03-21 15:30:37 +0000
Committed by: Michael S. Tsirkin
Commit: 503e355465 (parent: 56534930b5)
3 changed files with 108 additions and 18 deletions
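The change leans on two GLib behaviours worth spelling out: g_ptr_array_set_size() grows the array by padding the new slots with NULL, and the element free function registered with g_ptr_array_new_full() is called for every slot, including ones still NULL, when the array is freed (hence the NULL check in vhost_user_state_destroy() below). Here is a minimal standalone sketch of the same lazy-allocation pattern; the Notifier type and function names are illustrative, and it stores via direct slot assignment rather than the patch's g_ptr_array_insert():

#include <glib.h>

typedef struct Notifier {
    guint idx;
} Notifier;

/* Element free function: unused slots stay NULL, so check first. */
static void notifier_destroy(gpointer data)
{
    Notifier *n = data;
    if (n) {
        g_free(n);
    }
}

/* Return the notifier for @idx, creating it on first use. */
static Notifier *fetch_or_create(GPtrArray *arr, guint idx)
{
    Notifier *n;

    if (idx >= arr->len) {
        /* Grow so that index @idx is in range; new slots are NULL. */
        g_ptr_array_set_size(arr, idx + 1);
    }

    n = g_ptr_array_index(arr, idx);
    if (!n) {
        n = g_new0(Notifier, 1);
        n->idx = idx;
        g_ptr_array_index(arr, idx) = n;  /* overwrite the NULL slot */
    }
    return n;
}

int main(void)
{
    GPtrArray *arr = g_ptr_array_new_full(8, notifier_destroy);

    fetch_or_create(arr, 3);      /* slots 0..2 remain NULL */
    g_ptr_array_free(arr, TRUE);  /* runs notifier_destroy on each slot */
    return 0;
}

Note the grow to idx + 1: sizing the array to just idx would leave index idx out of range for the g_ptr_array_index() that follows.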

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events

@@ -23,6 +23,7 @@ vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64
 vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64
 vhost_user_read(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
 vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32""
+vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
 
 # vhost-vdpa.c
 vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8

diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c

@@ -1174,14 +1174,16 @@ static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
     n->unmap_addr = NULL;
 }
 
-static void vhost_user_host_notifier_remove(VhostUserState *user,
-                                            VirtIODevice *vdev, int queue_idx)
+/*
+ * clean-up function for notifier, will finally free the structure
+ * under rcu.
+ */
+static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
+                                            VirtIODevice *vdev)
 {
-    VhostUserHostNotifier *n = &user->notifier[queue_idx];
-
     if (n->addr) {
         if (vdev) {
-            virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
+            virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
         }
         assert(!n->unmap_addr);
         n->unmap_addr = n->addr;
@@ -1225,6 +1227,15 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
     return 0;
 }
 
+static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
+                                             int idx)
+{
+    if (idx >= u->notifiers->len) {
+        return NULL;
+    }
+    return g_ptr_array_index(u->notifiers, idx);
+}
+
 static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                      struct vhost_vring_state *ring)
 {
@@ -1237,7 +1248,10 @@ static int vhost_user_get_vring_base(struct vhost_dev *dev,
     };
     struct vhost_user *u = dev->opaque;
 
-    vhost_user_host_notifier_remove(u->user, dev->vdev, ring->index);
+    VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
+    if (n) {
+        vhost_user_host_notifier_remove(n, dev->vdev);
+    }
 
     ret = vhost_user_write(dev, &msg, NULL, 0);
     if (ret < 0) {
@@ -1502,6 +1516,29 @@ static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
     return dev->config_ops->vhost_dev_config_notifier(dev);
 }
 
+/*
+ * Fetch or create the notifier for a given idx. Newly created
+ * notifiers are added to the pointer array that tracks them.
+ */
+static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
+                                                       int idx)
+{
+    VhostUserHostNotifier *n = NULL;
+    if (idx >= u->notifiers->len) {
+        g_ptr_array_set_size(u->notifiers, idx + 1);
+    }
+
+    n = g_ptr_array_index(u->notifiers, idx);
+    if (!n) {
+        n = g_new0(VhostUserHostNotifier, 1);
+        n->idx = idx;
+        g_ptr_array_insert(u->notifiers, idx, n);
+        trace_vhost_user_create_notifier(idx, n);
+    }
+
+    return n;
+}
+
 static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                        VhostUserVringArea *area,
                                                        int fd)
@@ -1521,9 +1558,12 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
         return -EINVAL;
     }
 
-    n = &user->notifier[queue_idx];
-
-    vhost_user_host_notifier_remove(user, vdev, queue_idx);
+    /*
+     * Fetch notifier and invalidate any old data before setting up
+     * new mapped address.
+     */
+    n = fetch_or_create_notifier(user, queue_idx);
+    vhost_user_host_notifier_remove(n, vdev);
 
     if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
         return 0;
@@ -2526,6 +2566,20 @@ static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
     return vhost_user_write(dev, &msg, &inflight->fd, 1);
 }
 
+static void vhost_user_state_destroy(gpointer data)
+{
+    VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
+    if (n) {
+        vhost_user_host_notifier_remove(n, NULL);
+        object_unparent(OBJECT(&n->mr));
+        /*
+         * We can't free until vhost_user_host_notifier_remove has
+         * done its thing so schedule the free with RCU.
+         */
+        g_free_rcu(n, rcu);
+    }
+}
+
 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
 {
     if (user->chr) {
@@ -2534,23 +2588,18 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
     }
     user->chr = chr;
     user->memory_slots = 0;
+    user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
+                                           &vhost_user_state_destroy);
     return true;
 }
 
 void vhost_user_cleanup(VhostUserState *user)
 {
-    int i;
-    VhostUserHostNotifier *n;
-
     if (!user->chr) {
         return;
     }
     memory_region_transaction_begin();
-    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
-        n = &user->notifier[i];
-        vhost_user_host_notifier_remove(user, NULL, i);
-        object_unparent(OBJECT(&n->mr));
-    }
+    user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
     memory_region_transaction_commit();
     user->chr = NULL;
 }
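The destroy callback cannot g_free() the notifier immediately: a reader inside an RCU critical section may still hold the pointer, so the free is queued with g_free_rcu(), which expects the structure to embed a struct rcu_head (the rcu member of VhostUserHostNotifier). A sketch of the general pattern, assuming QEMU's RCU API from include/qemu/rcu.h; the Example type is illustrative:

#include "qemu/osdep.h"
#include "qemu/rcu.h"

typedef struct Example {
    struct rcu_head rcu;  /* required by g_free_rcu(obj, rcu) */
    int payload;
} Example;

static void drop_example(Example *e)
{
    /*
     * Readers between rcu_read_lock() and rcu_read_unlock() may
     * still dereference @e, so defer the g_free() until the
     * current grace period has elapsed.
     */
    g_free_rcu(e, rcu);
}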

diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h

@@ -11,21 +11,61 @@
 #include "chardev/char-fe.h"
 #include "hw/virtio/virtio.h"
 
+/**
+ * VhostUserHostNotifier - notifier information for one queue
+ * @rcu: rcu_head for cleanup
+ * @mr: memory region of notifier
+ * @addr: current mapped address
+ * @unmap_addr: address to be un-mapped
+ * @idx: virtqueue index
+ *
+ * The VhostUserHostNotifier entries are re-used. When an old mapping
+ * is to be released it is moved to @unmap_addr and @addr is replaced.
+ * Once the RCU process has completed the unmap, @unmap_addr is
+ * cleared.
+ */
 typedef struct VhostUserHostNotifier {
     struct rcu_head rcu;
     MemoryRegion mr;
     void *addr;
     void *unmap_addr;
+    int idx;
 } VhostUserHostNotifier;
 
+/**
+ * VhostUserState - shared state for all vhost-user devices
+ * @chr: the character backend for the socket
+ * @notifiers: GPtrArray of @VhostUserHostNotifier
+ * @memory_slots: number of memory slots supported by the backend
+ */
 typedef struct VhostUserState {
     CharBackend *chr;
-    VhostUserHostNotifier notifier[VIRTIO_QUEUE_MAX];
+    GPtrArray *notifiers;
     int memory_slots;
     bool supports_config;
 } VhostUserState;
 
+/**
+ * vhost_user_init() - initialise shared vhost_user state
+ * @user: allocated area for storing shared state
+ * @chr: the chardev for the vhost socket
+ * @errp: error handle
+ *
+ * User can either directly g_new() space for the state or embed
+ * VhostUserState in their larger device structure and just point to
+ * it.
+ *
+ * Return: true on success, false on failure with @errp set.
+ */
 bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp);
+
+/**
+ * vhost_user_cleanup() - cleanup state
+ * @user: ptr to the user state
+ *
+ * Cleans up shared state and notifiers; the caller is responsible
+ * for freeing the VhostUserState memory itself.
+ */
 void vhost_user_cleanup(VhostUserState *user);
 
 #endif
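As the kernel-doc above notes, callers may either allocate VhostUserState separately or embed it in their device structure. A hedged sketch of a caller, loosely modelled on existing users such as vhost-user-blk; the MyDev names are hypothetical:

#include "qemu/osdep.h"
#include "chardev/char-fe.h"
#include "hw/virtio/vhost-user.h"

typedef struct MyDev {
    CharBackend chardev;        /* socket to the vhost-user backend */
    VhostUserState vhost_user;  /* embedded shared state, no g_new() */
} MyDev;

static bool mydev_realize(MyDev *d, Error **errp)
{
    /* Allocates d->vhost_user.notifiers among other state. */
    if (!vhost_user_init(&d->vhost_user, &d->chardev, errp)) {
        return false;  /* @errp has been set for us */
    }
    /* ... vhost_dev_init() and friends would follow here ... */
    return true;
}

static void mydev_unrealize(MyDev *d)
{
    /* Destroys all notifiers; freeing MyDev itself is our job. */
    vhost_user_cleanup(&d->vhost_user);
}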