vhost-scsi: Add support for a worker thread per virtqueue

This adds support for vhost-scsi to create a worker thread per
virtqueue. Right now vhost-net gets a worker thread per tx/rx
virtqueue pair, which scales nicely as virtqueues and CPUs are added,
but vhost-scsi gets a single worker thread that is shared by all
virtqueues. When trying to send IO to more than 2 virtqueues, the
single thread becomes a bottleneck.

This patch adds a new setting, worker_per_virtqueue, which can be set
to:

false: Existing behavior where we get the single worker thread.
true: Create a worker per IO virtqueue.
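
For illustration, the new property would be enabled on the device at
the QEMU command line with something like the following (the wwpn
value and queue count are placeholders for your setup):

    -device vhost-scsi-pci,wwpn=naa.5001405df3e71b37,num_queues=4,worker_per_virtqueue=true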

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>

Message-Id: <20231204231618.21962-3-michael.christie@oracle.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
hw/scsi/vhost-scsi.c

@@ -165,6 +165,59 @@ static const VMStateDescription vmstate_virtio_vhost_scsi = {
    .pre_save = vhost_scsi_pre_save,
};

static int vhost_scsi_set_workers(VHostSCSICommon *vsc, bool per_virtqueue)
{
    struct vhost_dev *dev = &vsc->dev;
    struct vhost_vring_worker vq_worker;
    struct vhost_worker_state worker;
    int i, ret;

    /*
     * Use the default worker if per-virtqueue workers were not requested,
     * or if there is only one IO virtqueue beyond the fixed ctl/evt queues.
     */
    if (!per_virtqueue || dev->nvqs == VHOST_SCSI_VQ_NUM_FIXED + 1) {
        return 0;
    }

    /*
     * ctl/evt share the first worker since it will be rare for them
     * to send cmds while IO is running.
     */
    for (i = VHOST_SCSI_VQ_NUM_FIXED + 1; i < dev->nvqs; i++) {
        memset(&worker, 0, sizeof(worker));
        ret = dev->vhost_ops->vhost_new_worker(dev, &worker);
        if (ret == -ENOTTY) {
            /*
             * worker ioctls are not implemented so just ignore them
             * and continue device setup.
             */
            warn_report("vhost-scsi: Backend supports a single worker. "
                        "Ignoring worker_per_virtqueue=true setting.");
            ret = 0;
            break;
        } else if (ret) {
            break;
        }

        memset(&vq_worker, 0, sizeof(vq_worker));
        vq_worker.worker_id = worker.worker_id;
        vq_worker.index = i;

        ret = dev->vhost_ops->vhost_attach_vring_worker(dev, &vq_worker);
        if (ret == -ENOTTY) {
            /*
             * It's a bug for the kernel to have supported the worker creation
             * ioctl but not attach.
             */
            dev->vhost_ops->vhost_free_worker(dev, &worker);
            break;
        } else if (ret) {
            break;
        }
    }

    return ret;
}

static void vhost_scsi_realize(DeviceState *dev, Error **errp)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
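
For context, the vhost_new_worker() and vhost_attach_vring_worker()
backend hooks used in vhost_scsi_set_workers() are expected to wrap
the Linux vhost worker ioctls (VHOST_NEW_WORKER and
VHOST_ATTACH_VRING_WORKER). Below is a minimal sketch of that
kernel-facing sequence, assuming an already-open /dev/vhost-scsi
descriptor and leaving out QEMU's backend plumbing:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/*
 * Create a new vhost worker thread and attach virtqueue 'vq_index' to
 * it. Returns 0 on success and -1 with errno set on failure; kernels
 * without the worker ioctls fail with ENOTTY.
 */
static int attach_new_worker(int vhost_fd, unsigned int vq_index)
{
    struct vhost_worker_state worker;
    struct vhost_vring_worker vq_worker;

    memset(&worker, 0, sizeof(worker));
    /* The kernel allocates a worker and returns its id in worker.worker_id. */
    if (ioctl(vhost_fd, VHOST_NEW_WORKER, &worker) < 0) {
        return -1;
    }

    memset(&vq_worker, 0, sizeof(vq_worker));
    vq_worker.index = vq_index;
    vq_worker.worker_id = worker.worker_id;
    /* Move the virtqueue from the default worker to the new one. */
    return ioctl(vhost_fd, VHOST_ATTACH_VRING_WORKER, &vq_worker);
}
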
@@ -232,6 +285,13 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp)
        goto free_vqs;
    }

    ret = vhost_scsi_set_workers(vsc, vs->conf.worker_per_virtqueue);
    if (ret < 0) {
        error_setg(errp, "vhost-scsi: vhost worker setup failed: %s",
                   strerror(-ret));
        goto free_vqs;
    }

    /* At present, channel and lun both are 0 for bootable vhost-scsi disk */
    vsc->channel = 0;
    vsc->lun = 0;
@@ -297,6 +357,8 @@ static Property vhost_scsi_properties[] = {
                      VIRTIO_SCSI_F_T10_PI,
                      false),
    DEFINE_PROP_BOOL("migratable", VHostSCSICommon, migratable, false),
    DEFINE_PROP_BOOL("worker_per_virtqueue", VirtIOSCSICommon,
                     conf.worker_per_virtqueue, false),
    DEFINE_PROP_END_OF_LIST(),
};

include/hw/virtio/virtio-scsi.h

@@ -51,6 +51,7 @@ typedef struct virtio_scsi_config VirtIOSCSIConfig;
struct VirtIOSCSIConf {
    uint32_t num_queues;
    uint32_t virtqueue_size;
    bool worker_per_virtqueue;
    bool seg_max_adjust;
    uint32_t max_sectors;
    uint32_t cmd_per_lun;