/*
 * vhost-vdpa.h
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef HW_VIRTIO_VHOST_VDPA_H
#define HW_VIRTIO_VHOST_VDPA_H

#include <gmodule.h>

#include "hw/virtio/vhost-iova-tree.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/virtio.h"
#include "standard-headers/linux/vhost_types.h"

/*
 * ASID dedicated to map guest's addresses. If SVQ is disabled it maps GPA to
 * qemu's IOVA. If SVQ is enabled it also maps the SVQ vrings here.
 */
#define VHOST_VDPA_GUEST_PA_ASID 0
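/*
 * Illustrative sketch only (not part of this header): guest memory mappings
 * are installed under this ASID, e.g. roughly
 *
 *     vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, size, vaddr,
 *                        false);
 *
 * with vhost_vdpa_dma_map() as declared further down in this header.
 */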

typedef struct VhostVDPAHostNotifier {
    MemoryRegion mr;
    void *addr;
} VhostVDPAHostNotifier;

typedef struct vhost_vdpa {
    int device_fd;
    int index;
    uint32_t msg_type;
    bool iotlb_batch_begin_sent;
    uint32_t address_space_id;
    MemoryListener listener;
    struct vhost_vdpa_iova_range iova_range;
    uint64_t acked_features;
    bool shadow_vqs_enabled;
    /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */
    bool shadow_data;
    /* Device suspended successfully */
    bool suspended;
    /* IOVA mapping used by the Shadow Virtqueue */
    VhostIOVATree *iova_tree;
    GPtrArray *shadow_vqs;
    const VhostShadowVirtqueueOps *shadow_vq_ops;
    void *shadow_vq_ops_opaque;
    struct vhost_dev *dev;
    Error *migration_blocker;
    VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX];
    QLIST_HEAD(, vdpa_iommu) iommu_list;
    IOMMUNotifier n;
} VhostVDPA;
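
/*
 * Illustrative sketch only: in the vhost-vdpa backend code this state is
 * typically reached through the owning vhost device, e.g. given a
 * "struct vhost_dev *dev" that uses the vdpa backend:
 *
 *     struct vhost_vdpa *v = dev->opaque;
 */
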
int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range);
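
/*
 * Illustrative sketch only (the device node path and fd handling below are
 * assumptions, not something this header provides): query the usable IOVA
 * window of a vhost-vdpa device before installing mappings.
 *
 *     struct vhost_vdpa_iova_range range;
 *     int fd = open("/dev/vhost-vdpa-0", O_RDWR);
 *
 *     if (fd >= 0 && vhost_vdpa_get_iova_range(fd, &range) == 0) {
 *         // the device accepts IOVAs in [range.first, range.last]
 *     }
 */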

int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly);
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size);
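
/*
 * Illustrative sketch only: map a host buffer into the device's IOVA space
 * and tear the mapping down again. Here "v", "iova", "buf" and "len" are
 * assumed to be set up by the caller; both calls are expected to return 0 on
 * success and a negative value on failure.
 *
 *     if (vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, len,
 *                            buf, false) == 0) {
 *         // ... use the mapping ...
 *         vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, len);
 *     }
 */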

typedef struct vdpa_iommu {
    struct vhost_vdpa *dev;
    IOMMUMemoryRegion *iommu_mr;
    hwaddr iommu_offset;
    IOMMUNotifier n;
    QLIST_ENTRY(vdpa_iommu) iommu_next;
} VDPAIOMMUState;
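
/*
 * Illustrative sketch only: entries are chained on vhost_vdpa::iommu_list
 * through iommu_next, so walking them follows the usual QLIST pattern:
 *
 *     VDPAIOMMUState *iommu;
 *
 *     QLIST_FOREACH(iommu, &v->iommu_list, iommu_next) {
 *         // ...
 *     }
 */
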
#endif