virtio, pc: fixes
Some fixes to fall back from using virtio caching, plus a minor vm gen id fix.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJYyYD9AAoJECgfDbjSjVRpVC0IAL50O94eD711A1LhbHYaf01j
0d++IQM0FeyY+Vg3YfIhpil/sjJ9xVt4GiX3sr2yE7Et4f57N4nXKqemsjyNAeno
RgfTrO/s3VOFSjmy0RpwJYdbLs5bIMd3fWh7Yc1auSfpWtxkGVZFDDGuXYmmQnJP
4FgJSMmJGzSSlSxCl7R9AKnR9xfPuPkpLUlq1hcSZe/gjG/jNPkGa0ZxuiCWgKzB
kQIrOl8q1lWAQ2AqdWKL+XPzicARrk5thFD2uhOPqHJo5i2oEB8P1vtxOSG3Qtw1
X0P/B5WooCi9cjJHujNSQiG5mUCrGWrlftpKxBdO0BIz29WnXpcjTl7zZauKdsA=
=RXnk
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

virtio, pc: fixes

Some fixes to fall back from using virtio caching, plus a minor vm gen id fix.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Wed 15 Mar 2017 17:59:25 GMT
# gpg: using RSA key 0x281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg: aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
# Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream:
  virtio-pci: reset modern vq meta data
  Revert "virtio: unbreak virtio-pci with IOMMU after caching ring translations"
  pci: introduce a bus master container
  virtio: validate address space cache during init
  virtio: destroy region cache during reset
  virtio: guard against NULL pfn
  Bugfix: Handle error if VM Generation ID device not present

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

Commit: 1883ff34b5
hmp.c
@@ -2608,9 +2608,11 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict)
 
 void hmp_info_vm_generation_id(Monitor *mon, const QDict *qdict)
 {
-    GuidInfo *info = qmp_query_vm_generation_id(NULL);
+    Error *err = NULL;
+    GuidInfo *info = qmp_query_vm_generation_id(&err);
     if (info) {
         monitor_printf(mon, "%s\n", info->guid);
     }
+    hmp_handle_error(mon, &err);
     qapi_free_GuidInfo(info);
 }
hw/acpi/vmgenid.c
@@ -248,6 +248,7 @@ GuidInfo *qmp_query_vm_generation_id(Error **errp)
     Object *obj = find_vmgenid_dev();
 
     if (!obj) {
+        error_setg(errp, "VM Generation ID device not found");
         return NULL;
     }
     vms = VMGENID(obj);
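Taken together, the two hunks above make up the VM Generation ID fix: qmp_query_vm_generation_id() now fills its Error **errp argument instead of silently returning NULL, and the HMP wrapper passes a local Error * through and reports it with hmp_handle_error(). A minimal sketch of that errp convention follows; query_widget_name() and show_widget_name() are hypothetical illustrations, not part of the patch:

/* Hypothetical illustration of QEMU's Error **errp convention; not patch code. */
#include "qemu/osdep.h"
#include "qapi/error.h"

static char *query_widget_name(Error **errp)
{
    bool found = false;          /* stand-in for a find_vmgenid_dev()-style lookup */

    if (!found) {
        /* Describe the failure for the caller instead of returning bare NULL. */
        error_setg(errp, "widget device not found");
        return NULL;
    }
    return g_strdup("widget0");
}

static void show_widget_name(void)
{
    Error *err = NULL;
    char *name = query_widget_name(&err);

    if (name) {
        printf("%s\n", name);
    } else {
        error_report_err(err);   /* print and free the accumulated error */
    }
    g_free(name);
}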
hw/pci/pci.c
@@ -88,8 +88,8 @@ static void pci_init_bus_master(PCIDevice *pci_dev)
                              OBJECT(pci_dev), "bus master",
                              dma_as->root, 0, memory_region_size(dma_as->root));
     memory_region_set_enabled(&pci_dev->bus_master_enable_region, false);
-    address_space_init(&pci_dev->bus_master_as,
-                       &pci_dev->bus_master_enable_region, pci_dev->name);
+    memory_region_add_subregion(&pci_dev->bus_master_container_region, 0,
+                                &pci_dev->bus_master_enable_region);
 }
 
 static void pcibus_machine_done(Notifier *notifier, void *data)
@@ -995,6 +995,11 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
     pci_dev->devfn = devfn;
     pci_dev->requester_id_cache = pci_req_id_cache_get(pci_dev);
 
+    memory_region_init(&pci_dev->bus_master_container_region, OBJECT(pci_dev),
+                       "bus master container", UINT64_MAX);
+    address_space_init(&pci_dev->bus_master_as,
+                       &pci_dev->bus_master_container_region, pci_dev->name);
+
     if (qdev_hotplug) {
         pci_init_bus_master(pci_dev);
     }
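The pci.c change splits bus-master DMA setup in two: do_pci_register_device() now creates a fixed, empty container region and builds bus_master_as on it exactly once, while pci_init_bus_master() merely plugs the enable-region alias into that container later. A rough sketch of the container idiom, assuming it is compiled inside the QEMU tree; everything except the memory/address-space API calls is illustrative:

/* Sketch of the "stable container + swappable subregion" idiom; ToyDev is illustrative. */
#include "qemu/osdep.h"
#include "qom/object.h"
#include "exec/memory.h"

typedef struct ToyDev {
    Object parent_obj;
    MemoryRegion container;   /* created once, root of the address space */
    MemoryRegion window;      /* alias that can be added or replaced later */
    AddressSpace as;
} ToyDev;

static void toy_dev_init_dma(ToyDev *d)
{
    /* The AddressSpace is rooted at an empty, max-sized container... */
    memory_region_init(&d->container, OBJECT(d), "toy-dma-container", UINT64_MAX);
    address_space_init(&d->as, &d->container, "toy-dma");
}

static void toy_dev_plug_window(ToyDev *d, MemoryRegion *backing)
{
    /* ...so the actual DMA view can be attached (or swapped) afterwards
     * without tearing the AddressSpace down and recreating it. */
    memory_region_init_alias(&d->window, OBJECT(d), "toy-dma-window",
                             backing, 0, memory_region_size(backing));
    memory_region_add_subregion(&d->container, 0, &d->window);
}

The same shape appears in the patch: anything that caches bus_master_as keeps a valid pointer, and the IOMMU-dependent enable region can presumably be attached once its layout is known.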
hw/virtio/virtio-pci.c
@@ -1153,7 +1153,7 @@ static AddressSpace *virtio_pci_get_dma_as(DeviceState *d)
     VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
     PCIDevice *dev = &proxy->pci_dev;
 
-    return pci_device_iommu_address_space(dev);
+    return pci_get_address_space(dev);
 }
 
 static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
@@ -1857,6 +1857,10 @@ static void virtio_pci_reset(DeviceState *qdev)
 
     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
         proxy->vqs[i].enabled = 0;
+        proxy->vqs[i].num = 0;
+        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
+        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
+        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
     }
 }
 
hw/virtio/virtio.c
@@ -131,6 +131,7 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
     VRingMemoryRegionCaches *new;
     hwaddr addr, size;
     int event_size;
+    int64_t len;
 
     event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
 
@@ -140,21 +141,41 @@ static void virtio_init_region_cache(VirtIODevice *vdev, int n)
     }
     new = g_new0(VRingMemoryRegionCaches, 1);
     size = virtio_queue_get_desc_size(vdev, n);
-    address_space_cache_init(&new->desc, vdev->dma_as,
+    len = address_space_cache_init(&new->desc, vdev->dma_as,
                              addr, size, false);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map desc");
+        goto err_desc;
+    }
 
     size = virtio_queue_get_used_size(vdev, n) + event_size;
-    address_space_cache_init(&new->used, vdev->dma_as,
+    len = address_space_cache_init(&new->used, vdev->dma_as,
                              vq->vring.used, size, true);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map used");
+        goto err_used;
+    }
 
     size = virtio_queue_get_avail_size(vdev, n) + event_size;
-    address_space_cache_init(&new->avail, vdev->dma_as,
+    len = address_space_cache_init(&new->avail, vdev->dma_as,
                              vq->vring.avail, size, false);
+    if (len < size) {
+        virtio_error(vdev, "Cannot map avail");
+        goto err_avail;
+    }
 
     atomic_rcu_set(&vq->vring.caches, new);
     if (old) {
         call_rcu(old, virtio_free_region_cache, rcu);
     }
+    return;
+
+err_avail:
+    address_space_cache_destroy(&new->used);
+err_used:
+    address_space_cache_destroy(&new->desc);
+err_desc:
+    g_free(new);
 }
 
 /* virt queue functions */
@@ -185,10 +206,16 @@ static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
     virtio_tswap16s(vdev, &desc->next);
 }
 
+static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
+{
+    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    assert(caches != NULL);
+    return caches;
+}
 /* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_flags(VirtQueue *vq)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     hwaddr pa = offsetof(VRingAvail, flags);
     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }
@@ -196,7 +223,7 @@ static inline uint16_t vring_avail_flags(VirtQueue *vq)
 /* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_idx(VirtQueue *vq)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     hwaddr pa = offsetof(VRingAvail, idx);
     vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
     return vq->shadow_avail_idx;
@@ -205,7 +232,7 @@ static inline uint16_t vring_avail_idx(VirtQueue *vq)
 /* Called within rcu_read_lock(). */
 static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     hwaddr pa = offsetof(VRingAvail, ring[i]);
     return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 }
@@ -220,7 +247,7 @@ static inline uint16_t vring_get_used_event(VirtQueue *vq)
 static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
                                     int i)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     hwaddr pa = offsetof(VRingUsed, ring[i]);
     virtio_tswap32s(vq->vdev, &uelem->id);
     virtio_tswap32s(vq->vdev, &uelem->len);
@@ -231,7 +258,7 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
 /* Called within rcu_read_lock(). */
 static uint16_t vring_used_idx(VirtQueue *vq)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     hwaddr pa = offsetof(VRingUsed, idx);
     return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
 }
@@ -239,7 +266,7 @@ static uint16_t vring_used_idx(VirtQueue *vq)
 /* Called within rcu_read_lock(). */
 static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     hwaddr pa = offsetof(VRingUsed, idx);
     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
@@ -249,7 +276,7 @@ static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 /* Called within rcu_read_lock(). */
 static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     VirtIODevice *vdev = vq->vdev;
     hwaddr pa = offsetof(VRingUsed, flags);
     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
@@ -261,7 +288,7 @@ static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 /* Called within rcu_read_lock(). */
 static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
 {
-    VRingMemoryRegionCaches *caches = atomic_rcu_read(&vq->vring.caches);
+    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
     VirtIODevice *vdev = vq->vdev;
     hwaddr pa = offsetof(VRingUsed, flags);
     uint16_t flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
@@ -279,7 +306,7 @@ static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
         return;
     }
 
-    caches = atomic_rcu_read(&vq->vring.caches);
+    caches = vring_get_region_caches(vq);
     pa = offsetof(VRingUsed, ring[vq->vring.num]);
     virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
     address_space_cache_invalidate(&caches->used, pa, sizeof(val));
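Every vring_*() accessor above runs under rcu_read_lock() and used to fetch vq->vring.caches directly with atomic_rcu_read(); routing them through vring_get_region_caches() gives one choke point with an assert(caches != NULL), which matters now that reset can unpublish the caches (see the virtio_reset() hunks further down). A generic sketch of that RCU read-side idiom, with a purely illustrative ToyState in place of the virtio structures:

/* Illustrative RCU read-side pattern; ToyState and the lookup are not QEMU code. */
#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"

typedef struct ToyState {
    struct rcu_head rcu;
    int value;
} ToyState;

static ToyState *toy_current;       /* published pointer, written with atomic_rcu_set() */

static ToyState *toy_get_state(void)
{
    /* Readers must hold rcu_read_lock(); the assert documents that the
     * pointer is expected to be published whenever this path runs. */
    ToyState *s = atomic_rcu_read(&toy_current);
    assert(s != NULL);
    return s;
}

static int toy_read_value(void)
{
    int val;

    rcu_read_lock();
    val = toy_get_state()->value;   /* safe until rcu_read_unlock() */
    rcu_read_unlock();
    return val;
}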
@@ -318,6 +345,10 @@ int virtio_queue_ready(VirtQueue *vq)
  * Called within rcu_read_lock(). */
 static int virtio_queue_empty_rcu(VirtQueue *vq)
 {
+    if (unlikely(!vq->vring.avail)) {
+        return 1;
+    }
+
     if (vq->shadow_avail_idx != vq->last_avail_idx) {
         return 0;
     }
@@ -329,6 +360,10 @@ int virtio_queue_empty(VirtQueue *vq)
 {
     bool empty;
 
+    if (unlikely(!vq->vring.avail)) {
+        return 1;
+    }
+
     if (vq->shadow_avail_idx != vq->last_avail_idx) {
         return 0;
     }
@@ -431,6 +466,10 @@ void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
         return;
     }
 
+    if (unlikely(!vq->vring.used)) {
+        return;
+    }
+
     idx = (idx + vq->used_idx) % vq->vring.num;
 
     uelem.id = elem->index;
@@ -448,6 +487,10 @@ void virtqueue_flush(VirtQueue *vq, unsigned int count)
         return;
     }
 
+    if (unlikely(!vq->vring.used)) {
+        return;
+    }
+
     /* Make sure buffer is written before we update index. */
     smp_wmb();
     trace_virtqueue_flush(vq, count);
@@ -546,12 +589,22 @@ void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
     int64_t len = 0;
     int rc;
 
+    if (unlikely(!vq->vring.desc)) {
+        if (in_bytes) {
+            *in_bytes = 0;
+        }
+        if (out_bytes) {
+            *out_bytes = 0;
+        }
+        return;
+    }
+
     rcu_read_lock();
     idx = vq->last_avail_idx;
     total_bufs = in_total = out_total = 0;
 
     max = vq->vring.num;
-    caches = atomic_rcu_read(&vq->vring.caches);
+    caches = vring_get_region_caches(vq);
     if (caches->desc.len < max * sizeof(VRingDesc)) {
         virtio_error(vdev, "Cannot map descriptor ring");
         goto err;
@@ -818,7 +871,7 @@ void *virtqueue_pop(VirtQueue *vq, size_t sz)
 
     i = head;
 
-    caches = atomic_rcu_read(&vq->vring.caches);
+    caches = vring_get_region_caches(vq);
     if (caches->desc.len < max * sizeof(VRingDesc)) {
         virtio_error(vdev, "Cannot map descriptor ring");
         goto done;
@@ -1117,6 +1170,17 @@ static enum virtio_device_endian virtio_current_cpu_endian(void)
     }
 }
 
+static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
+{
+    VRingMemoryRegionCaches *caches;
+
+    caches = atomic_read(&vq->vring.caches);
+    atomic_rcu_set(&vq->vring.caches, NULL);
+    if (caches) {
+        call_rcu(caches, virtio_free_region_cache, rcu);
+    }
+}
+
 void virtio_reset(void *opaque)
 {
     VirtIODevice *vdev = opaque;
|
|||||||
vdev->vq[i].notification = true;
|
vdev->vq[i].notification = true;
|
||||||
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
|
vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
|
||||||
vdev->vq[i].inuse = 0;
|
vdev->vq[i].inuse = 0;
|
||||||
|
virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -2451,13 +2516,10 @@ static void virtio_device_free_virtqueues(VirtIODevice *vdev)
     }
 
     for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
-        VRingMemoryRegionCaches *caches;
         if (vdev->vq[i].vring.num == 0) {
             break;
         }
-        caches = atomic_read(&vdev->vq[i].vring.caches);
-        atomic_set(&vdev->vq[i].vring.caches, NULL);
-        virtio_free_region_cache(caches);
+        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
     }
     g_free(vdev->vq);
 }
include/hw/pci/pci.h
@@ -284,6 +284,7 @@ struct PCIDevice {
     char name[64];
     PCIIORegion io_regions[PCI_NUM_REGIONS];
     AddressSpace bus_master_as;
+    MemoryRegion bus_master_container_region;
     MemoryRegion bus_master_enable_region;
 
     /* do not access the following fields */