96b7af4388
vfio_container_destroy() releases the resources allocated by a VFIOContainerBase object. Now that VFIOContainerBase is a QOM object, add an instance_finalize() handler to do the cleanup. It will be called through object_unref(). Suggested-by: Zhenzhong Duan <zhenzhong.duan@intel.com> Reviewed-by: Zhenzhong Duan <zhenzhong.duan@intel.com> Reviewed-by: Eric Auger <eric.auger@redhat.com> Tested-by: Eric Auger <eric.auger@redhat.com> Signed-off-by: Cédric Le Goater <clg@redhat.com>
128 lines
3.6 KiB
C
128 lines
3.6 KiB
C
/*
 * VFIO BASE CONTAINER
 *
 * Copyright (C) 2023 Intel Corporation.
 * Copyright Red Hat, Inc. 2023
 *
 * Authors: Yi Liu <yi.l.liu@intel.com>
 *          Eric Auger <eric.auger@redhat.com>
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include "qemu/osdep.h"
|
|
#include "qapi/error.h"
|
|
#include "qemu/error-report.h"
|
|
#include "hw/vfio/vfio-container-base.h"
|
|
|
|
int vfio_container_dma_map(VFIOContainerBase *bcontainer,
|
|
hwaddr iova, ram_addr_t size,
|
|
void *vaddr, bool readonly)
|
|
{
|
|
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
|
|
|
|
g_assert(vioc->dma_map);
|
|
return vioc->dma_map(bcontainer, iova, size, vaddr, readonly);
|
|
}
|
|
|
|
int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
|
|
hwaddr iova, ram_addr_t size,
|
|
IOMMUTLBEntry *iotlb)
|
|
{
|
|
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
|
|
|
|
g_assert(vioc->dma_unmap);
|
|
return vioc->dma_unmap(bcontainer, iova, size, iotlb);
|
|
}
|
|
|
|
bool vfio_container_add_section_window(VFIOContainerBase *bcontainer,
|
|
MemoryRegionSection *section,
|
|
Error **errp)
|
|
{
|
|
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
|
|
|
|
if (!vioc->add_window) {
|
|
return true;
|
|
}
|
|
|
|
return vioc->add_window(bcontainer, section, errp);
|
|
}
|
|
|
|
void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
|
|
MemoryRegionSection *section)
|
|
{
|
|
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
|
|
|
|
if (!vioc->del_window) {
|
|
return;
|
|
}
|
|
|
|
return vioc->del_window(bcontainer, section);
|
|
}
|
|
|
|
int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
|
|
bool start, Error **errp)
|
|
{
|
|
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
|
|
|
|
if (!bcontainer->dirty_pages_supported) {
|
|
return 0;
|
|
}
|
|
|
|
g_assert(vioc->set_dirty_page_tracking);
|
|
return vioc->set_dirty_page_tracking(bcontainer, start, errp);
|
|
}
|
|
|
|
int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
|
|
VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp)
|
|
{
|
|
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
|
|
|
|
g_assert(vioc->query_dirty_bitmap);
|
|
return vioc->query_dirty_bitmap(bcontainer, vbmap, iova, size,
|
|
errp);
|
|
}
|
|
|
|
static void vfio_container_instance_finalize(Object *obj)
|
|
{
|
|
VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);
|
|
VFIOGuestIOMMU *giommu, *tmp;
|
|
|
|
QLIST_REMOVE(bcontainer, next);
|
|
|
|
QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) {
|
|
memory_region_unregister_iommu_notifier(
|
|
MEMORY_REGION(giommu->iommu_mr), &giommu->n);
|
|
QLIST_REMOVE(giommu, giommu_next);
|
|
g_free(giommu);
|
|
}
|
|
|
|
g_list_free_full(bcontainer->iova_ranges, g_free);
|
|
}
|
|
|
|
static void vfio_container_instance_init(Object *obj)
|
|
{
|
|
VFIOContainerBase *bcontainer = VFIO_IOMMU(obj);
|
|
|
|
bcontainer->error = NULL;
|
|
bcontainer->dirty_pages_supported = false;
|
|
bcontainer->dma_max_mappings = 0;
|
|
bcontainer->iova_ranges = NULL;
|
|
QLIST_INIT(&bcontainer->giommu_list);
|
|
QLIST_INIT(&bcontainer->vrdl_list);
|
|
}
|
|
|
|
static const TypeInfo types[] = {
|
|
{
|
|
.name = TYPE_VFIO_IOMMU,
|
|
.parent = TYPE_OBJECT,
|
|
.instance_init = vfio_container_instance_init,
|
|
.instance_finalize = vfio_container_instance_finalize,
|
|
.instance_size = sizeof(VFIOContainerBase),
|
|
.class_size = sizeof(VFIOIOMMUClass),
|
|
.abstract = true,
|
|
},
|
|
};
|
|
|
|
DEFINE_TYPES(types)
|