vfio/common: Fix vfio_iommu_type1_info use after free
On error, vfio_get_iommu_info() frees and clears *info, but
vfio_connect_container() continues to use the pointer regardless of the
return value.  Restructure the code such that a failure of this function
triggers an error and clean up the remainder of the function, including
updating an outdated comment that had drifted from its relevant line of
code and using the host page size as a default for better compatibility
on non-4KB systems.

Reported-by: Nicolin Chen <nicolinc@nvidia.com>
Link: https://lore.kernel.org/all/20220910004245.2878-1-nicolinc@nvidia.com/
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Link: https://lore.kernel.org/r/166326219630.3388898.12882473157184946072.stgit@omen
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
commit 85b6d2b5fc
parent 429c728006
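To make the use-after-free concrete, here is a minimal, self-contained
sketch of the bug pattern, not the actual QEMU code: get_info() and
struct iommu_info are invented names, but the contract mirrors
vfio_get_iommu_info(), which frees and clears *info on failure, so the
old caller's unconditional dereference touched freed (here, NULL) memory.

#include <stdio.h>
#include <stdlib.h>

struct iommu_info {
    unsigned int flags;
    unsigned long iova_pgsizes;
};

/* Like vfio_get_iommu_info(): on error, frees and clears *info. */
static int get_info(struct iommu_info **info, int simulate_error)
{
    *info = calloc(1, sizeof(**info));
    if (!*info || simulate_error) {
        free(*info);
        *info = NULL;    /* caller must not touch the pointer now */
        return -1;
    }
    (*info)->iova_pgsizes = 4096;
    return 0;
}

int main(void)
{
    struct iommu_info *info;
    int ret = get_info(&info, 1);

    /*
     * Old pattern (buggy): "if (ret || !(info->flags & ...))" reads
     * info->flags even when ret != 0, i.e. after *info was freed and
     * cleared.  New pattern: fail fast before the first dereference.
     */
    if (ret) {
        fprintf(stderr, "Failed to get IOMMU info\n");
        return 1;
    }
    printf("pgsizes: 0x%lx\n", info->iova_pgsizes);
    free(info);
    return 0;
}

The hunk below shows the fix in context.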
@@ -2111,29 +2111,31 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     {
         struct vfio_iommu_type1_info *info;
 
-        /*
-         * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
-         * IOVA whatsoever.  That's not actually true, but the current
-         * kernel interface doesn't tell us what it can map, and the
-         * existing Type1 IOMMUs generally support any IOVA we're
-         * going to actually try in practice.
-         */
         ret = vfio_get_iommu_info(container, &info);
+        if (ret) {
+            error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
+            goto enable_discards_exit;
+        }
 
-        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
-            /* Assume 4k IOVA page size */
-            info->iova_pgsizes = 4096;
+        if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
+            container->pgsizes = info->iova_pgsizes;
+        } else {
+            container->pgsizes = qemu_real_host_page_size();
         }
-        vfio_host_win_add(container, 0, (hwaddr)-1, info->iova_pgsizes);
-        container->pgsizes = info->iova_pgsizes;
 
-        /* The default in the kernel ("dma_entry_limit") is 65535. */
-        container->dma_max_mappings = 65535;
-        if (!ret) {
-            vfio_get_info_dma_avail(info, &container->dma_max_mappings);
-            vfio_get_iommu_info_migration(container, info);
+        if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
+            container->dma_max_mappings = 65535;
         }
+        vfio_get_iommu_info_migration(container, info);
         g_free(info);
+
+        /*
+         * FIXME: We should parse VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE
+         * information to get the actual window extent rather than assume
+         * a 64-bit IOVA address space.
+         */
+        vfio_host_win_add(container, 0, (hwaddr)-1, container->pgsizes);
+
         break;
     }
     case VFIO_SPAPR_TCE_v2_IOMMU:
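Two of the cleanups are worth spelling out.  First, the fallback page
size is now the host page size rather than a hardcoded 4096.  A rough
equivalent of what qemu_real_host_page_size() provides on a POSIX host
(the helper name below is hypothetical, not the QEMU implementation):

#include <unistd.h>

/*
 * Hypothetical stand-in for qemu_real_host_page_size(): query the
 * host's actual page size.  On a 64KiB-page arm64 or ppc64 host this
 * returns 65536, so the fallback no longer advertises a 4KiB mapping
 * granularity the IOMMU may not actually support.
 */
static unsigned long real_host_page_size(void)
{
    long sz = sysconf(_SC_PAGESIZE);

    return sz > 0 ? (unsigned long)sz : 4096;
}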
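Second, the dma_max_mappings default now applies only when the kernel
does not report how many DMA mappings are available.  A sketch of the
capability-chain lookup behind vfio_get_info_dma_avail(), written
against the VFIO uAPI in <linux/vfio.h> but simplified from the actual
QEMU helper:

#include <stdbool.h>
#include <stdint.h>
#include <linux/vfio.h>

/*
 * Returns true and fills *avail if the kernel appended the
 * VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL capability to the info struct;
 * returns false otherwise, in which case the caller applies the
 * historical "dma_entry_limit" default of 65535.
 */
static bool get_info_dma_avail(struct vfio_iommu_type1_info *info,
                               unsigned int *avail)
{
    struct vfio_info_cap_header *hdr;
    uint32_t offset;

    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
        return false;    /* no capability chain at all */
    }
    /* Each header's 'next' field is an offset from the start of 'info'. */
    for (offset = info->cap_offset; offset; offset = hdr->next) {
        hdr = (struct vfio_info_cap_header *)((uint8_t *)info + offset);
        if (hdr->id == VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL) {
            *avail = ((struct vfio_iommu_type1_info_dma_avail *)hdr)->avail;
            return true;
        }
    }
    return false;
}

With this contract, the caller's "if (!vfio_get_info_dma_avail(...))"
branch in the hunk above is the only place the 65535 default is applied.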