Commit 07cd9ac4 authored by Linus Torvalds

Merge tag 'iommu-fixes-v5.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Warning fixes and a fix for a potential use-after-free in IOMMU core
   code

 - Another potential memory leak fix for the Intel VT-d driver

 - Fix for an IO polling loop timeout issue in the AMD IOMMU driver

* tag 'iommu-fixes-v5.17-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Fix loop timeout issue in iommu_ga_log_enable()
  iommu/vt-d: Fix potential memory leak in intel_setup_irq_remapping()
  iommu: Fix some W=1 warnings
  iommu: Fix potential use-after-free during probe
parents ba6ef8af 9b45a773
@@ -21,6 +21,7 @@
 #include <linux/export.h>
 #include <linux/kmemleak.h>
 #include <linux/cc_platform.h>
+#include <linux/iopoll.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/apic.h>
@@ -834,6 +835,7 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
 		status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
 		if (status & (MMIO_STATUS_GALOG_RUN_MASK))
 			break;
+		udelay(10);
 	}

 	if (WARN_ON(i >= LOOP_TIMEOUT))
...
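The GA log enable path polls MMIO_STATUS_OFFSET for MMIO_STATUS_GALOG_RUN_MASK. Without a delay between reads, the LOOP_TIMEOUT iterations can all complete before the hardware has actually started the log, so the wait expires spuriously; the udelay(10) added above turns the loop into a real time budget. The new <linux/iopoll.h> include provides the readl_poll_timeout() helpers, which express the same read/test/delay/timeout sequence as a single macro. Below is a minimal stand-alone sketch of the bounded-poll-with-delay pattern; all names (fake_status, GALOG_RUN, wait_for_galog_run) are invented for illustration and this is not the driver code.

/*
 * Sketch: bounded polling with a short delay between reads.
 * Hypothetical names only; not the AMD IOMMU driver code.
 */
#include <stdio.h>
#include <unistd.h>

#define LOOP_TIMEOUT	100000
#define GALOG_RUN	(1u << 8)		/* hypothetical "log is running" bit */

static volatile unsigned int fake_status;	/* stands in for the MMIO status register */

static int wait_for_galog_run(void)
{
	for (int i = 0; i < LOOP_TIMEOUT; ++i) {
		if (fake_status & GALOG_RUN)
			return 0;		/* device reports the log as running */
		usleep(10);			/* give the hardware time between reads */
	}
	return -1;				/* polling budget exhausted */
}

int main(void)
{
	fake_status = GALOG_RUN;		/* pretend the device came up immediately */
	printf("wait_for_galog_run() = %d\n", wait_for_galog_run());
	return 0;
}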
@@ -569,9 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 					     fn, &intel_ir_domain_ops,
 					     iommu);
 	if (!iommu->ir_domain) {
-		irq_domain_free_fwnode(fn);
 		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
-		goto out_free_bitmap;
+		goto out_free_fwnode;
 	}
 	iommu->ir_msi_domain =
 		arch_create_remap_msi_irq_domain(iommu->ir_domain,
@@ -595,7 +594,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 		if (dmar_enable_qi(iommu)) {
 			pr_err("Failed to enable queued invalidation\n");
-			goto out_free_bitmap;
+			goto out_free_ir_domain;
 		}
 	}
@@ -619,6 +618,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)

 	return 0;

+out_free_ir_domain:
+	if (iommu->ir_msi_domain)
+		irq_domain_remove(iommu->ir_msi_domain);
+	iommu->ir_msi_domain = NULL;
+	irq_domain_remove(iommu->ir_domain);
+	iommu->ir_domain = NULL;
+out_free_fwnode:
+	irq_domain_free_fwnode(fn);
 out_free_bitmap:
 	bitmap_free(bitmap);
 out_free_pages:
...
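intel_setup_irq_remapping() builds its resources in sequence (fwnode, IRQ domain, MSI domain, queued invalidation); before this fix a failure in dmar_enable_qi() jumped straight to out_free_bitmap and leaked the already-created IRQ domains and fwnode. The new out_free_ir_domain and out_free_fwnode labels unwind each resource in reverse order of creation, and each failure site jumps to the label that undoes exactly what exists so far. Below is a stand-alone sketch of that goto-unwind idiom; struct ctx and the a/b/c resources are invented for illustration and this is not the VT-d driver code.

/*
 * Sketch: reverse-order goto unwind on setup failure.
 * Hypothetical types and resources; not the VT-d driver code.
 */
#include <stdlib.h>

struct ctx {
	void *a;	/* first resource set up  */
	void *b;	/* second resource set up */
	void *c;	/* third resource set up  */
};

static int ctx_setup(struct ctx *ctx)
{
	ctx->a = malloc(16);
	if (!ctx->a)
		goto out;
	ctx->b = malloc(16);
	if (!ctx->b)
		goto out_free_a;	/* undo only what already exists */
	ctx->c = malloc(16);
	if (!ctx->c)
		goto out_free_b;	/* later failures jump to deeper labels */
	return 0;

out_free_b:
	free(ctx->b);
	ctx->b = NULL;
out_free_a:
	free(ctx->a);
	ctx->a = NULL;
out:
	return -1;
}

int main(void)
{
	struct ctx ctx = { 0 };
	int ret = ctx_setup(&ctx);

	free(ctx.c);	/* on success the caller owns the resources */
	free(ctx.b);
	free(ctx.a);
	return ret ? 1 : 0;
}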
@@ -349,6 +349,7 @@ EXPORT_SYMBOL_GPL(ioasid_alloc);
 /**
  * ioasid_get - obtain a reference to the IOASID
+ * @ioasid: the ID to get
  */
 void ioasid_get(ioasid_t ioasid)
 {
...
@@ -207,9 +207,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
 static void dev_iommu_free(struct device *dev)
 {
-	iommu_fwspec_free(dev);
-	kfree(dev->iommu);
+	struct dev_iommu *param = dev->iommu;
+
 	dev->iommu = NULL;
+	if (param->fwspec) {
+		fwnode_handle_put(param->fwspec->iommu_fwnode);
+		kfree(param->fwspec);
+	}
+	kfree(param);
 }

 static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
@@ -980,17 +985,6 @@ static int iommu_group_device_count(struct iommu_group *group)
 	return ret;
 }

-/**
- * iommu_group_for_each_dev - iterate over each device in the group
- * @group: the group
- * @data: caller opaque data to be passed to callback function
- * @fn: caller supplied callback function
- *
- * This function is called by group users to iterate over group devices.
- * Callers should hold a reference count to the group during callback.
- * The group->mutex is held across callbacks, which will block calls to
- * iommu_group_add/remove_device.
- */
 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
 				      int (*fn)(struct device *, void *))
 {
@@ -1005,7 +999,17 @@ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
 	return ret;
 }

+/**
+ * iommu_group_for_each_dev - iterate over each device in the group
+ * @group: the group
+ * @data: caller opaque data to be passed to callback function
+ * @fn: caller supplied callback function
+ *
+ * This function is called by group users to iterate over group devices.
+ * Callers should hold a reference count to the group during callback.
+ * The group->mutex is held across callbacks, which will block calls to
+ * iommu_group_add/remove_device.
+ */
 int iommu_group_for_each_dev(struct iommu_group *group, void *data,
 			     int (*fn)(struct device *, void *))
 {
@@ -3032,6 +3036,7 @@ EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
  * iommu_sva_bind_device() - Bind a process address space to a device
  * @dev: the device
  * @mm: the mm to bind, caller must hold a reference to it
+ * @drvdata: opaque data pointer to pass to bind callback
  *
  * Create a bond between device and address space, allowing the device to access
  * the mm using the returned PASID. If a bond already exists between @device and
...
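The dev_iommu_free() rewrite detaches before it frees: the dev->iommu pointer is snapshotted into a local, the device field is cleared, and only then are the fwspec (dropping its fwnode reference) and the dev_iommu structure released, so nothing that still inspects dev->iommu during probe can dereference freed memory. The remaining hunks move the kernel-doc comment to the exported iommu_group_for_each_dev() and document the @drvdata parameter to silence W=1 warnings. Below is a stand-alone sketch of the detach-then-free ordering; the struct definitions are minimal stand-ins invented for illustration and this is not the kernel implementation.

/*
 * Sketch: clear the shared pointer first, then free what it owned.
 * Hypothetical minimal types; not the kernel code.
 */
#include <stdlib.h>

struct fwspec    { void *fwnode; };
struct dev_iommu { struct fwspec *fwspec; };
struct device    { struct dev_iommu *iommu; };

static void dev_iommu_free_sketch(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;

	dev->iommu = NULL;		/* detach first: no path can reach it anymore */
	if (param->fwspec)
		free(param->fwspec);	/* then release what it owned */
	free(param);			/* finally free the container itself */
}

int main(void)
{
	struct device dev = { 0 };

	dev.iommu = calloc(1, sizeof(*dev.iommu));
	if (!dev.iommu)
		return 1;
	dev.iommu->fwspec = calloc(1, sizeof(*dev.iommu->fwspec));
	dev_iommu_free_sketch(&dev);
	return dev.iommu != NULL;	/* 0 on success: pointer was cleared */
}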
@@ -1085,7 +1085,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
 }

 /**
- * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
+ * omap_iommu_prepare - prepare() dev_pm_ops implementation
  * @dev: iommu device
  *
  * This function performs the necessary checks to determine if the IOMMU
...