Commit dd8a25c5 authored by Jason Gunthorpe, committed by Joerg Roedel

iommu: Remove deferred attach check from __iommu_detach_device()

Currently, __iommu_detach_device() is only called via call chains that
run after the device driver is attached - e.g. via the explicit attach
APIs called by the device driver.

Commit bd421264 ("iommu: Fix deferred domain attachment") removed the
deferred domain attachment check from the __iommu_attach_device() path,
so it should just work unconditionally in the __iommu_detach_device()
path as well.

It actually looks like a bug that we were blocking detach on these paths,
since the attach was unconditional and the caller is going to free the
(probably) UNMANAGED domain once this returns.

The only places we should be testing for deferred attach are the initial
point where the DMA device is linked to the group, and then again during
the DMA API calls.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20230110025408.667767-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent c1fe9119
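
The second test point mentioned above, the DMA API calls, is outside this
patch. Below is a minimal sketch of how that path consumes the new
attach_deferred flag, assuming the upstream iommu_deferred_attach_enabled
static key; example_dma_map() is a hypothetical stand-in for the real call
sites in drivers/iommu/dma-iommu.c:

/*
 * Hypothetical sketch of a DMA API mapping op completing a deferred
 * attach; the real call sites live in drivers/iommu/dma-iommu.c.
 */
#include <linux/dma-mapping.h>
#include <linux/iommu.h>

static dma_addr_t example_dma_map(struct device *dev, phys_addr_t phys,
				  size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/* Lazily complete the attach that was deferred at probe time. */
	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	/* ... IOVA allocation and iommu_map() would follow here ... */
	return 0;
}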
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -371,6 +371,30 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
 	return ret;
 }
 
+static bool iommu_is_attach_deferred(struct device *dev)
+{
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+	if (ops->is_attach_deferred)
+		return ops->is_attach_deferred(dev);
+
+	return false;
+}
+
+static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
+{
+	struct iommu_domain *domain = data;
+
+	lockdep_assert_held(&dev->iommu_group->mutex);
+
+	if (iommu_is_attach_deferred(dev)) {
+		dev->iommu->attach_deferred = 1;
+		return 0;
+	}
+
+	return __iommu_attach_device(domain, dev);
+}
+
 int iommu_probe_device(struct device *dev)
 {
 	const struct iommu_ops *ops;
@@ -401,7 +425,7 @@ int iommu_probe_device(struct device *dev)
 	 * attach the default domain.
 	 */
 	if (group->default_domain && !group->owner) {
-		ret = __iommu_attach_device(group->default_domain, dev);
+		ret = iommu_group_do_dma_first_attach(dev, group->default_domain);
 		if (ret) {
 			mutex_unlock(&group->mutex);
 			iommu_group_put(group);
@@ -947,16 +971,6 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
 	return ret;
 }
 
-static bool iommu_is_attach_deferred(struct device *dev)
-{
-	const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-	if (ops->is_attach_deferred)
-		return ops->is_attach_deferred(dev);
-
-	return false;
-}
-
 /**
  * iommu_group_add_device - add a device to an iommu group
  * @group: the group into which to add the device (reference should be held)
@@ -1009,8 +1023,8 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 	mutex_lock(&group->mutex);
 	list_add_tail(&device->list, &group->devices);
-	if (group->domain && !iommu_is_attach_deferred(dev))
-		ret = __iommu_attach_device(group->domain, dev);
+	if (group->domain)
+		ret = iommu_group_do_dma_first_attach(dev, group->domain);
 	mutex_unlock(&group->mutex);
 	if (ret)
 		goto err_put_group;
 
@@ -1776,21 +1790,10 @@ static void probe_alloc_default_domain(struct bus_type *bus,
 
 }
 
-static int iommu_group_do_dma_attach(struct device *dev, void *data)
-{
-	struct iommu_domain *domain = data;
-	int ret = 0;
-
-	if (!iommu_is_attach_deferred(dev))
-		ret = __iommu_attach_device(domain, dev);
-
-	return ret;
-}
-
-static int __iommu_group_dma_attach(struct iommu_group *group)
+static int __iommu_group_dma_first_attach(struct iommu_group *group)
 {
 	return __iommu_group_for_each_dev(group, group->default_domain,
-					  iommu_group_do_dma_attach);
+					  iommu_group_do_dma_first_attach);
 }
 
 static int iommu_group_do_probe_finalize(struct device *dev, void *data)
@@ -1855,7 +1858,7 @@ int bus_iommu_probe(struct bus_type *bus)
 
 		iommu_group_create_direct_mappings(group);
 
-		ret = __iommu_group_dma_attach(group);
+		ret = __iommu_group_dma_first_attach(group);
 
 		mutex_unlock(&group->mutex);
 
@@ -1987,9 +1990,11 @@ static int __iommu_attach_device(struct iommu_domain *domain,
 		return -ENODEV;
 
 	ret = domain->ops->attach_dev(domain, dev);
-	if (!ret)
-		trace_attach_device_to_domain(dev);
-	return ret;
+	if (ret)
+		return ret;
+	dev->iommu->attach_deferred = 0;
+	trace_attach_device_to_domain(dev);
+	return 0;
 }
 
 /**
@@ -2034,7 +2039,7 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
 
 int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 {
-	if (iommu_is_attach_deferred(dev))
+	if (dev->iommu && dev->iommu->attach_deferred)
 		return __iommu_attach_device(domain, dev);
 
 	return 0;
@@ -2043,9 +2048,6 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 static void __iommu_detach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
-	if (iommu_is_attach_deferred(dev))
-		return;
-
 	domain->ops->detach_dev(domain, dev);
 	trace_detach_device_from_domain(dev);
 }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -405,6 +405,7 @@ struct iommu_fault_param {
  * @iommu_dev:	IOMMU device this device is linked to
  * @priv:	IOMMU Driver private data
  * @max_pasids:	number of PASIDs this device can consume
+ * @attach_deferred: the dma domain attachment is deferred
  *
  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
  *	struct iommu_group	*iommu_group;
@@ -417,6 +418,7 @@ struct dev_iommu {
 	struct iommu_device		*iommu_dev;
 	void				*priv;
 	u32				max_pasids;
+	u32				attach_deferred:1;
 };
 
 int iommu_device_register(struct iommu_device *iommu,
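
For reference, the driver side of this mechanism: an IOMMU driver opts
devices into deferral by implementing the is_attach_deferred op, which
iommu_is_attach_deferred() above consults. A hypothetical minimal
implementation; example_is_attach_deferred(), struct example_dev_data and
its defer_attach field are all illustrative (the AMD and Intel drivers are
the in-tree users of this op):

#include <linux/iommu.h>

/* Illustrative per-device driver state. */
struct example_dev_data {
	bool defer_attach;
};

static bool example_is_attach_deferred(struct device *dev)
{
	struct example_dev_data *data = dev_iommu_priv_get(dev);

	/* True if the driver decided at probe time to defer this device. */
	return data && data->defer_attach;
}

static const struct iommu_ops example_iommu_ops = {
	/* ... other ops elided ... */
	.is_attach_deferred = example_is_attach_deferred,
};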