Commit 5957c193 authored by Jason Gunthorpe, committed by Joerg Roedel

iommu: Tidy the control flow in iommu_group_store_type()

Use a normal "goto unwind" instead of trying to be clever with checking
!ret and manually managing the unlock.
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Heiko Stuebner <heiko@sntech.de>
Tested-by: Niklas Schnelle <schnelle@linux.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/17-v5-1b99ae392328+44574-iommu_err_unwind_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent e996c12d
@@ -2940,6 +2940,7 @@ static int iommu_setup_default_domain(struct iommu_group *group,
 static ssize_t iommu_group_store_type(struct iommu_group *group,
				       const char *buf, size_t count)
 {
+	struct group_device *gdev;
	int ret, req_type;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
@@ -2964,20 +2965,23 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
	if (req_type == IOMMU_DOMAIN_DMA_FQ &&
	    group->default_domain->type == IOMMU_DOMAIN_DMA) {
		ret = iommu_dma_init_fq(group->default_domain);
-		if (!ret)
-			group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
+		if (ret)
+			goto out_unlock;

-		mutex_unlock(&group->mutex);
-		return ret ?: count;
+		group->default_domain->type = IOMMU_DOMAIN_DMA_FQ;
+		ret = count;
+		goto out_unlock;
	}

	/* Otherwise, ensure that device exists and no driver is bound. */
	if (list_empty(&group->devices) || group->owner_cnt) {
-		mutex_unlock(&group->mutex);
-		return -EPERM;
+		ret = -EPERM;
+		goto out_unlock;
	}

	ret = iommu_setup_default_domain(group, req_type);
+	if (ret)
+		goto out_unlock;

	/*
	 * Release the mutex here because ops->probe_finalize() call-back of
@@ -2988,13 +2992,12 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
	mutex_unlock(&group->mutex);

	/* Make sure dma_ops is appropriatley set */
-	if (!ret) {
-		struct group_device *gdev;
-
-		for_each_group_device(group, gdev)
-			iommu_group_do_probe_finalize(gdev->dev);
-	}
+	for_each_group_device(group, gdev)
+		iommu_group_do_probe_finalize(gdev->dev);
+	return count;

+out_unlock:
+	mutex_unlock(&group->mutex);
	return ret ?: count;
 }
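
Note: the control-flow change is easiest to see in isolation. Below is a minimal userspace sketch of the two styles the commit message contrasts; the pthread mutex and the do_step() helper are illustrative stand-ins only (not group->mutex or any IOMMU API), and the sketch is not the patched kernel function itself.

/*
 * Minimal userspace sketch of the two error-handling styles the commit
 * message contrasts.  The pthread mutex and do_step() are hypothetical
 * stand-ins for group->mutex and the real IOMMU work.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical helper: returns 0 on success, non-zero on failure. */
static int do_step(void)
{
	return 0;
}

/* Before: success tracked in 'ret', unlock managed by hand at the end. */
static int store_old(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = do_step();
	if (!ret) {
		/* ... extra work done only when do_step() succeeded ... */
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

/* After: every failure jumps to one unwind label that drops the lock. */
static int store_new(void)
{
	int ret;

	pthread_mutex_lock(&lock);
	ret = do_step();
	if (ret)
		goto out_unlock;

	/* ... extra work, reached only on success ... */
	pthread_mutex_unlock(&lock);
	return 0;

out_unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("old: %d, new: %d\n", store_old(), store_new());
	return 0;
}

With a single out_unlock label the failure paths share exactly one unlock-and-return site, so a failure path added later cannot silently skip the unlock or mishandle ret, which is the tidying the commit message describes.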