Commit f4a14773 authored by Nicolin Chen, committed by Jason Gunthorpe

iommu: Use EINVAL for incompatible device/domain in ->attach_dev

Following the new rules in the include/linux/iommu.h kdocs, update all
drivers' ->attach_dev callback functions to return EINVAL in the failure
paths that are related to domain incompatibility.

Also, drop the adjacent error prints to prevent kernel log spam.

Link: https://lore.kernel.org/r/f52a07f7320da94afe575c9631340d0019a203a7.1666042873.git.nicolinc@nvidia.com
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent bd7ebb77
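For reference, the convention these hunks follow can be sketched as below. This is a minimal illustration only, not code from any in-tree driver: the my_iommu_* names, the struct my_iommu / struct my_domain types and the to_my_domain() helper are hypothetical; only struct iommu_domain, dev_iommu_priv_get() and the EINVAL convention come from the kernel headers and this commit.

	#include <linux/container_of.h>
	#include <linux/device.h>
	#include <linux/errno.h>
	#include <linux/iommu.h>

	/* Hypothetical per-driver state, for illustration only. */
	struct my_iommu {
		struct device *dev;
	};

	struct my_domain {
		struct iommu_domain domain;
		struct my_iommu *iommu;		/* instance this domain is bound to */
	};

	static struct my_domain *to_my_domain(struct iommu_domain *dom)
	{
		return container_of(dom, struct my_domain, domain);
	}

	/*
	 * Sketch of an ->attach_dev callback following this commit's convention:
	 * an incompatible device/domain pairing returns -EINVAL, with no adjacent
	 * dev_err(), so a caller probing several domains does not spam the log.
	 */
	static int my_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
	{
		struct my_domain *my_dom = to_my_domain(domain);
		struct my_iommu *iommu = dev_iommu_priv_get(dev);

		if (my_dom->iommu && my_dom->iommu != iommu)
			return -EINVAL;		/* device and domain are incompatible */

		my_dom->iommu = iommu;
		/* ... program the translation for @dev here ... */
		return 0;
	}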
@@ -2430,23 +2430,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 			goto out_unlock;
 		}
 	} else if (smmu_domain->smmu != smmu) {
-		dev_err(dev,
-			"cannot attach to SMMU %s (upstream of %s)\n",
-			dev_name(smmu_domain->smmu->dev),
-			dev_name(smmu->dev));
-		ret = -ENXIO;
+		ret = -EINVAL;
 		goto out_unlock;
 	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
 		   master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
-		dev_err(dev,
-			"cannot attach to incompatible domain (%u SSID bits != %u)\n",
-			smmu_domain->s1_cfg.s1cdmax, master->ssid_bits);
 		ret = -EINVAL;
 		goto out_unlock;
 	} else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
 		   smmu_domain->stall_enabled != master->stall_enabled) {
-		dev_err(dev, "cannot attach to stall-%s domain\n",
-			smmu_domain->stall_enabled ? "enabled" : "disabled");
 		ret = -EINVAL;
 		goto out_unlock;
 	}
...
@@ -1150,9 +1150,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * different SMMUs.
 	 */
 	if (smmu_domain->smmu != smmu) {
-		dev_err(dev,
-			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
-			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
 		ret = -EINVAL;
 		goto rpm_put;
 	}
...
@@ -381,13 +381,8 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	 * Sanity check the domain. We don't support domains across
 	 * different IOMMUs.
 	 */
-	if (qcom_domain->iommu != qcom_iommu) {
-		dev_err(dev, "cannot attach to IOMMU %s while already "
-			"attached to domain on IOMMU %s\n",
-			dev_name(qcom_domain->iommu->dev),
-			dev_name(qcom_iommu->dev));
+	if (qcom_domain->iommu != qcom_iommu)
 		return -EINVAL;
-	}
 
 	return 0;
 }
...
@@ -4194,19 +4194,15 @@ static int prepare_domain_attach_device(struct iommu_domain *domain,
 		return -ENODEV;
 
 	if (dmar_domain->force_snooping && !ecap_sc_support(iommu->ecap))
-		return -EOPNOTSUPP;
+		return -EINVAL;
 
 	/* check if this iommu agaw is sufficient for max mapped address */
 	addr_width = agaw_to_width(iommu->agaw);
 	if (addr_width > cap_mgaw(iommu->cap))
 		addr_width = cap_mgaw(iommu->cap);
 
-	if (dmar_domain->max_addr > (1LL << addr_width)) {
-		dev_err(dev, "%s: iommu width (%d) is not "
-			"sufficient for the mapped address (%llx)\n",
-			__func__, addr_width, dmar_domain->max_addr);
-		return -EFAULT;
-	}
+	if (dmar_domain->max_addr > (1LL << addr_width))
+		return -EINVAL;
 	dmar_domain->gaw = addr_width;
 
 	/*
...
@@ -628,8 +628,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
 		 * Something is wrong, we can't attach two devices using
 		 * different IOMMUs to the same domain.
 		 */
-		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
-			dev_name(mmu->dev), dev_name(domain->mmu->dev));
 		ret = -EINVAL;
 	} else
 		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
...
@@ -1472,7 +1472,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	/* only a single client device can be attached to a domain */
 	if (omap_domain->dev) {
 		dev_err(dev, "iommu domain is already attached\n");
-		ret = -EBUSY;
+		ret = -EINVAL;
 		goto out;
 	}
...
@@ -237,10 +237,8 @@ static int sprd_iommu_attach_device(struct iommu_domain *domain,
 	struct sprd_iommu_domain *dom = to_sprd_domain(domain);
 	size_t pgt_size = sprd_iommu_pgt_size(domain);
 
-	if (dom->sdev) {
-		pr_err("There's already a device attached to this domain.\n");
+	if (dom->sdev)
 		return -EINVAL;
-	}
 
 	dom->pgt_va = dma_alloc_coherent(sdev->dev, pgt_size, &dom->pgt_pa, GFP_KERNEL);
 	if (!dom->pgt_va)
...
@@ -112,7 +112,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
 	spin_lock(&gart->dom_lock);
 
 	if (gart->active_domain && gart->active_domain != domain) {
-		ret = -EBUSY;
+		ret = -EINVAL;
 	} else if (dev_iommu_priv_get(dev) != domain) {
 		dev_iommu_priv_set(dev, domain);
 		gart->active_domain = domain;
...
@@ -734,8 +734,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		 */
 		ret = viommu_domain_finalise(vdev, domain);
 	} else if (vdomain->viommu != vdev->viommu) {
-		dev_err(dev, "cannot attach to foreign vIOMMU\n");
-		ret = -EXDEV;
+		ret = -EINVAL;
 	}
 
 	mutex_unlock(&vdomain->mutex);
...