Commit fa8ce574 authored by Robin Murphy, committed by Joerg Roedel

iommu/qcom: Update to {map,unmap}_pages

Update map/unmap to the new multi-page interfaces, which is dead easy
since we just pass them through to io-pgtable anyway.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/ccff9a133d12ec938741720be6baf5d788b71ea0.1668100209.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 0a17bbab
@@ -410,7 +410,8 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
 }
 
 static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
-			  phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+			  phys_addr_t paddr, size_t pgsize, size_t pgcount,
+			  int prot, gfp_t gfp, size_t *mapped)
 {
 	int ret;
 	unsigned long flags;
@@ -421,13 +422,14 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
 		return -ENODEV;
 
 	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
-	ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
+	ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, GFP_ATOMIC, mapped);
 	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
 	return ret;
 }
 
 static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-			       size_t size, struct iommu_iotlb_gather *gather)
+			       size_t pgsize, size_t pgcount,
+			       struct iommu_iotlb_gather *gather)
 {
 	size_t ret;
 	unsigned long flags;
@@ -444,7 +446,7 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
 	 */
 	pm_runtime_get_sync(qcom_domain->iommu->dev);
 	spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
-	ret = ops->unmap(ops, iova, size, gather);
+	ret = ops->unmap_pages(ops, iova, pgsize, pgcount, gather);
 	spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
 	pm_runtime_put_sync(qcom_domain->iommu->dev);
 
@@ -582,8 +584,8 @@ static const struct iommu_ops qcom_iommu_ops = {
 	.default_domain_ops = &(const struct iommu_domain_ops) {
 		.attach_dev	= qcom_iommu_attach_dev,
 		.detach_dev	= qcom_iommu_detach_dev,
-		.map		= qcom_iommu_map,
-		.unmap		= qcom_iommu_unmap,
+		.map_pages	= qcom_iommu_map,
+		.unmap_pages	= qcom_iommu_unmap,
 		.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
 		.iotlb_sync	= qcom_iommu_iotlb_sync,
 		.iova_to_phys	= qcom_iommu_iova_to_phys,
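For readers unfamiliar with the multi-page callbacks, here is a minimal, hypothetical sketch (plain C, not taken from this patch or from the kernel tree) of what the pgsize/pgcount/*mapped semantics amount to. Without a multi-page callback, a caller has to loop over pgcount pages of pgsize bytes itself, issuing one indirect call per page and tracking partial progress in *mapped; with map_pages/unmap_pages, as in the change above, the whole run is handed to io-pgtable in a single call under one lock acquisition. The helper names below (example_map_one, example_map_by_looping) are invented for illustration.

	#include <stddef.h>

	typedef unsigned long long phys_addr_t;	/* stand-in for the kernel type */

	/* Hypothetical single-page primitive, analogous to the old ->map() path. */
	static int example_map_one(unsigned long iova, phys_addr_t paddr, size_t pgsize)
	{
		(void)iova; (void)paddr; (void)pgsize;
		return 0;	/* pretend the mapping always succeeds */
	}

	/*
	 * What a caller must do when only a single-page callback exists: one call
	 * per page, accumulating partial progress in *mapped so the caller can
	 * unwind on failure. A multi-page callback replaces this whole loop with
	 * a single call that receives pgsize, pgcount and *mapped directly.
	 */
	static int example_map_by_looping(unsigned long iova, phys_addr_t paddr,
					  size_t pgsize, size_t pgcount, size_t *mapped)
	{
		size_t i;
		int ret = 0;

		*mapped = 0;
		for (i = 0; i < pgcount; i++) {
			ret = example_map_one(iova + i * pgsize,
					      paddr + i * pgsize, pgsize);
			if (ret)
				break;
			*mapped += pgsize;
		}
		return ret;
	}

For this driver the practical benefit is fewer indirect calls and spinlock round-trips per request, since qcom_iommu_map()/qcom_iommu_unmap() already just forward to io-pgtable.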