Commit b1d99dc5 authored by Will Deacon, committed by Joerg Roedel

iommu: Hook up '->unmap_pages' driver callback

Extend iommu_pgsize() to populate an optional 'count' parameter so that
we can direct the unmapping operation to the ->unmap_pages callback if it
has been provided by the driver.
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Signed-off-by: Georgi Djakov <quic_c_gdjako@quicinc.com>
Reviewed-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/1623850736-389584-8-git-send-email-quic_c_gdjako@quicinc.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 89d5b960
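
For reference, the driver callback this patch dispatches to was added by an earlier patch in this series; its shape in struct iommu_ops, sketched from that patch rather than from this diff, is:

	/* Unmap 'pgcount' contiguous pages of size 'pgsize' starting at
	 * 'iova', returning the number of bytes actually unmapped. */
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);
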
@@ -2376,11 +2376,11 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
-			   phys_addr_t paddr, size_t size)
+			   phys_addr_t paddr, size_t size, size_t *count)
 {
-	unsigned int pgsize_idx;
+	unsigned int pgsize_idx, pgsize_idx_next;
 	unsigned long pgsizes;
-	size_t pgsize;
+	size_t offset, pgsize, pgsize_next;
 	unsigned long addr_merge = paddr | iova;
 
 	/* Page sizes supported by the hardware and small enough for @size */
@@ -2396,7 +2396,36 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
 	/* Pick the biggest page size remaining */
 	pgsize_idx = __fls(pgsizes);
 	pgsize = BIT(pgsize_idx);
+	if (!count)
+		return pgsize;
 
+	/* Find the next biggest supported page size, if it exists */
+	pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
+	if (!pgsizes)
+		goto out_set_count;
+
+	pgsize_idx_next = __ffs(pgsizes);
+	pgsize_next = BIT(pgsize_idx_next);
+
+	/*
+	 * There's no point trying a bigger page size unless the virtual
+	 * and physical addresses are similarly offset within the larger page.
+	 */
+	if ((iova ^ paddr) & (pgsize_next - 1))
+		goto out_set_count;
+
+	/* Calculate the offset to the next page size alignment boundary */
+	offset = pgsize_next - (addr_merge & (pgsize_next - 1));
+
+	/*
+	 * If size is big enough to accommodate the larger page, reduce
+	 * the number of smaller pages.
+	 */
+	if (offset + pgsize_next <= size)
+		size = offset;
+
+out_set_count:
+	*count = size >> pgsize_idx;
 	return pgsize;
 }
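
To illustrate what the new 'count' computation does, here is a small userspace model of the hunk above. The kernel's __fls()/__ffs()/GENMASK()/BIT() helpers are re-derived (assuming 64-bit long), and the page-size bitmap and addresses are made-up example values, not taken from the patch:

	#include <stdio.h>
	#include <stddef.h>

	#define BIT(n)		(1UL << (n))
	#define GENMASK(h, l)	(((~0UL) >> (63 - (h))) & ~(BIT(l) - 1))

	/* Index of the highest/lowest set bit, like the kernel helpers */
	static unsigned int fls_idx(unsigned long x) { return 63 - __builtin_clzl(x); }
	static unsigned int ffs_idx(unsigned long x) { return __builtin_ctzl(x); }

	/* Model of iommu_pgsize(); assumes some supported size always fits
	 * (the kernel BUG_ONs otherwise). */
	static size_t pgsize_model(unsigned long bitmap, unsigned long iova,
				   unsigned long paddr, size_t size, size_t *count)
	{
		unsigned long addr_merge = paddr | iova;
		unsigned long pgsizes = bitmap & GENMASK(fls_idx(size), 0);
		unsigned int pgsize_idx;
		size_t pgsize, pgsize_next, offset;

		/* Constrain further based on the maximum alignment */
		if (addr_merge)
			pgsizes &= GENMASK(ffs_idx(addr_merge), 0);

		pgsize_idx = fls_idx(pgsizes);
		pgsize = BIT(pgsize_idx);

		/* Mirror of the new hunk: cap the run of 'pgsize' pages at
		 * the next page-size boundary when that boundary is usable. */
		pgsizes = bitmap & ~GENMASK(pgsize_idx, 0);
		if (pgsizes && !((iova ^ paddr) & (BIT(ffs_idx(pgsizes)) - 1))) {
			pgsize_next = BIT(ffs_idx(pgsizes));
			offset = pgsize_next - (addr_merge & (pgsize_next - 1));
			if (offset + pgsize_next <= size)
				size = offset;
		}
		*count = size >> pgsize_idx;
		return pgsize;
	}

	static void show(unsigned long bitmap, unsigned long iova, size_t size)
	{
		size_t pgsize, count;

		while (size) {
			pgsize = pgsize_model(bitmap, iova, iova, size, &count);
			printf("  unmap_pages(iova=0x%lx, pgsize=0x%zx, count=%zu)\n",
			       iova, pgsize, count);
			iova += pgsize * count;
			size -= pgsize * count;
		}
	}

	int main(void)
	{
		unsigned long bitmap = BIT(12) | BIT(21);	/* 4K | 2M */

		puts("64K at 0x1ff000: one batched call of 16 x 4K");
		show(bitmap, 0x1ff000, 0x10000);

		puts("0x202000 at 0x1ff000: 4K batch stops at the 2M boundary");
		show(bitmap, 0x1ff000, 0x202000);
		return 0;
	}

The first case shows the batching win: sixteen 4K pages go to the driver as a single call rather than sixteen. The second shows the boundary capping: the initial 4K run stops after one page (count = 1) so that the next call can use a single 2M page.
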
@@ -2434,7 +2463,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		size_t pgsize = iommu_pgsize(domain, iova, paddr, size);
+		size_t pgsize = iommu_pgsize(domain, iova, paddr, size, NULL);
 
 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
 			 iova, &paddr, pgsize);
@@ -2485,6 +2514,19 @@ int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
 }
 EXPORT_SYMBOL_GPL(iommu_map_atomic);
 
+static size_t __iommu_unmap_pages(struct iommu_domain *domain,
+				  unsigned long iova, size_t size,
+				  struct iommu_iotlb_gather *iotlb_gather)
+{
+	const struct iommu_ops *ops = domain->ops;
+	size_t pgsize, count;
+
+	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
+	return ops->unmap_pages ?
+	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
+	       ops->unmap(domain, iova, pgsize, iotlb_gather);
+}
+
 static size_t __iommu_unmap(struct iommu_domain *domain,
 			    unsigned long iova, size_t size,
 			    struct iommu_iotlb_gather *iotlb_gather)
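
On the driver side, ->unmap_pages() receives a whole run of same-sized pages in one call. As a purely hypothetical sketch, a driver without native batching could implement it as a loop over a per-page helper; my_unmap_one() below is a made-up name, not a real kernel function:

	static size_t my_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				     size_t pgsize, size_t pgcount,
				     struct iommu_iotlb_gather *iotlb_gather)
	{
		size_t unmapped = 0;

		/* Walk the batch; stop early if a page isn't mapped. */
		while (pgcount--) {
			if (my_unmap_one(domain, iova + unmapped, pgsize,
					 iotlb_gather) != pgsize)
				break;
			unmapped += pgsize;
		}
		return unmapped;	/* bytes actually unmapped */
	}

A real implementation would instead amortize the page-table walk and TLB maintenance across the whole batch, which is the point of the interface.
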
@@ -2494,7 +2536,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 	unsigned long orig_iova = iova;
 	unsigned int min_pagesz;
 
-	if (unlikely(ops->unmap == NULL ||
+	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
 		     domain->pgsize_bitmap == 0UL))
 		return 0;
@@ -2522,10 +2564,9 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t pgsize;
-
-		pgsize = iommu_pgsize(domain, iova, iova, size - unmapped);
-		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
+		unmapped_page = __iommu_unmap_pages(domain, iova,
+						    size - unmapped,
+						    iotlb_gather);
 		if (!unmapped_page)
 			break;