Commit 70bccecf authored by Rob Clark

drm/msm/iommu: optimize map/unmap

Using map_pages/unmap_pages cuts down on the # of pgtable walks needed
in the process of finding where to insert/remove an entry.  The end
result is ~5-10x faster than mapping a single page at a time.

v2: Rename iommu_pgsize(), drop obsolete comments, fix error handling
    in msm_iommu_pagetable_map()
Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Sai Prakash Ranjan <quic_saipraka@quicinc.com>
Patchwork: https://patchwork.freedesktop.org/patch/498892/
Link: https://lore.kernel.org/r/20220823163719.90399-1-robdclark@gmail.com
parent 6d29709d
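
Before the diff, it may help to see the core idea in isolation: each loop iteration computes the largest page size that the hardware supports and that both the address alignment and the remaining length allow, so a single io-pgtable call can cover many pages at once. The following is a rough user-space sketch of that selection math, for illustration only, not the kernel code: BIT/GENMASK/__fls/__ffs are stood in with compiler builtins, a 64-bit unsigned long is assumed, and the "trim count at the next bigger alignment boundary" refinement of the real calc_pgsize() is omitted.

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the kernel helpers that calc_pgsize() relies on */
#define BIT(n)		(1UL << (n))
#define GENMASK(h, l)	(((~0UL) >> (63 - (h))) & ~(BIT(l) - 1UL))

static unsigned int fls64_(unsigned long x) { return 63 - __builtin_clzl(x); }
static unsigned int ffs64_(unsigned long x) { return __builtin_ctzl(x); }

/* Largest supported page size that the combined address alignment and the
 * remaining size allow, plus how many such pages fit into size. */
static size_t pick_pgsize(unsigned long pgsize_bitmap, unsigned long iova,
			  unsigned long paddr, size_t size, size_t *count)
{
	unsigned long addr_merge = paddr | iova;
	unsigned long pgsizes;
	unsigned int pgsize_idx;

	/* Page sizes supported by the "hardware" and no bigger than size */
	pgsizes = pgsize_bitmap & GENMASK(fls64_(size), 0);

	/* Constrain further based on the combined address alignment */
	if (addr_merge)
		pgsizes &= GENMASK(ffs64_(addr_merge), 0);

	/* The caller must guarantee at least smallest-page alignment;
	 * the kernel code BUG_ONs if pgsizes ends up empty. */
	pgsize_idx = fls64_(pgsizes);
	*count = size >> pgsize_idx;
	return BIT(pgsize_idx);
}

int main(void)
{
	/* 4K | 64K | 1M | 2M, similar to an ARM LPAE pgsize_bitmap */
	unsigned long bitmap = BIT(12) | BIT(16) | BIT(20) | BIT(21);
	size_t count, pgsize;

	/* 2M-aligned iova and paddr, 4M request: two 2M mappings */
	pgsize = pick_pgsize(bitmap, 0x200000, 0x400000, 0x400000, &count);
	printf("pgsize=0x%zx count=%zu\n", pgsize, count);

	/* only 64K-aligned: sixty-four 64K mappings instead of 1024 4K ones */
	pgsize = pick_pgsize(bitmap, 0x10000, 0x30000, 0x400000, &count);
	printf("pgsize=0x%zx count=%zu\n", pgsize, count);
	return 0;
}

The real helper in the diff goes one step further: when a run of the chosen pages would cross an alignment boundary of the next bigger supported page size and enough length remains past it, it trims count at that boundary so the following iteration can switch up to the bigger size.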
@@ -21,6 +21,7 @@ struct msm_iommu_pagetable {
 	struct msm_mmu base;
 	struct msm_mmu *parent;
 	struct io_pgtable_ops *pgtbl_ops;
+	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
 	phys_addr_t ttbr;
 	u32 asid;
 };
@@ -29,23 +30,84 @@ static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
 	return container_of(mmu, struct msm_iommu_pagetable, base);
 }
 
+/* based on iommu_pgsize() in iommu.c: */
+static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
+			  unsigned long iova, phys_addr_t paddr,
+			  size_t size, size_t *count)
+{
+	unsigned int pgsize_idx, pgsize_idx_next;
+	unsigned long pgsizes;
+	size_t offset, pgsize, pgsize_next;
+	unsigned long addr_merge = paddr | iova;
+
+	/* Page sizes supported by the hardware and small enough for @size */
+	pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);
+
+	/* Constrain the page sizes further based on the maximum alignment */
+	if (likely(addr_merge))
+		pgsizes &= GENMASK(__ffs(addr_merge), 0);
+
+	/* Make sure we have at least one suitable page size */
+	BUG_ON(!pgsizes);
+
+	/* Pick the biggest page size remaining */
+	pgsize_idx = __fls(pgsizes);
+	pgsize = BIT(pgsize_idx);
+	if (!count)
+		return pgsize;
+
+	/* Find the next biggest supported page size, if it exists */
+	pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
+	if (!pgsizes)
+		goto out_set_count;
+
+	pgsize_idx_next = __ffs(pgsizes);
+	pgsize_next = BIT(pgsize_idx_next);
+
+	/*
+	 * There's no point trying a bigger page size unless the virtual
+	 * and physical addresses are similarly offset within the larger page.
+	 */
+	if ((iova ^ paddr) & (pgsize_next - 1))
+		goto out_set_count;
+
+	/* Calculate the offset to the next page size alignment boundary */
+	offset = pgsize_next - (addr_merge & (pgsize_next - 1));
+
+	/*
+	 * If size is big enough to accommodate the larger page, reduce
+	 * the number of smaller pages.
+	 */
+	if (offset + pgsize_next <= size)
+		size = offset;
+
+out_set_count:
+	*count = size >> pgsize_idx;
+	return pgsize;
+}
+
 static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
 		size_t size)
 {
 	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
 	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
-	size_t unmapped = 0;
 
-	/* Unmap the block one page at a time */
 	while (size) {
-		unmapped += ops->unmap(ops, iova, 4096, NULL);
-		iova += 4096;
-		size -= 4096;
+		size_t unmapped, pgsize, count;
+
+		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
+
+		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
+		if (!unmapped)
+			break;
+
+		iova += unmapped;
+		size -= unmapped;
 	}
 
 	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);
 
-	return (unmapped == size) ? 0 : -EINVAL;
+	return (size == 0) ? 0 : -EINVAL;
 }
 
 static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
@@ -54,7 +116,6 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
 	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
 	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
 	struct scatterlist *sg;
-	size_t mapped = 0;
 	u64 addr = iova;
 	unsigned int i;
@@ -62,17 +123,26 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
 		size_t size = sg->length;
 		phys_addr_t phys = sg_phys(sg);
 
-		/* Map the block one page at a time */
 		while (size) {
-			if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
-				msm_iommu_pagetable_unmap(mmu, iova, mapped);
+			size_t pgsize, count, mapped = 0;
+			int ret;
+
+			pgsize = calc_pgsize(pagetable, addr, phys, size, &count);
+
+			ret = ops->map_pages(ops, addr, phys, pgsize, count,
+					     prot, GFP_KERNEL, &mapped);
+
+			/* map_pages could fail after mapping some of the pages,
+			 * so update the counters before error handling.
+			 */
+			phys += mapped;
+			addr += mapped;
+			size -= mapped;
+
+			if (ret) {
+				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
 				return -EINVAL;
 			}
-
-			phys += 4096;
-			addr += 4096;
-			size -= 4096;
-			mapped += 4096;
 		}
 	}
@@ -207,6 +277,7 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
 
 	/* Needed later for TLB flush */
 	pagetable->parent = parent;
+	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
 	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
 
 	/*
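
The v2 error-handling fix in msm_iommu_pagetable_map() deserves a closer look: unlike the old single-page ops->map() calls, ops->map_pages() can fail after already mapping some of the requested pages, reporting partial progress through its mapped argument. The counters therefore advance before the error check, and the cleanup length is addr - iova rather than a separately maintained running total. A minimal user-space sketch of that accounting follows; the mock map_pages() and its numbers are made up purely for illustration.

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

/* Mock of io_pgtable_ops->map_pages(): maps at most 3 pages per call and
 * reports an error if it could not satisfy the whole request */
static int mock_map_pages(size_t pgsize, size_t count, size_t *mapped)
{
	size_t ok = count < 3 ? count : 3;

	*mapped = ok * pgsize;
	return (ok == count) ? 0 : -ENOMEM;
}

int main(void)
{
	unsigned long iova = 0x100000, addr = iova;
	size_t size = 8 * 4096;

	while (size) {
		size_t mapped = 0;
		int ret = mock_map_pages(4096, size / 4096, &mapped);

		/* Advance by what actually got mapped *before* checking ret,
		 * mirroring msm_iommu_pagetable_map() */
		addr += mapped;
		size -= mapped;

		if (ret) {
			/* addr - iova covers the partial progress too */
			printf("cleanup: unmap 0x%zx bytes at 0x%lx\n",
			       (size_t)(addr - iova), iova);
			return 1;
		}
	}
	return 0;
}

Here the mock maps three of the eight requested pages and then fails, so the cleanup unmaps 0x3000 bytes; a counter that only accumulated fully successful calls would report 0 and leak those three pages. That is exactly why the kernel loop folds mapped into phys/addr/size before it looks at ret.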