Commit 4dff47c7 authored by Rob Herring

drm/panfrost: Add support for 2MB page entries

Add support for 2MB-sized pages. This will improve our map and unmap
times and save a bit of memory by avoiding 3rd-level page tables for
contiguous allocations.

As we use shmem for buffers and huge page allocations for shmem are off
by default, there isn't an improvement out of the box and userspace must
enable THP for shmem.
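
(For reference, outside the original message: shmem THP can be enabled
system-wide via the transparent_hugepage/shmem_enabled knob in sysfs, or
per-mount with tmpfs's huge= option.)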

It's not clear whether the h/w can support the 1GB page size that the
standard ARM long-descriptor format allows. In any case, we are unlikely
to see contiguous 1GB allocations on current h/w.

Cc: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Rob Herring <robh@kernel.org>
Acked-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Steven Price <steven.price@arm.com>
Acked-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190411215313.1937-1-robh@kernel.org
parent f3ba9122
@@ -137,6 +137,14 @@ static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
 	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
 }
 
+static size_t get_pgsize(u64 addr, size_t size)
+{
+	if (addr & (SZ_2M - 1) || size < SZ_2M)
+		return SZ_4K;
+
+	return SZ_2M;
+}
+
 int panfrost_mmu_map(struct panfrost_gem_object *bo)
 {
 	struct drm_gem_object *obj = &bo->base.base;
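
The new get_pgsize() helper is the heart of the patch: it returns SZ_2M only
when the address is 2MB-aligned and at least 2MB of the request remains, and
falls back to SZ_4K otherwise. As an illustration only (not part of the
patch), a minimal userspace sketch of the helper's behavior, with the SZ_*
constants redefined here since linux/sizes.h is kernel-only:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SZ_4K 0x1000UL
    #define SZ_2M 0x200000UL

    /* Standalone mirror of the kernel helper, for testing only. */
    static size_t get_pgsize(uint64_t addr, size_t size)
    {
            /* 4K unless addr is 2M-aligned and at least 2M is left. */
            if (addr & (SZ_2M - 1) || size < SZ_2M)
                    return SZ_4K;

            return SZ_2M;
    }

    int main(void)
    {
            /* 2M-aligned with >= 2M remaining: take the 2M entry. */
            assert(get_pgsize(0x200000, SZ_2M) == SZ_2M);
            /* Not 2M-aligned: must fall back to 4K entries. */
            assert(get_pgsize(0x201000, SZ_2M) == SZ_4K);
            /* Aligned but under 2M remaining: 4K as well. */
            assert(get_pgsize(0x400000, SZ_2M - SZ_4K) == SZ_4K);
            return 0;
    }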
@@ -165,10 +173,12 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo)
 	dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
 
 	while (len) {
-		ops->map(ops, iova, paddr, SZ_4K, IOMMU_WRITE | IOMMU_READ);
-		iova += SZ_4K;
-		paddr += SZ_4K;
-		len -= SZ_4K;
+		size_t pgsize = get_pgsize(iova | paddr, len);
+
+		ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+		iova += pgsize;
+		paddr += pgsize;
+		len -= pgsize;
 	}
 }
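
Note that the map loop feeds iova | paddr to get_pgsize(): the bitwise OR of
the two addresses is 2MB-aligned only when both of them are, so a single
check covers the virtual and the physical side at once. A hypothetical
standalone walk-through of the loop (again with the SZ_* constants
redefined), showing how it emits 4KB entries until the addresses reach a 2MB
boundary, promotes to a 2MB entry, then finishes the tail with 4KB:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SZ_4K 0x1000UL
    #define SZ_2M 0x200000UL

    static size_t get_pgsize(uint64_t addr, size_t size)
    {
            if (addr & (SZ_2M - 1) || size < SZ_2M)
                    return SZ_4K;

            return SZ_2M;
    }

    int main(void)
    {
            /* Buffer starting one 4K page below a 2M boundary. */
            uint64_t iova = 0x1ff000, paddr = 0x1ff000;
            size_t len = SZ_2M + 2 * SZ_4K;

            while (len) {
                    size_t pgsize = get_pgsize(iova | paddr, len);

                    printf("map iova=0x%" PRIx64 " size=0x%zx\n",
                           iova, pgsize);
                    iova += pgsize;
                    paddr += pgsize;
                    len -= pgsize;
            }
            return 0;
    }

Run standalone, this prints a 4KB map at 0x1ff000, a 2MB map at 0x200000,
and a final 4KB map at 0x400000.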
@@ -202,9 +212,15 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 	mutex_lock(&pfdev->mmu->lock);
 
 	while (unmapped_len < len) {
-		ops->unmap(ops, iova, SZ_4K);
-		iova += SZ_4K;
-		unmapped_len += SZ_4K;
+		size_t unmapped_page;
+		size_t pgsize = get_pgsize(iova, len - unmapped_len);
+
+		unmapped_page = ops->unmap(ops, iova, pgsize);
+		if (!unmapped_page)
+			break;
+
+		iova += unmapped_page;
+		unmapped_len += unmapped_page;
 	}
 
 	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
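
One subtlety in the unmap loop: the io-pgtable unmap callback returns the
number of bytes it actually unmapped, or 0 on failure. The loop therefore
advances by unmapped_page instead of a fixed SZ_4K, and breaks out if
nothing was torn down, which keeps the walk correct when 4KB and 2MB entries
are mixed within a single mapping.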
@@ -342,7 +358,7 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
 	mmu_write(pfdev, MMU_INT_MASK, ~0);
 
 	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap	= SZ_4K, // | SZ_2M | SZ_1G),
+		.pgsize_bitmap	= SZ_4K | SZ_2M,
 		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
 		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
 		.tlb		= &mmu_tlb_ops,
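
Finally, pgsize_bitmap records which page sizes the io-pgtable code may use
for this table, so SZ_2M has to be advertised here before get_pgsize() can
hand 2MB requests to ops->map(). The old line's stale "// | SZ_2M | SZ_1G),"
comment goes away with the change; SZ_1G stays out, matching the commit
message's doubt about 1GB support on this h/w.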