Commit 5e3435eb authored by Marek Szyprowski, committed by Joerg Roedel

iommu/exynos: Remove ARM-specific cache flush interface

This patch replaces custom ARM-specific code for performing CPU cache flush
operations with generic code based on the DMA-mapping API. The domain
management code is independent of any particular SYSMMU device, so the first
registered SYSMMU device is used for the DMA-mapping calls. This
simplification works fine because all SYSMMU controllers live in the same
address space (where the DMA address equals the physical address), and the
DMA-mapping calls are made mainly to flush the CPU cache so that changes
become visible to the SYSMMU controllers.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 58c6f6a3
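
In short, the patch replaces the old pgtable_flush() helper with DMA-mapping
based cache maintenance around every page-table update. For reference, the
core pattern is the update_pte() helper introduced in the diff below (shown
here as a short sketch; dma_dev is the first registered SYSMMU device):

	/*
	 * Sync the entry to the CPU, modify it, then sync it back to the
	 * device so the SYSMMU observes the update. Since the DMA address
	 * equals the physical address on these SoCs, the sync calls boil
	 * down to a CPU cache flush of the single page-table entry.
	 */
	static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
	{
		dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
					DMA_TO_DEVICE);
		*ent = val;
		dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
					   DMA_TO_DEVICE);
	}

Whole page tables are handled the same way with dma_map_single() and
dma_unmap_single(), as the diff shows.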
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -27,9 +27,6 @@
 #include <linux/slab.h>
 #include <linux/dma-iommu.h>
 
-#include <asm/cacheflush.h>
-#include <asm/pgtable.h>
-
 typedef u32 sysmmu_iova_t;
 typedef u32 sysmmu_pte_t;
 
@@ -83,6 +80,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
 	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
 }
 
+#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
 
 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
@@ -134,6 +132,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova)
 
 #define has_sysmmu(dev) (dev->archdata.iommu != NULL)
 
+static struct device *dma_dev;
 static struct kmem_cache *lv2table_kmem_cache;
 static sysmmu_pte_t *zero_lv2_table;
 #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
@@ -650,16 +649,19 @@ static struct platform_driver exynos_sysmmu_driver __refdata = {
 	}
 };
 
-static inline void pgtable_flush(void *vastart, void *vaend)
+static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
 {
-	dmac_flush_range(vastart, vaend);
-	outer_flush_range(virt_to_phys(vastart),
-				virt_to_phys(vaend));
+	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
+				DMA_TO_DEVICE);
+	*ent = val;
+	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
+				   DMA_TO_DEVICE);
 }
 
 static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 {
 	struct exynos_iommu_domain *domain;
+	dma_addr_t handle;
 	int i;
 
@@ -694,7 +696,10 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 		domain->pgtable[i + 7] = ZERO_LV2LINK;
 	}
 
-	pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES);
+	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
+				DMA_TO_DEVICE);
+	/* For mapping page table entries we rely on dma == phys */
+	BUG_ON(handle != virt_to_phys(domain->pgtable));
 
 	spin_lock_init(&domain->lock);
 	spin_lock_init(&domain->pgtablelock);
@@ -738,10 +743,18 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
 		iommu_put_dma_cookie(iommu_domain);
 
+	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
+			 DMA_TO_DEVICE);
+
 	for (i = 0; i < NUM_LV1ENTRIES; i++)
-		if (lv1ent_page(domain->pgtable + i))
+		if (lv1ent_page(domain->pgtable + i)) {
+			phys_addr_t base = lv2table_base(domain->pgtable + i);
+
+			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
+					 DMA_TO_DEVICE);
 			kmem_cache_free(lv2table_kmem_cache,
-				phys_to_virt(lv2table_base(domain->pgtable + i)));
+					phys_to_virt(base));
+		}
 
 	free_pages((unsigned long)domain->pgtable, 2);
 	free_pages((unsigned long)domain->lv2entcnt, 1);
@@ -834,11 +847,10 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
 		if (!pent)
 			return ERR_PTR(-ENOMEM);
 
-		*sent = mk_lv1ent_page(virt_to_phys(pent));
+		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
 		kmemleak_ignore(pent);
 		*pgcounter = NUM_LV2ENTRIES;
-		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
-		pgtable_flush(sent, sent + 1);
+		dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
 
 		/*
 		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
@@ -891,9 +903,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain,
 		*pgcnt = 0;
 	}
 
-	*sent = mk_lv1ent_sect(paddr);
-	pgtable_flush(sent, sent + 1);
+	update_pte(sent, mk_lv1ent_sect(paddr));
 
 	spin_lock(&domain->lock);
 	if (lv1ent_page_zero(sent)) {
@@ -917,12 +927,15 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 		if (WARN_ON(!lv2ent_fault(pent)))
 			return -EADDRINUSE;
 
-		*pent = mk_lv2ent_spage(paddr);
-		pgtable_flush(pent, pent + 1);
+		update_pte(pent, mk_lv2ent_spage(paddr));
 		*pgcnt -= 1;
 	} else { /* size == LPAGE_SIZE */
 		int i;
+		dma_addr_t pent_base = virt_to_phys(pent);
 
+		dma_sync_single_for_cpu(dma_dev, pent_base,
+					sizeof(*pent) * SPAGES_PER_LPAGE,
+					DMA_TO_DEVICE);
 		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
 			if (WARN_ON(!lv2ent_fault(pent))) {
 				if (i > 0)
@@ -932,7 +945,9 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
 
 			*pent = mk_lv2ent_lpage(paddr);
 		}
-		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
+		dma_sync_single_for_device(dma_dev, pent_base,
+					   sizeof(*pent) * SPAGES_PER_LPAGE,
+					   DMA_TO_DEVICE);
 		*pgcnt -= SPAGES_PER_LPAGE;
 	}
 
@@ -1042,8 +1057,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
 		}
 
 		/* workaround for h/w bug in System MMU v3.3 */
-		*ent = ZERO_LV2LINK;
-		pgtable_flush(ent, ent + 1);
+		update_pte(ent, ZERO_LV2LINK);
 		size = SECT_SIZE;
 		goto done;
 	}
@@ -1064,9 +1078,8 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
 	}
 
 	if (lv2ent_small(ent)) {
-		*ent = 0;
+		update_pte(ent, 0);
 		size = SPAGE_SIZE;
-		pgtable_flush(ent, ent + 1);
 		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
 		goto done;
 	}
@@ -1077,9 +1090,13 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
 		goto err;
 	}
 
+	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
+				sizeof(*ent) * SPAGES_PER_LPAGE,
+				DMA_TO_DEVICE);
 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
-	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
+	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
+				   sizeof(*ent) * SPAGES_PER_LPAGE,
+				   DMA_TO_DEVICE);
 
 	size = LPAGE_SIZE;
 	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
 done:
@@ -1261,6 +1278,13 @@ static int __init exynos_iommu_of_setup(struct device_node *np)
 	if (IS_ERR(pdev))
 		return PTR_ERR(pdev);
 
+	/*
+	 * use the first registered sysmmu device for performing
+	 * dma mapping operations on iommu page tables (cpu cache flush)
+	 */
+	if (!dma_dev)
+		dma_dev = &pdev->dev;
+
 	of_iommu_set_ops(np, &exynos_iommu_ops);
 
 	return 0;
 }