Commit 61128f08 authored by Cho KyongHo, committed by Joerg Roedel

iommu/exynos: Change error handling when page table update fails

This patch changes the driver so that it no longer panics on errors when updating the page table.
Instead, it prints error messages with a call stack.
Signed-off-by: Cho KyongHo <pullip.cho@samsung.com>
Signed-off-by: Shaik Ameer Basha <shaik.ameer@samsung.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 7222e8db
...@@ -728,13 +728,18 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain, ...@@ -728,13 +728,18 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova, static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
short *pgcounter) short *pgcounter)
{ {
if (lv1ent_section(sent)) {
WARN(1, "Trying mapping on %#08lx mapped with 1MiB page", iova);
return ERR_PTR(-EADDRINUSE);
}
if (lv1ent_fault(sent)) { if (lv1ent_fault(sent)) {
unsigned long *pent; unsigned long *pent;
pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC); pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1)); BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
if (!pent) if (!pent)
return NULL; return ERR_PTR(-ENOMEM);
*sent = mk_lv1ent_page(virt_to_phys(pent)); *sent = mk_lv1ent_page(virt_to_phys(pent));
*pgcounter = NUM_LV2ENTRIES; *pgcounter = NUM_LV2ENTRIES;
...@@ -745,14 +750,21 @@ static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova, ...@@ -745,14 +750,21 @@ static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
return page_entry(sent, iova); return page_entry(sent, iova);
} }
static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt) static int lv1set_section(unsigned long *sent, unsigned long iova,
phys_addr_t paddr, short *pgcnt)
{ {
if (lv1ent_section(sent)) if (lv1ent_section(sent)) {
WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
iova);
return -EADDRINUSE; return -EADDRINUSE;
}
if (lv1ent_page(sent)) { if (lv1ent_page(sent)) {
if (*pgcnt != NUM_LV2ENTRIES) if (*pgcnt != NUM_LV2ENTRIES) {
WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
iova);
return -EADDRINUSE; return -EADDRINUSE;
}
kfree(page_entry(sent, 0)); kfree(page_entry(sent, 0));
...@@ -770,8 +782,10 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, ...@@ -770,8 +782,10 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
short *pgcnt) short *pgcnt)
{ {
if (size == SPAGE_SIZE) { if (size == SPAGE_SIZE) {
if (!lv2ent_fault(pent)) if (!lv2ent_fault(pent)) {
WARN(1, "Trying mapping on 4KiB where mapping exists");
return -EADDRINUSE; return -EADDRINUSE;
}
*pent = mk_lv2ent_spage(paddr); *pent = mk_lv2ent_spage(paddr);
pgtable_flush(pent, pent + 1); pgtable_flush(pent, pent + 1);
...@@ -780,7 +794,10 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size, ...@@ -780,7 +794,10 @@ static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
int i; int i;
for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) { for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
if (!lv2ent_fault(pent)) { if (!lv2ent_fault(pent)) {
memset(pent, 0, sizeof(*pent) * i); WARN(1,
"Trying mapping on 64KiB where mapping exists");
if (i > 0)
memset(pent - i, 0, sizeof(*pent) * i);
return -EADDRINUSE; return -EADDRINUSE;
} }
...@@ -808,7 +825,7 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -808,7 +825,7 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
entry = section_entry(priv->pgtable, iova); entry = section_entry(priv->pgtable, iova);
if (size == SECT_SIZE) { if (size == SECT_SIZE) {
ret = lv1set_section(entry, paddr, ret = lv1set_section(entry, iova, paddr,
&priv->lv2entcnt[lv1ent_offset(iova)]); &priv->lv2entcnt[lv1ent_offset(iova)]);
} else { } else {
unsigned long *pent; unsigned long *pent;
...@@ -816,17 +833,16 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova, ...@@ -816,17 +833,16 @@ static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
pent = alloc_lv2entry(entry, iova, pent = alloc_lv2entry(entry, iova,
&priv->lv2entcnt[lv1ent_offset(iova)]); &priv->lv2entcnt[lv1ent_offset(iova)]);
if (!pent) if (IS_ERR(pent))
ret = -ENOMEM; ret = PTR_ERR(pent);
else else
ret = lv2set_page(pent, paddr, size, ret = lv2set_page(pent, paddr, size,
&priv->lv2entcnt[lv1ent_offset(iova)]); &priv->lv2entcnt[lv1ent_offset(iova)]);
} }
if (ret) { if (ret)
pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n", pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
__func__, iova, size); __func__, iova, size);
}
spin_unlock_irqrestore(&priv->pgtablelock, flags); spin_unlock_irqrestore(&priv->pgtablelock, flags);
...@@ -840,6 +856,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain, ...@@ -840,6 +856,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
struct sysmmu_drvdata *data; struct sysmmu_drvdata *data;
unsigned long flags; unsigned long flags;
unsigned long *ent; unsigned long *ent;
size_t err_pgsize;
BUG_ON(priv->pgtable == NULL); BUG_ON(priv->pgtable == NULL);
...@@ -848,7 +865,10 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain, ...@@ -848,7 +865,10 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
ent = section_entry(priv->pgtable, iova); ent = section_entry(priv->pgtable, iova);
if (lv1ent_section(ent)) { if (lv1ent_section(ent)) {
BUG_ON(size < SECT_SIZE); if (size < SECT_SIZE) {
err_pgsize = SECT_SIZE;
goto err;
}
*ent = 0; *ent = 0;
pgtable_flush(ent, ent + 1); pgtable_flush(ent, ent + 1);
...@@ -879,7 +899,10 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain, ...@@ -879,7 +899,10 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
} }
/* lv1ent_large(ent) == true here */ /* lv1ent_large(ent) == true here */
BUG_ON(size < LPAGE_SIZE); if (size < LPAGE_SIZE) {
err_pgsize = LPAGE_SIZE;
goto err;
}
memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE); memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
...@@ -893,8 +916,15 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain, ...@@ -893,8 +916,15 @@ static size_t exynos_iommu_unmap(struct iommu_domain *domain,
sysmmu_tlb_invalidate_entry(data->dev, iova); sysmmu_tlb_invalidate_entry(data->dev, iova);
spin_unlock_irqrestore(&priv->lock, flags); spin_unlock_irqrestore(&priv->lock, flags);
return size; return size;
err:
spin_unlock_irqrestore(&priv->pgtablelock, flags);
WARN(1,
"%s: Failed due to size(%#x) @ %#08lx is smaller than page size %#x\n",
__func__, size, iova, err_pgsize);
return 0;
} }
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain, static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment