Commit ad108121 authored by Fernando Guzman Lugo, committed by Hari Kanigeri

OMAP: iovmm: add superpages support to fixed da address

This patch adds superpage support for fixed da addresses
inside the iommu_kmap function.
Signed-off-by: Fernando Guzman Lugo <x0095840@ti.com>
Acked-by: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
parent ba6e1f4f
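For context, a standalone sketch (not part of the patch) of the page-size
selection the diff below introduces: each scatterlist entry uses the largest
IOMMU page size that both the device address (da) and the physical address
(pa) are aligned to, capped by the remaining length. The SZ_* constants and
iopgsz_max() here are simplified stand-ins so the example builds outside the
kernel; the real definitions live in the OMAP iommu headers.

/* build with: gcc -Wall superpage_demo.c -o superpage_demo */
#include <stdio.h>

#define SZ_4K	0x00001000u
#define SZ_64K	0x00010000u
#define SZ_1M	0x00100000u
#define SZ_16M	0x01000000u

/* largest supported page size 'addr' is aligned to, 0 if below 4K */
static unsigned max_alignment(unsigned addr)
{
	static const unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K };
	unsigned i;

	for (i = 0; i < 4 && (addr & (pagesize[i] - 1)); i++)
		;
	return (i < 4) ? pagesize[i] : 0;
}

/* stand-in for the kernel's iopgsz_max(): largest page size <= bytes */
static unsigned iopgsz_max(unsigned bytes)
{
	if (bytes >= SZ_16M)
		return SZ_16M;
	if (bytes >= SZ_1M)
		return SZ_1M;
	if (bytes >= SZ_64K)
		return SZ_64K;
	return SZ_4K;
}

int main(void)
{
	/* hypothetical fixed mapping: 3 MB at a 1M-aligned da and pa */
	unsigned da = 0x90100000u, pa = 0x81000000u, bytes = 0x00300000u;
	unsigned nr_entries = 0;

	/* same walk as the new sgtable_nents(bytes, da, pa) below */
	while (bytes) {
		unsigned ent_sz = max_alignment(da | pa);

		if (ent_sz > iopgsz_max(bytes))
			ent_sz = iopgsz_max(bytes);
		if (!ent_sz)	/* kernel callers guarantee page alignment */
			break;
		printf("entry %u: da 0x%08x -> pa 0x%08x, %u KB page\n",
		       nr_entries, da, pa, ent_sz / 1024);
		nr_entries++;
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}
	printf("%u scatterlist entries\n", nr_entries);
	return 0;
}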
@@ -87,35 +87,43 @@ static size_t sgtable_len(const struct sg_table *sgt)
 }
 #define sgtable_ok(x) (!!sgtable_len(x))

+static unsigned max_alignment(u32 addr)
+{
+	int i;
+	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
+		;
+	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
+}
+
 /*
  * calculate the optimal number sg elements from total bytes based on
  * iommu superpages
  */
-static unsigned int sgtable_nents(size_t bytes)
+static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
 {
-	int i;
-	unsigned int nr_entries;
-	const unsigned long pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
+	unsigned nr_entries = 0, ent_sz;

 	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
 		pr_err("%s: wrong size %08x\n", __func__, bytes);
 		return 0;
 	}

-	nr_entries = 0;
-	for (i = 0; i < ARRAY_SIZE(pagesize); i++) {
-		if (bytes >= pagesize[i]) {
-			nr_entries += (bytes / pagesize[i]);
-			bytes %= pagesize[i];
-		}
+	while (bytes) {
+		ent_sz = max_alignment(da | pa);
+		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
+		nr_entries++;
+		da += ent_sz;
+		pa += ent_sz;
+		bytes -= ent_sz;
 	}
-	BUG_ON(bytes);

 	return nr_entries;
 }

 /* allocate and initialize sg_table header(a kind of 'superblock') */
-static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
+static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
+							u32 da, u32 pa)
 {
 	unsigned int nr_entries;
 	int err;
@@ -127,9 +135,8 @@ static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags)
 	if (!IS_ALIGNED(bytes, PAGE_SIZE))
 		return ERR_PTR(-EINVAL);

-	/* FIXME: IOVMF_DA_FIXED should support 'superpages' */
-	if ((flags & IOVMF_LINEAR) && (flags & IOVMF_DA_ANON)) {
-		nr_entries = sgtable_nents(bytes);
+	if (flags & IOVMF_LINEAR) {
+		nr_entries = sgtable_nents(bytes, da, pa);
 		if (!nr_entries)
 			return ERR_PTR(-EINVAL);
 	} else
@@ -409,7 +416,8 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
 	BUG_ON(!sgt);
 }

-static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
+static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
+								size_t len)
 {
 	unsigned int i;
 	struct scatterlist *sg;
@@ -418,9 +426,10 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
 	va = phys_to_virt(pa);

 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		size_t bytes;
+		unsigned bytes;

-		bytes = iopgsz_max(len);
+		bytes = max_alignment(da | pa);
+		bytes = min_t(unsigned, bytes, iopgsz_max(len));

 		BUG_ON(!iopgsz_ok(bytes));

@@ -429,6 +438,7 @@ static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, size_t len)
 		 * 'pa' is cotinuous(linear).
 		 */
 		pa += bytes;
+		da += bytes;
 		len -= bytes;
 	}
 	BUG_ON(len);
@@ -695,18 +705,18 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
 	if (!va)
 		return -ENOMEM;

-	sgt = sgtable_alloc(bytes, flags);
+	flags &= IOVMF_HW_MASK;
+	flags |= IOVMF_DISCONT;
+	flags |= IOVMF_ALLOC;
+	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
+
+	sgt = sgtable_alloc(bytes, flags, da, 0);
 	if (IS_ERR(sgt)) {
 		da = PTR_ERR(sgt);
 		goto err_sgt_alloc;
 	}

 	sgtable_fill_vmalloc(sgt, va);

-	flags &= IOVMF_HW_MASK;
-	flags |= IOVMF_DISCONT;
-	flags |= IOVMF_ALLOC;
-	flags |= (da ? IOVMF_DA_FIXED : IOVMF_DA_ANON);
-
 	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da))
 		goto err_iommu_vmap;
@@ -746,11 +756,11 @@ static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
 {
 	struct sg_table *sgt;

-	sgt = sgtable_alloc(bytes, flags);
+	sgt = sgtable_alloc(bytes, flags, da, pa);
 	if (IS_ERR(sgt))
 		return PTR_ERR(sgt);

-	sgtable_fill_kmalloc(sgt, pa, bytes);
+	sgtable_fill_kmalloc(sgt, pa, da, bytes);

 	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
 	if (IS_ERR_VALUE(da)) {
...
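To get a feel for the scatterlist sizes the new sgtable_nents() produces, a
few hypothetical inputs run through the loop above (numbers derived from the
code, not taken from the patch):

	16 MB, da and pa 16M-aligned  ->    1 entry    (1 x 16M)
	16 MB, da only 4K-aligned     -> 4096 entries  (4096 x 4K)
	 3 MB, da and pa 1M-aligned   ->    3 entries  (3 x 1M)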