Commit 7690a33f authored by Marek Szyprowski

drm: msm: fix common struct sg_table related issues

The Documentation/DMA-API-HOWTO.txt states that dma_map_sg() returns the
number of entries created in the DMA address space. However, the
subsequent calls to dma_sync_sg_for_{device,cpu}() and dma_unmap_sg()
must be made with the original number of entries passed to
dma_map_sg().
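
For illustration only (not part of this patch; dev and sgt are
placeholder device and sg_table pointers), the documented rule looks
like this:

	int count;

	count = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	if (count == 0)
		return -ENOMEM;

	/* the device sees 'count' (possibly coalesced) DMA segments here */

	/* sync and unmap take the original entry count, not 'count' */
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);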

struct sg_table is a common structure used to describe a non-contiguous
memory buffer and is widely used in the DRM and graphics subsystems. It
consists of a scatterlist with memory pages and DMA addresses (sgl
entry), as well as the number of scatterlist entries: CPU pages
(orig_nents entry) and DMA mapped pages (nents entry).
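
For reference, the structure is defined in include/linux/scatterlist.h as:

	struct sg_table {
		struct scatterlist *sgl;	/* the list */
		unsigned int nents;		/* number of mapped entries */
		unsigned int orig_nents;	/* original size of list */
	};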

It turned out to be a common mistake to misuse the nents and orig_nents
entries, calling DMA-mapping functions with the wrong number of entries
or ignoring the number of mapped entries returned by dma_map_sg().
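
The typical buggy pattern (illustrative only, using the placeholder
names from above) passes the DMA-mapped count back to unmap:

	/* WRONG: nents is the DMA-mapped count, not the CPU entry count */
	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	/* correct: unmap with the entry count the table was mapped with */
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_BIDIRECTIONAL);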

To avoid such issues, let's use the common dma-mapping wrappers
operating directly on struct sg_table objects and use scatterlist page
iterators where possible. This, almost always, hides references to the
nents and orig_nents entries, making the code robust, easier to follow
and copy/paste safe.
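
A minimal sketch of the wrapper-based pattern (again with placeholder
dev and sgt; the device page-table write is hypothetical):

	struct sg_dma_page_iter dma_iter;
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		return ret;	/* 0 on success, negative errno on failure */

	/* walk the DMA-mapped pages without touching nents/orig_nents */
	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
		/* program 'addr' into the device's page tables here */
	}

	dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);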
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Rob Clark <robdclark@gmail.com>
parent 90dcf444
drivers/gpu/drm/msm/msm_gem.c

@@ -53,11 +53,10 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
 	struct device *dev = msm_obj->base.dev->dev;
 
 	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		dma_sync_sgtable_for_device(dev, msm_obj->sgt,
+				DMA_BIDIRECTIONAL);
 	} else {
-		dma_map_sg(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 	}
 }
@@ -66,11 +65,9 @@ static void sync_for_cpu(struct msm_gem_object *msm_obj)
 	struct device *dev = msm_obj->base.dev->dev;
 
 	if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
 	} else {
-		dma_unmap_sg(dev, msm_obj->sgt->sgl,
-				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 	}
 }
drivers/gpu/drm/msm/msm_gpummu.c

@@ -30,21 +30,20 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
 {
 	struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
 	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
-	struct scatterlist *sg;
+	struct sg_dma_page_iter dma_iter;
 	unsigned prot_bits = 0;
-	unsigned i, j;
 
 	if (prot & IOMMU_WRITE)
 		prot_bits |= 1;
 	if (prot & IOMMU_READ)
 		prot_bits |= 2;
 
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
-		dma_addr_t addr = sg->dma_address;
-		for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
-			gpummu->table[idx] = addr | prot_bits;
-			addr += GPUMMU_PAGE_SIZE;
-		}
+	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+		dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
+		int i;
+
+		for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
+			gpummu->table[idx++] = (addr + i) | prot_bits;
 	}
 
 	/* we can improve by deferring flush for multiple map() */
drivers/gpu/drm/msm/msm_iommu.c

@@ -36,7 +36,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	size_t ret;
 
-	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
 	WARN_ON(!ret);
 
 	return (ret == len) ? 0 : -EINVAL;