Commit 251a79f8 authored by Hans Verkuil, committed by Mauro Carvalho Chehab

[media] vb2: use dma_map_sg_attrs to prevent unnecessary sync

By default dma_map_sg syncs the mapped buffer to the device. But
buf_prepare expects a buffer synced for the CPU, and the buffer
will be synced to the device in the prepare memop.

The reverse is true for dma_unmap_sg, buf_finish and the finish
memop.

To prevent these unnecessary syncs we ask dma_(un)map_sg to skip
the sync.
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
parent f5294f45
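For context, the pattern this commit relies on looks roughly like the sketch below: map and unmap the scatterlist with DMA_ATTR_SKIP_CPU_SYNC so the implicit sync is skipped, and leave the syncs to the prepare()/finish() memops. This is a minimal sketch against the dma_attrs API of that kernel era; the example_* helper names are illustrative only and are not part of the commit.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map without the implicit CPU-to-device sync that dma_map_sg() would do. */
static int example_map_sg_skip_sync(struct device *dev, struct sg_table *sgt,
				    enum dma_data_direction dir)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	return dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, &attrs);
}

/* Unmap without the device-to-CPU sync; the finish() memop already did it. */
static void example_unmap_sg_skip_sync(struct device *dev, struct sg_table *sgt,
				       enum dma_data_direction dir)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, &attrs);
}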
videobuf2-dma-contig.c:
@@ -511,7 +511,15 @@ static void vb2_dc_put_userptr(void *buf_priv)
 	struct sg_table *sgt = buf->dma_sgt;
 
 	if (sgt) {
-		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
+		/*
+		 * No need to sync to CPU, it's already synced to the CPU
+		 * since the finish() memop will have been called before this.
+		 */
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				   buf->dma_dir, &attrs);
 		if (!vma_is_io(buf->vma))
 			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
@@ -568,6 +576,9 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	struct sg_table *sgt;
 	unsigned long contig_size;
 	unsigned long dma_align = dma_get_cache_alignment();
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	/* Only cache aligned DMA transfers are reliable */
 	if (!IS_ALIGNED(vaddr | size, dma_align)) {
@@ -654,8 +665,12 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	kfree(pages);
 	pages = NULL;
 
-	sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
-				buf->dma_dir);
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+				      buf->dma_dir, &attrs);
 	if (sgt->nents <= 0) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
@@ -677,7 +692,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	return buf;
 
 fail_map_sg:
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
+			   buf->dma_dir, &attrs);
 
 fail_sgt_init:
 	if (!vma_is_io(buf->vma))
videobuf2-dma-sg.c:
@@ -107,6 +107,9 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 	struct sg_table *sgt;
 	int ret;
 	int num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	if (WARN_ON(alloc_ctx == NULL))
 		return NULL;
@@ -140,9 +143,13 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
 	buf->dev = get_device(conf->dev);
 
 	sgt = &buf->sg_table;
-	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
 		goto fail_map;
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 
 	buf->handler.refcount = &buf->refcount;
 	buf->handler.put = vb2_dma_sg_put;
@@ -175,9 +182,13 @@ static void vb2_dma_sg_put(void *buf_priv)
 	int i = buf->num_pages;
 
 	if (atomic_dec_and_test(&buf->refcount)) {
+		DEFINE_DMA_ATTRS(attrs);
+
+		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
-		dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+				   buf->dma_dir, &attrs);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(buf->dma_sgt);
@@ -228,6 +239,9 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	int num_pages_from_user;
 	struct vm_area_struct *vma;
 	struct sg_table *sgt;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	buf = kzalloc(sizeof *buf, GFP_KERNEL);
 	if (!buf)
@@ -296,9 +310,13 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 		goto userptr_fail_alloc_table_from_pages;
 
 	sgt = &buf->sg_table;
-	if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
+	/*
+	 * No need to sync to the device, this will happen later when the
+	 * prepare() memop is called.
+	 */
+	if (dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->nents,
+			     buf->dma_dir, &attrs) == 0)
 		goto userptr_fail_map;
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
 	return buf;
 
 userptr_fail_map:
@@ -327,10 +345,13 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = &buf->sg_table;
 	int i = buf->num_pages;
+	DEFINE_DMA_ATTRS(attrs);
+
+	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir, &attrs);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
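The dma_sync_sg_for_cpu() calls removed above are not lost: the sync now happens when the buffer actually changes hands, in the prepare() and finish() memops. As a hedged sketch (the example_* names are illustrative, not the exact vb2 memop implementations), those memops reduce to explicit sync calls:

/* prepare(): sync the buffer to the device just before DMA starts. */
static void example_prepare(struct device *dev, struct sg_table *sgt,
			    enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, dir);
}

/* finish(): sync the buffer back for the CPU once DMA is done. */
static void example_finish(struct device *dev, struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, dir);
}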