Commit 8b7c0280 authored by Marek Szyprowski, committed by Mauro Carvalho Chehab

media: videobuf2: use sgtable-based scatterlist wrappers

Use the recently introduced common wrappers that operate directly on struct
sg_table objects, together with the scatterlist page iterators, to make the
code a bit more compact, more robust, easier to follow, and copy/paste safe.

No functional change: the code already issued all the scatterlist-related
calls correctly.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
parent f95fc014
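
For context before the diff: the sgtable wrappers used below are thin inline
helpers and macros from include/linux/dma-mapping.h and
include/linux/scatterlist.h that pass the appropriate nents field of a
struct sg_table to the underlying scatterlist calls. A paraphrased sketch of
the mapping helper as introduced around this time (the exact error code
returned internally may differ by kernel version; this sketch is not part of
the patch):

	/* Map all orig_nents CPU-side entries for DMA, record the resulting
	 * number of DMA segments in sgt->nents, and convert the legacy
	 * "0 means failure" convention into a normal error code.
	 */
	static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
			enum dma_data_direction dir, unsigned long attrs)
	{
		int nents;

		nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
		if (nents <= 0)
			return -EINVAL;
		sgt->nents = nents;
		return 0;
	}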
drivers/media/common/videobuf2/videobuf2-dma-contig.c

@@ -58,10 +58,10 @@ static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
 	unsigned int i;
 	unsigned long size = 0;
 
-	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+	for_each_sgtable_dma_sg(sgt, s, i) {
 		if (sg_dma_address(s) != expected)
 			break;
-		expected = sg_dma_address(s) + sg_dma_len(s);
+		expected += sg_dma_len(s);
 		size += sg_dma_len(s);
 	}
 	return size;
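
The new iterator is a trivial macro from include/linux/scatterlist.h. Its
value is that it hard-wires the correct entry count: the DMA-mapped side of
an sg_table must be walked with sgt->nents, while the CPU-page side uses
sgt->orig_nents, and picking the wrong field is exactly the copy/paste bug
this series eliminates. Paraphrased sketch (not part of this patch):

	/* Iterate the DMA-mapped entries of an sg_table, i.e. the nents
	 * count established by dma_map_sgtable(); paraphrased from
	 * include/linux/scatterlist.h.
	 */
	#define for_each_sgtable_dma_sg(sgt, sg, i)	\
		for_each_sg((sgt)->sgl, sg, (sgt)->nents, i)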
@@ -103,8 +103,7 @@ static void vb2_dc_prepare(void *buf_priv)
 	if (!sgt)
 		return;
 
-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-			       buf->dma_dir);
+	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dc_finish(void *buf_priv)
@@ -115,7 +114,7 @@ static void vb2_dc_finish(void *buf_priv)
 	if (!sgt)
 		return;
 
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 /*********************************************/
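
Both sync wrappers used above always walk the CPU-side list, so they pass
orig_nents unconditionally. Paraphrased sketch from
include/linux/dma-mapping.h (not part of this patch):

	/* Cache maintenance covers every CPU-visible entry, hence orig_nents. */
	static inline void dma_sync_sgtable_for_device(struct device *dev,
			struct sg_table *sgt, enum dma_data_direction dir)
	{
		dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
	}

	static inline void dma_sync_sgtable_for_cpu(struct device *dev,
			struct sg_table *sgt, enum dma_data_direction dir)
	{
		dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
	}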
@@ -275,8 +274,8 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
 	 * memory locations do not require any explicit cache
 	 * maintenance prior or after being used by the device.
 	 */
-	dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+			  DMA_ATTR_SKIP_CPU_SYNC);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
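
Unmapping is the other easy place to get the count wrong: dma_unmap_sg_attrs()
must be given orig_nents (the count passed at map time), never the nents value
the mapping returned. The wrapper encodes that choice once instead of at every
call site. Paraphrased sketch (not part of this patch):

	/* Unmap always takes the original CPU-side entry count. */
	static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
			enum dma_data_direction dir, unsigned long attrs)
	{
		dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	}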
@@ -301,8 +300,8 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		attach->dma_dir = DMA_NONE;
 	}
 
@@ -310,9 +309,8 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
 	 * mapping to the client with new direction, no cache sync
 	 * required see comment in vb2_dc_dmabuf_ops_detach()
 	 */
-	sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				      dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents) {
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
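
Note how the failure check changes shape here and in the hunks below: the old
code stored the result in sgt->nents and tested it against zero, and did so
inconsistently across this file (!sgt->nents in one place, sgt->nents <= 0 in
another), whereas dma_map_sgtable() returns 0 on success and a negative errno
on failure, so every call site collapses to the same pattern. A hypothetical
call site, for illustration only:

	/* Old convention, kept as a comment for contrast:
	 *
	 *	sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
	 *				      dir, 0);
	 *	if (!sgt->nents)
	 *		return -EIO;
	 *
	 * New convention: sgt->nents is updated internally and errors come
	 * back as a return code, so every caller checks the same way.
	 */
	if (dma_map_sgtable(dev, sgt, dir, 0))
		return -EIO;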
@@ -455,8 +453,8 @@ static void vb2_dc_put_userptr(void *buf_priv)
 		 * No need to sync to CPU, it's already synced to the CPU
 		 * since the finish() memop will have been called before this.
 		 */
-		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		pages = frame_vector_pages(buf->vec);
 		/* sgt should exist only if vector contains pages... */
 		BUG_ON(IS_ERR(pages));
@@ -553,9 +551,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (sgt->nents <= 0) {
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
 		goto fail_sgt_init;
@@ -577,8 +574,7 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 	return buf;
 
 fail_map_sg:
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 
 fail_sgt_init:
 	sg_free_table(sgt);

drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -148,9 +148,8 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents)
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC))
 		goto fail_map;
 
 	buf->handler.refcount = &buf->refcount;
@@ -186,8 +185,8 @@ static void vb2_dma_sg_put(void *buf_priv)
 	if (refcount_dec_and_test(&buf->refcount)) {
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
-		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(buf->dma_sgt);
@@ -204,8 +203,7 @@ static void vb2_dma_sg_prepare(void *buf_priv)
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-			       buf->dma_dir);
+	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dma_sg_finish(void *buf_priv)
@@ -213,7 +211,7 @@ static void vb2_dma_sg_finish(void *buf_priv)
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
@@ -256,9 +254,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents)
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC))
 		goto userptr_fail_map;
 
 	return buf;
@@ -284,8 +281,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 		__func__, buf->num_pages);
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
-			   DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
@@ -408,8 +404,7 @@ static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
 
 	/* release the scatterlist cache */
 	if (attach->dma_dir != DMA_NONE)
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
@@ -434,15 +429,12 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 		attach->dma_dir = DMA_NONE;
 	}
 
 	/* mapping to the client with new direction */
-	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				dma_dir);
-	if (!sgt->nents) {
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);

drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -229,7 +229,7 @@ static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
 		kfree(attach);
 		return ret;
 	}
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+	for_each_sgtable_sg(sgt, sg, i) {
 		struct page *page = vmalloc_to_page(vaddr);
 
 		if (!page) {
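
This attach path walks the CPU pages of a not-yet-mapped table, so the
conversion picks the orig_nents-based iterator. The old code read sgt->nents,
which happened to be correct only because sg_alloc_table() initializes nents
equal to orig_nents; the new macro makes the intent explicit. Paraphrased
sketch (not part of this patch):

	/* Iterate all CPU-side entries of an sg_table, the counterpart of
	 * for_each_sgtable_dma_sg(); paraphrased from
	 * include/linux/scatterlist.h.
	 */
	#define for_each_sgtable_sg(sgt, sg, i)	\
		for_each_sg((sgt)->sgl, sg, (sgt)->orig_nents, i)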
@@ -259,8 +259,7 @@ static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
 
 	/* release the scatterlist cache */
 	if (attach->dma_dir != DMA_NONE)
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
@@ -285,15 +284,12 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			     attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 		attach->dma_dir = DMA_NONE;
 	}
 
 	/* mapping to the client with new direction */
-	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				dma_dir);
-	if (!sgt->nents) {
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);