Commit cd474037 authored by Hans Verkuil, committed by Mauro Carvalho Chehab

[media] vb2: replace 'write' by 'dma_dir'

The 'write' argument is very ambiguous. I first assumed that if it is 1,
then we're doing video output — but instead it meant the reverse.

Since it is used to setup the dma_dir value anyway it is now replaced by
the correct dma_dir value which is unambiguous.
Signed-off-by: Hans Verkuil <hans.verkuil@cisco.com>
Acked-by: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
parent cf227429
...@@ -1358,7 +1358,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) ...@@ -1358,7 +1358,8 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
void *mem_priv; void *mem_priv;
unsigned int plane; unsigned int plane;
int ret; int ret;
int write = !V4L2_TYPE_IS_OUTPUT(q->type); enum dma_data_direction dma_dir =
V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
bool reacquired = vb->planes[0].mem_priv == NULL; bool reacquired = vb->planes[0].mem_priv == NULL;
memset(planes, 0, sizeof(planes[0]) * vb->num_planes); memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
...@@ -1400,7 +1401,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b) ...@@ -1400,7 +1401,7 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Acquire each plane's memory */ /* Acquire each plane's memory */
mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane], mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
planes[plane].m.userptr, planes[plane].m.userptr,
planes[plane].length, write); planes[plane].length, dma_dir);
if (IS_ERR_OR_NULL(mem_priv)) { if (IS_ERR_OR_NULL(mem_priv)) {
dprintk(1, "failed acquiring userspace " dprintk(1, "failed acquiring userspace "
"memory for plane %d\n", plane); "memory for plane %d\n", plane);
...@@ -1461,7 +1462,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) ...@@ -1461,7 +1462,8 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
void *mem_priv; void *mem_priv;
unsigned int plane; unsigned int plane;
int ret; int ret;
int write = !V4L2_TYPE_IS_OUTPUT(q->type); enum dma_data_direction dma_dir =
V4L2_TYPE_IS_OUTPUT(q->type) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
bool reacquired = vb->planes[0].mem_priv == NULL; bool reacquired = vb->planes[0].mem_priv == NULL;
memset(planes, 0, sizeof(planes[0]) * vb->num_planes); memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
...@@ -1509,7 +1511,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b) ...@@ -1509,7 +1511,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
/* Acquire each plane's memory */ /* Acquire each plane's memory */
mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane], mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
dbuf, planes[plane].length, write); dbuf, planes[plane].length, dma_dir);
if (IS_ERR(mem_priv)) { if (IS_ERR(mem_priv)) {
dprintk(1, "failed to attach dmabuf\n"); dprintk(1, "failed to attach dmabuf\n");
ret = PTR_ERR(mem_priv); ret = PTR_ERR(mem_priv);
......
...@@ -229,7 +229,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma) ...@@ -229,7 +229,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
struct vb2_dc_attachment { struct vb2_dc_attachment {
struct sg_table sgt; struct sg_table sgt;
enum dma_data_direction dir; enum dma_data_direction dma_dir;
}; };
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
...@@ -264,7 +264,7 @@ static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev, ...@@ -264,7 +264,7 @@ static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
wr = sg_next(wr); wr = sg_next(wr);
} }
attach->dir = DMA_NONE; attach->dma_dir = DMA_NONE;
dbuf_attach->priv = attach; dbuf_attach->priv = attach;
return 0; return 0;
...@@ -282,16 +282,16 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf, ...@@ -282,16 +282,16 @@ static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
sgt = &attach->sgt; sgt = &attach->sgt;
/* release the scatterlist cache */ /* release the scatterlist cache */
if (attach->dir != DMA_NONE) if (attach->dma_dir != DMA_NONE)
dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
attach->dir); attach->dma_dir);
sg_free_table(sgt); sg_free_table(sgt);
kfree(attach); kfree(attach);
db_attach->priv = NULL; db_attach->priv = NULL;
} }
static struct sg_table *vb2_dc_dmabuf_ops_map( static struct sg_table *vb2_dc_dmabuf_ops_map(
struct dma_buf_attachment *db_attach, enum dma_data_direction dir) struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{ {
struct vb2_dc_attachment *attach = db_attach->priv; struct vb2_dc_attachment *attach = db_attach->priv;
/* stealing dmabuf mutex to serialize map/unmap operations */ /* stealing dmabuf mutex to serialize map/unmap operations */
...@@ -303,27 +303,27 @@ static struct sg_table *vb2_dc_dmabuf_ops_map( ...@@ -303,27 +303,27 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
sgt = &attach->sgt; sgt = &attach->sgt;
/* return previously mapped sg table */ /* return previously mapped sg table */
if (attach->dir == dir) { if (attach->dma_dir == dma_dir) {
mutex_unlock(lock); mutex_unlock(lock);
return sgt; return sgt;
} }
/* release any previous cache */ /* release any previous cache */
if (attach->dir != DMA_NONE) { if (attach->dma_dir != DMA_NONE) {
dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
attach->dir); attach->dma_dir);
attach->dir = DMA_NONE; attach->dma_dir = DMA_NONE;
} }
/* mapping to the client with new direction */ /* mapping to the client with new direction */
ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir); ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
if (ret <= 0) { if (ret <= 0) {
pr_err("failed to map scatterlist\n"); pr_err("failed to map scatterlist\n");
mutex_unlock(lock); mutex_unlock(lock);
return ERR_PTR(-EIO); return ERR_PTR(-EIO);
} }
attach->dir = dir; attach->dma_dir = dma_dir;
mutex_unlock(lock); mutex_unlock(lock);
...@@ -331,7 +331,7 @@ static struct sg_table *vb2_dc_dmabuf_ops_map( ...@@ -331,7 +331,7 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
} }
static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach, static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
struct sg_table *sgt, enum dma_data_direction dir) struct sg_table *sgt, enum dma_data_direction dma_dir)
{ {
/* nothing to be done here */ /* nothing to be done here */
} }
...@@ -460,7 +460,8 @@ static int vb2_dc_get_user_pfn(unsigned long start, int n_pages, ...@@ -460,7 +460,8 @@ static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
} }
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
int n_pages, struct vm_area_struct *vma, int write) int n_pages, struct vm_area_struct *vma,
enum dma_data_direction dma_dir)
{ {
if (vma_is_io(vma)) { if (vma_is_io(vma)) {
unsigned int i; unsigned int i;
...@@ -482,7 +483,7 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages, ...@@ -482,7 +483,7 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
int n; int n;
n = get_user_pages(current, current->mm, start & PAGE_MASK, n = get_user_pages(current, current->mm, start & PAGE_MASK,
n_pages, write, 1, pages, NULL); n_pages, dma_dir == DMA_FROM_DEVICE, 1, pages, NULL);
/* negative error means that no page was pinned */ /* negative error means that no page was pinned */
n = max(n, 0); n = max(n, 0);
if (n != n_pages) { if (n != n_pages) {
...@@ -551,7 +552,7 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn ...@@ -551,7 +552,7 @@ static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn
#endif #endif
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write) unsigned long size, enum dma_data_direction dma_dir)
{ {
struct vb2_dc_conf *conf = alloc_ctx; struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf; struct vb2_dc_buf *buf;
...@@ -582,7 +583,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, ...@@ -582,7 +583,7 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
buf->dev = conf->dev; buf->dev = conf->dev;
buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; buf->dma_dir = dma_dir;
start = vaddr & PAGE_MASK; start = vaddr & PAGE_MASK;
offset = vaddr & ~PAGE_MASK; offset = vaddr & ~PAGE_MASK;
...@@ -618,7 +619,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr, ...@@ -618,7 +619,8 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
} }
/* extract page list from userspace mapping */ /* extract page list from userspace mapping */
ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write); ret = vb2_dc_get_user_pages(start, pages, n_pages, vma,
dma_dir == DMA_FROM_DEVICE);
if (ret) { if (ret) {
unsigned long pfn; unsigned long pfn;
if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) { if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
...@@ -782,7 +784,7 @@ static void vb2_dc_detach_dmabuf(void *mem_priv) ...@@ -782,7 +784,7 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
} }
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
unsigned long size, int write) unsigned long size, enum dma_data_direction dma_dir)
{ {
struct vb2_dc_conf *conf = alloc_ctx; struct vb2_dc_conf *conf = alloc_ctx;
struct vb2_dc_buf *buf; struct vb2_dc_buf *buf;
...@@ -804,7 +806,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, ...@@ -804,7 +806,7 @@ static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
return dba; return dba;
} }
buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; buf->dma_dir = dma_dir;
buf->size = size; buf->size = size;
buf->db_attach = dba; buf->db_attach = dba;
......
...@@ -33,8 +33,8 @@ module_param(debug, int, 0644); ...@@ -33,8 +33,8 @@ module_param(debug, int, 0644);
struct vb2_dma_sg_buf { struct vb2_dma_sg_buf {
void *vaddr; void *vaddr;
struct page **pages; struct page **pages;
int write;
int offset; int offset;
enum dma_data_direction dma_dir;
struct sg_table sg_table; struct sg_table sg_table;
size_t size; size_t size;
unsigned int num_pages; unsigned int num_pages;
...@@ -97,7 +97,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla ...@@ -97,7 +97,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_fla
return NULL; return NULL;
buf->vaddr = NULL; buf->vaddr = NULL;
buf->write = 0; buf->dma_dir = DMA_NONE;
buf->offset = 0; buf->offset = 0;
buf->size = size; buf->size = size;
/* size is already page aligned */ /* size is already page aligned */
...@@ -162,7 +162,8 @@ static inline int vma_is_io(struct vm_area_struct *vma) ...@@ -162,7 +162,8 @@ static inline int vma_is_io(struct vm_area_struct *vma)
} }
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write) unsigned long size,
enum dma_data_direction dma_dir)
{ {
struct vb2_dma_sg_buf *buf; struct vb2_dma_sg_buf *buf;
unsigned long first, last; unsigned long first, last;
...@@ -174,7 +175,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, ...@@ -174,7 +175,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
return NULL; return NULL;
buf->vaddr = NULL; buf->vaddr = NULL;
buf->write = write; buf->dma_dir = dma_dir;
buf->offset = vaddr & ~PAGE_MASK; buf->offset = vaddr & ~PAGE_MASK;
buf->size = size; buf->size = size;
...@@ -221,7 +222,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr, ...@@ -221,7 +222,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
num_pages_from_user = get_user_pages(current, current->mm, num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK, vaddr & PAGE_MASK,
buf->num_pages, buf->num_pages,
write, buf->dma_dir == DMA_FROM_DEVICE,
1, /* force */ 1, /* force */
buf->pages, buf->pages,
NULL); NULL);
...@@ -265,7 +266,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv) ...@@ -265,7 +266,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
vm_unmap_ram(buf->vaddr, buf->num_pages); vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(&buf->sg_table); sg_free_table(&buf->sg_table);
while (--i >= 0) { while (--i >= 0) {
if (buf->write) if (buf->dma_dir == DMA_FROM_DEVICE)
set_page_dirty_lock(buf->pages[i]); set_page_dirty_lock(buf->pages[i]);
if (!vma_is_io(buf->vma)) if (!vma_is_io(buf->vma))
put_page(buf->pages[i]); put_page(buf->pages[i]);
......
...@@ -25,7 +25,7 @@ struct vb2_vmalloc_buf { ...@@ -25,7 +25,7 @@ struct vb2_vmalloc_buf {
void *vaddr; void *vaddr;
struct page **pages; struct page **pages;
struct vm_area_struct *vma; struct vm_area_struct *vma;
int write; enum dma_data_direction dma_dir;
unsigned long size; unsigned long size;
unsigned int n_pages; unsigned int n_pages;
atomic_t refcount; atomic_t refcount;
...@@ -70,7 +70,8 @@ static void vb2_vmalloc_put(void *buf_priv) ...@@ -70,7 +70,8 @@ static void vb2_vmalloc_put(void *buf_priv)
} }
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write) unsigned long size,
enum dma_data_direction dma_dir)
{ {
struct vb2_vmalloc_buf *buf; struct vb2_vmalloc_buf *buf;
unsigned long first, last; unsigned long first, last;
...@@ -82,7 +83,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, ...@@ -82,7 +83,7 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
if (!buf) if (!buf)
return NULL; return NULL;
buf->write = write; buf->dma_dir = dma_dir;
offset = vaddr & ~PAGE_MASK; offset = vaddr & ~PAGE_MASK;
buf->size = size; buf->size = size;
...@@ -107,7 +108,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr, ...@@ -107,7 +108,8 @@ static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
/* current->mm->mmap_sem is taken by videobuf2 core */ /* current->mm->mmap_sem is taken by videobuf2 core */
n_pages = get_user_pages(current, current->mm, n_pages = get_user_pages(current, current->mm,
vaddr & PAGE_MASK, buf->n_pages, vaddr & PAGE_MASK, buf->n_pages,
write, 1, /* force */ dma_dir == DMA_FROM_DEVICE,
1, /* force */
buf->pages, NULL); buf->pages, NULL);
if (n_pages != buf->n_pages) if (n_pages != buf->n_pages)
goto fail_get_user_pages; goto fail_get_user_pages;
...@@ -144,7 +146,7 @@ static void vb2_vmalloc_put_userptr(void *buf_priv) ...@@ -144,7 +146,7 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
if (vaddr) if (vaddr)
vm_unmap_ram((void *)vaddr, buf->n_pages); vm_unmap_ram((void *)vaddr, buf->n_pages);
for (i = 0; i < buf->n_pages; ++i) { for (i = 0; i < buf->n_pages; ++i) {
if (buf->write) if (buf->dma_dir == DMA_FROM_DEVICE)
set_page_dirty_lock(buf->pages[i]); set_page_dirty_lock(buf->pages[i]);
put_page(buf->pages[i]); put_page(buf->pages[i]);
} }
...@@ -240,7 +242,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv) ...@@ -240,7 +242,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
} }
static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
unsigned long size, int write) unsigned long size, enum dma_data_direction dma_dir)
{ {
struct vb2_vmalloc_buf *buf; struct vb2_vmalloc_buf *buf;
...@@ -252,7 +254,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf, ...@@ -252,7 +254,7 @@ static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
buf->dbuf = dbuf; buf->dbuf = dbuf;
buf->write = write; buf->dma_dir = dma_dir;
buf->size = size; buf->size = size;
return buf; return buf;
......
...@@ -87,14 +87,16 @@ struct vb2_mem_ops { ...@@ -87,14 +87,16 @@ struct vb2_mem_ops {
struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags); struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr, void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write); unsigned long size,
enum dma_data_direction dma_dir);
void (*put_userptr)(void *buf_priv); void (*put_userptr)(void *buf_priv);
void (*prepare)(void *buf_priv); void (*prepare)(void *buf_priv);
void (*finish)(void *buf_priv); void (*finish)(void *buf_priv);
void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf, void *(*attach_dmabuf)(void *alloc_ctx, struct dma_buf *dbuf,
unsigned long size, int write); unsigned long size,
enum dma_data_direction dma_dir);
void (*detach_dmabuf)(void *buf_priv); void (*detach_dmabuf)(void *buf_priv);
int (*map_dmabuf)(void *buf_priv); int (*map_dmabuf)(void *buf_priv);
void (*unmap_dmabuf)(void *buf_priv); void (*unmap_dmabuf)(void *buf_priv);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment