Commit 5b6f9abe authored by Stanimir Varbanov, committed by Mauro Carvalho Chehab

media: vb2: add bidirectional flag in vb2_queue

This change gives v4l2 drivers the option to change the default
v4l2-core DMA mapping direction from DMA_TO/FROM_DEVICE (depending
on the buffer type, CAPTURE or OUTPUT) to DMA_BIDIRECTIONAL at
queue_init time.

The DMA mapping direction issue was initially found in the Venus
encoder driver, where the hardware (firmware side) adds a few lines
of padding at the bottom of the image buffer; the consequence is
that IOMMU protection faults are triggered.

This will help support the Venus encoder (and probably other
drivers in the future), which wants to map OUTPUT buffers as
read/write.
Signed-off-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Hans Verkuil <hansverk@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
parent d533d501
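
To illustrate the intended use, here is a minimal, hypothetical queue-setup sketch. The function name foo_init_out_queue, the foo_vb2_ops callbacks, and the choice of dma-contig mem_ops are illustrative assumptions, not part of this patch; the only behavior the patch adds is the effect of setting q->bidirectional before vb2_queue_init().

/*
 * Hypothetical encoder-style driver marking its OUTPUT queue as
 * bidirectional, so vb2 maps the buffers DMA_BIDIRECTIONAL instead
 * of the default DMA_TO_DEVICE.
 */
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

extern const struct vb2_ops foo_vb2_ops;	/* driver's queue callbacks (assumed) */

static int foo_init_out_queue(struct vb2_queue *q, void *drv_priv,
			      struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->ops = &foo_vb2_ops;
	q->mem_ops = &vb2_dma_contig_memops;
	q->drv_priv = drv_priv;
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	q->lock = lock;
	/*
	 * The firmware also writes a few lines of padding into this
	 * OUTPUT buffer, so request a read/write (DMA_BIDIRECTIONAL)
	 * mapping instead of the default DMA_TO_DEVICE.
	 */
	q->bidirectional = 1;

	return vb2_queue_init(q);
}

With the flag set, vb2_core_queue_init() selects DMA_BIDIRECTIONAL for q->dma_dir, and the allocator calls changed below receive that direction.
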
@@ -194,8 +194,6 @@ static void __enqueue_in_driver(struct vb2_buffer *vb);
 static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
 {
 	struct vb2_queue *q = vb->vb2_queue;
-	enum dma_data_direction dma_dir =
-		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	void *mem_priv;
 	int plane;
 	int ret = -ENOMEM;
@@ -209,7 +207,7 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
 		mem_priv = call_ptr_memop(vb, alloc,
 				q->alloc_devs[plane] ? : q->dev,
-				q->dma_attrs, size, dma_dir, q->gfp_flags);
+				q->dma_attrs, size, q->dma_dir, q->gfp_flags);
 		if (IS_ERR_OR_NULL(mem_priv)) {
 			if (mem_priv)
 				ret = PTR_ERR(mem_priv);
@@ -978,8 +976,6 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
 	void *mem_priv;
 	unsigned int plane;
 	int ret = 0;
-	enum dma_data_direction dma_dir =
-		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	bool reacquired = vb->planes[0].mem_priv == NULL;
 
 	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
@@ -1030,7 +1026,7 @@ static int __prepare_userptr(struct vb2_buffer *vb, const void *pb)
 		mem_priv = call_ptr_memop(vb, get_userptr,
 				q->alloc_devs[plane] ? : q->dev,
 				planes[plane].m.userptr,
-				planes[plane].length, dma_dir);
+				planes[plane].length, q->dma_dir);
 		if (IS_ERR(mem_priv)) {
 			dprintk(1, "failed acquiring userspace memory for plane %d\n",
 				plane);
@@ -1096,8 +1092,6 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
 	void *mem_priv;
 	unsigned int plane;
 	int ret = 0;
-	enum dma_data_direction dma_dir =
-		q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	bool reacquired = vb->planes[0].mem_priv == NULL;
 
 	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
@@ -1156,7 +1150,7 @@ static int __prepare_dmabuf(struct vb2_buffer *vb, const void *pb)
 		/* Acquire each plane's memory */
 		mem_priv = call_ptr_memop(vb, attach_dmabuf,
 				q->alloc_devs[plane] ? : q->dev,
-				dbuf, planes[plane].length, dma_dir);
+				dbuf, planes[plane].length, q->dma_dir);
 		if (IS_ERR(mem_priv)) {
 			dprintk(1, "failed to attach dmabuf\n");
 			ret = PTR_ERR(mem_priv);
@@ -2003,6 +1997,11 @@ int vb2_core_queue_init(struct vb2_queue *q)
 	if (q->buf_struct_size == 0)
 		q->buf_struct_size = sizeof(struct vb2_buffer);
 
+	if (q->bidirectional)
+		q->dma_dir = DMA_BIDIRECTIONAL;
+	else
+		q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_core_queue_init);
@@ -508,7 +508,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 	buf->dma_dir = dma_dir;
 
 	offset = vaddr & ~PAGE_MASK;
-	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
+					       dma_dir == DMA_BIDIRECTIONAL);
 	if (IS_ERR(vec)) {
 		ret = PTR_ERR(vec);
 		goto fail_buf;
@@ -239,7 +239,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 	buf->offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
 	buf->dma_sgt = &buf->sg_table;
-	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
+					       dma_dir == DMA_BIDIRECTIONAL);
 	if (IS_ERR(vec))
 		goto userptr_fail_pfnvec;
 	buf->vec = vec;
@@ -292,7 +293,8 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
 	while (--i >= 0) {
-		if (buf->dma_dir == DMA_FROM_DEVICE)
+		if (buf->dma_dir == DMA_FROM_DEVICE ||
+		    buf->dma_dir == DMA_BIDIRECTIONAL)
 			set_page_dirty_lock(buf->pages[i]);
 	}
 	vb2_destroy_framevec(buf->vec);
@@ -87,7 +87,8 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
 	buf->dma_dir = dma_dir;
 	offset = vaddr & ~PAGE_MASK;
 	buf->size = size;
-	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE);
+	vec = vb2_create_framevec(vaddr, size, dma_dir == DMA_FROM_DEVICE ||
+				  dma_dir == DMA_BIDIRECTIONAL);
 	if (IS_ERR(vec)) {
 		ret = PTR_ERR(vec);
 		goto fail_pfnvec_create;
@@ -137,7 +138,8 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
 		pages = frame_vector_pages(buf->vec);
 		if (vaddr)
 			vm_unmap_ram((void *)vaddr, n_pages);
-		if (buf->dma_dir == DMA_FROM_DEVICE)
+		if (buf->dma_dir == DMA_FROM_DEVICE ||
+		    buf->dma_dir == DMA_BIDIRECTIONAL)
 			for (i = 0; i < n_pages; i++)
 				set_page_dirty_lock(pages[i]);
 	} else {
@@ -427,6 +427,16 @@ struct vb2_buf_ops {
  * @dev:	device to use for the default allocation context if the driver
  *		doesn't fill in the @alloc_devs array.
  * @dma_attrs:	DMA attributes to use for the DMA.
+ * @bidirectional: when this flag is set the DMA direction for the buffers of
+ *		this queue will be overridden with the DMA_BIDIRECTIONAL
+ *		direction. This is useful in cases where the hardware
+ *		(firmware) writes to a buffer which is mapped for read
+ *		(DMA_TO_DEVICE), or reads from a buffer which is mapped for
+ *		write (DMA_FROM_DEVICE), in order to satisfy some internal
+ *		hardware restrictions or to add padding needed by the
+ *		processing algorithm. If the DMA mapping is not bidirectional
+ *		but the hardware (firmware) accesses the buffer in the
+ *		opposite direction, this could lead to IOMMU protection faults.
  * @fileio_read_once:		report EOF after reading the first buffer
  * @fileio_write_immediately:	queue buffer after each write() call
  * @allow_zero_bytesused:	allow bytesused == 0 to be passed to the driver
@@ -465,6 +475,7 @@ struct vb2_buf_ops {
  * Private elements (won't appear at the uAPI book):
  * @mmap_lock:	private mutex used when buffers are allocated/freed/mmapped
  * @memory:	current memory type used
+ * @dma_dir:	DMA mapping direction.
  * @bufs:	videobuf buffer structures
  * @num_buffers:	number of allocated/used buffers
  * @queued_list:	list of buffers currently queued from userspace
@@ -495,6 +506,7 @@ struct vb2_queue {
 	unsigned int			io_modes;
 	struct device			*dev;
 	unsigned long			dma_attrs;
+	unsigned			bidirectional:1;
 	unsigned			fileio_read_once:1;
 	unsigned			fileio_write_immediately:1;
 	unsigned			allow_zero_bytesused:1;
@@ -516,6 +528,7 @@ struct vb2_queue {
 	/* private: internal use only */
 	struct mutex			mmap_lock;
 	unsigned int			memory;
+	enum dma_data_direction	dma_dir;
 	struct vb2_buffer		*bufs[VB2_MAX_FRAME];
 	unsigned int			num_buffers;