Commit b037c0fd authored by Marek Szyprowski, committed by Mauro Carvalho Chehab

[media] media: vb2: fix potential deadlock in mmap vs. get_userptr handling

To get direct access to userspace memory pages, the vb2 allocator needs
to take read access on the mmap semaphore of the current process. The
same semaphore is taken before calling the mmap operation, while both
mmap and qbuf are called by the driver or the v4l2 core with the
driver's lock held. To avoid an AB-BA deadlock (mmap_sem then driver's
lock in mmap, and driver's lock then mmap_sem in qbuf), the videobuf2
core releases the driver's lock, takes mmap_sem, and then takes the
driver's lock again. The get_userptr methods are now called with all
needed locks already taken, avoiding further locking magic inside the
memory allocators' code.
Reported-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
CC: Pawel Osciak <pawel@osciak.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
parent f0b7c7fc
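
The fix is purely a lock-ordering protocol. Below is a minimal userspace
sketch of that protocol, with pthreads standing in for the kernel's mutex
and rw_semaphore; every name in it (driver_lock, fake_mmap_sem, the two
path functions) is illustrative, not kernel API:

#include <pthread.h>

static pthread_mutex_t driver_lock = PTHREAD_MUTEX_INITIALIZER;     /* the driver's lock */
static pthread_rwlock_t fake_mmap_sem = PTHREAD_RWLOCK_INITIALIZER; /* stands in for mmap_sem */

/* mmap path: mmap_sem is already held when the driver's lock is taken. */
static void mmap_path(void)
{
	pthread_rwlock_rdlock(&fake_mmap_sem);
	pthread_mutex_lock(&driver_lock);
	/* ... set up the mapping ... */
	pthread_mutex_unlock(&driver_lock);
	pthread_rwlock_unlock(&fake_mmap_sem);
}

/*
 * qbuf path: entered with the driver's lock held. Taking fake_mmap_sem
 * here directly would invert the order used by mmap_path() (AB-BA).
 * Instead: drop the driver's lock, take the semaphore, retake the lock.
 */
static void qbuf_path(void)
{
	pthread_mutex_lock(&driver_lock);

	pthread_mutex_unlock(&driver_lock);   /* like wait_prepare() */
	pthread_rwlock_rdlock(&fake_mmap_sem);
	pthread_mutex_lock(&driver_lock);     /* like wait_finish() */

	/* ... queue the buffer; user pages can be pinned safely ... */

	pthread_rwlock_unlock(&fake_mmap_sem);
	pthread_mutex_unlock(&driver_lock);
}

Both paths now acquire fake_mmap_sem strictly before driver_lock, so the
cycle that caused the deadlock cannot form.
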
drivers/media/video/videobuf2-core.c

@@ -1082,46 +1082,76 @@ EXPORT_SYMBOL_GPL(vb2_prepare_buf);
  */
 int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
 {
+	struct rw_semaphore *mmap_sem = NULL;
 	struct vb2_buffer *vb;
-	int ret;
+	int ret = 0;
+
+	/*
+	 * In case of user pointer buffers vb2 allocator needs to get direct
+	 * access to userspace pages. This requires getting read access on
+	 * mmap semaphore in the current process structure. The same
+	 * semaphore is taken before calling mmap operation, while both mmap
+	 * and qbuf are called by the driver or v4l2 core with driver's lock
+	 * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in
+	 * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core
+	 * releases driver's lock, takes mmap_sem and then takes driver's
+	 * lock again.
+	 *
+	 * To avoid a race with other vb2 calls, which might be called after
+	 * releasing driver's lock, this operation is performed at the
+	 * beginning of qbuf processing. This way the queue status is
+	 * consistent after getting driver's lock back.
+	 */
+	if (q->memory == V4L2_MEMORY_USERPTR) {
+		mmap_sem = &current->mm->mmap_sem;
+		call_qop(q, wait_prepare, q);
+		down_read(mmap_sem);
+		call_qop(q, wait_finish, q);
+	}
+
 	if (q->fileio) {
 		dprintk(1, "qbuf: file io in progress\n");
-		return -EBUSY;
+		ret = -EBUSY;
+		goto unlock;
 	}
 
 	if (b->type != q->type) {
 		dprintk(1, "qbuf: invalid buffer type\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	if (b->index >= q->num_buffers) {
 		dprintk(1, "qbuf: buffer index out of range\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	vb = q->bufs[b->index];
 	if (NULL == vb) {
 		/* Should never happen */
 		dprintk(1, "qbuf: buffer is NULL\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	if (b->memory != q->memory) {
 		dprintk(1, "qbuf: invalid memory type\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	switch (vb->state) {
 	case VB2_BUF_STATE_DEQUEUED:
 		ret = __buf_prepare(vb, b);
 		if (ret)
-			return ret;
+			goto unlock;
 	case VB2_BUF_STATE_PREPARED:
 		break;
 	default:
 		dprintk(1, "qbuf: buffer already in use\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	/*
@@ -1142,7 +1172,10 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
 	__fill_v4l2_buffer(vb, b);
 
 	dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
-	return 0;
+unlock:
+	if (mmap_sem)
+		up_read(mmap_sem);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vb2_qbuf);
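
The call_qop(q, wait_prepare, q) / call_qop(q, wait_finish, q) pair above
is what lets the core drop and retake the driver's lock. A hedged sketch
of the callbacks a driver would supply follows, assuming a hypothetical
my_dev whose ioctls are serialized by a single mutex; vb2_get_drv_priv()
and the vb2_ops hooks are real vb2 API, everything named my_* is
illustrative:

#include <linux/mutex.h>
#include <media/videobuf2-core.h>

struct my_dev {
	struct mutex lock;	/* serializes all ioctls, incl. mmap and qbuf */
	struct vb2_queue queue;
};

/* Called by vb2 right before it may sleep or take mmap_sem. */
static void my_wait_prepare(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	mutex_unlock(&dev->lock);
}

/* Called by vb2 once it is safe to hold the driver's lock again. */
static void my_wait_finish(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	mutex_lock(&dev->lock);
}

static const struct vb2_ops my_qops = {
	/* ... queue_setup, buf_queue, ... */
	.wait_prepare	= my_wait_prepare,
	.wait_finish	= my_wait_finish,
};
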
drivers/media/video/videobuf2-dma-sg.c

@@ -140,7 +140,6 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	if (!buf->pages)
 		goto userptr_fail_pages_array_alloc;
 
-	down_read(&current->mm->mmap_sem);
 	num_pages_from_user = get_user_pages(current, current->mm,
 					     vaddr & PAGE_MASK,
 					     buf->sg_desc.num_pages,
@@ -148,7 +147,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
 					     1, /* force */
 					     buf->pages,
 					     NULL);
-	up_read(&current->mm->mmap_sem);
+
 	if (num_pages_from_user != buf->sg_desc.num_pages)
 		goto userptr_fail_get_user_pages;
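
With the down_read()/up_read() pair gone, vb2_dma_sg_get_userptr() relies
on vb2_qbuf() having already read-locked mmap_sem before calling it. A
speculative sketch of how an allocator could assert that contract; the
WARN_ON guard is illustrative and not part of this patch:

#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/sched.h>

/* Illustrative guard at the top of a get_userptr implementation. */
static void assert_mmap_sem_held(void)
{
	/* get_user_pages() and find_vma() require mmap_sem read-held */
	WARN_ON(!rwsem_is_locked(&current->mm->mmap_sem));
}
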
drivers/media/video/videobuf2-memops.c

@@ -100,29 +100,26 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
 	unsigned long offset, start, end;
 	unsigned long this_pfn, prev_pfn;
 	dma_addr_t pa = 0;
-	int ret = -EFAULT;
 
 	start = vaddr;
 	offset = start & ~PAGE_MASK;
 	end = start + size;
 
-	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, start);
 
 	if (vma == NULL || vma->vm_end < end)
-		goto done;
+		return -EFAULT;
 
 	for (prev_pfn = 0; start < end; start += PAGE_SIZE) {
-		ret = follow_pfn(vma, start, &this_pfn);
+		int ret = follow_pfn(vma, start, &this_pfn);
 		if (ret)
-			goto done;
+			return ret;
 
 		if (prev_pfn == 0)
 			pa = this_pfn << PAGE_SHIFT;
-		else if (this_pfn != prev_pfn + 1) {
-			ret = -EFAULT;
-			goto done;
-		}
+		else if (this_pfn != prev_pfn + 1)
+			return -EFAULT;
+
 		prev_pfn = this_pfn;
 	}
@@ -130,16 +127,11 @@ int vb2_get_contig_userptr(unsigned long vaddr, unsigned long size,
 	 * Memory is contigous, lock vma and return to the caller
 	 */
 	*res_vma = vb2_get_vma(vma);
-	if (*res_vma == NULL) {
-		ret = -ENOMEM;
-		goto done;
-	}
+	if (*res_vma == NULL)
+		return -ENOMEM;
 
-	*res_pa = pa + offset;
-	ret = 0;
-
-done:
-	up_read(&mm->mmap_sem);
-	return ret;
+	*res_pa = pa + offset;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_get_contig_userptr);
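
After this change vb2_get_contig_userptr() neither takes nor drops
mmap_sem itself; find_vma() and follow_pfn() simply assume the caller
(ultimately vb2_qbuf()) holds it. A hedged sketch of an allocator-side
get_userptr() built on the reworked helper; struct my_buf and
my_get_userptr() are hypothetical:

#include <linux/err.h>
#include <linux/slab.h>
#include <media/videobuf2-memops.h>

struct my_buf {
	struct vm_area_struct	*vma;
	dma_addr_t		dma_addr;
	unsigned long		size;
};

static void *my_get_userptr(void *alloc_ctx, unsigned long vaddr,
			    unsigned long size, int write)
{
	struct my_buf *buf;
	int ret;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	/* mmap_sem is already read-held by the vb2 core at this point */
	ret = vb2_get_contig_userptr(vaddr, size, &buf->vma, &buf->dma_addr);
	if (ret) {
		kfree(buf);
		return ERR_PTR(ret);
	}

	buf->size = size;
	return buf;
}
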