Commit cb132cd5 authored by Mauro Carvalho Chehab

[media] videobuf-dma-contig: remove support for cached mem

videobuf_queue_dma_contig_init_cached() is not used anywhere.
Drop support for it, cleaning up the code a little bit.
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Acked-by: Hans Verkuil <hans.verkuil@cisco.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
parent d95d7c64
......@@ -27,7 +27,6 @@ struct videobuf_dma_contig_memory {
u32 magic;
void *vaddr;
dma_addr_t dma_handle;
bool cached;
unsigned long size;
};
......@@ -43,24 +42,6 @@ static int __videobuf_dc_alloc(struct device *dev,
unsigned long size, gfp_t flags)
{
mem->size = size;
if (mem->cached) {
mem->vaddr = alloc_pages_exact(mem->size, flags | GFP_DMA);
if (mem->vaddr) {
int err;
mem->dma_handle = dma_map_single(dev, mem->vaddr,
mem->size,
DMA_FROM_DEVICE);
err = dma_mapping_error(dev, mem->dma_handle);
if (err) {
dev_err(dev, "dma_map_single failed\n");
free_pages_exact(mem->vaddr, mem->size);
mem->vaddr = NULL;
return err;
}
}
} else
mem->vaddr = dma_alloc_coherent(dev, mem->size,
&mem->dma_handle, flags);
......@@ -77,13 +58,6 @@ static int __videobuf_dc_alloc(struct device *dev,
static void __videobuf_dc_free(struct device *dev,
struct videobuf_dma_contig_memory *mem)
{
if (mem->cached) {
if (!mem->vaddr)
return;
dma_unmap_single(dev, mem->dma_handle, mem->size,
DMA_FROM_DEVICE);
free_pages_exact(mem->vaddr, mem->size);
} else
dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
mem->vaddr = NULL;
......@@ -234,7 +208,7 @@ static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
return ret;
}
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
static struct videobuf_buffer *__videobuf_alloc(size_t size)
{
struct videobuf_dma_contig_memory *mem;
struct videobuf_buffer *vb;
......@@ -244,22 +218,11 @@ static struct videobuf_buffer *__videobuf_alloc_vb(size_t size, bool cached)
vb->priv = ((char *)vb) + size;
mem = vb->priv;
mem->magic = MAGIC_DC_MEM;
mem->cached = cached;
}
return vb;
}
/* alloc_vb hook for the uncached queue ops: delegate to the common
 * allocator with caching disabled. */
static struct videobuf_buffer *__videobuf_alloc_uncached(size_t size)
{
	struct videobuf_buffer *vb = __videobuf_alloc_vb(size, false);

	return vb;
}
/* alloc_vb hook for the cached queue ops: delegate to the common
 * allocator with caching enabled. */
static struct videobuf_buffer *__videobuf_alloc_cached(size_t size)
{
	struct videobuf_buffer *vb = __videobuf_alloc_vb(size, true);

	return vb;
}
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
......@@ -310,19 +273,6 @@ static int __videobuf_iolock(struct videobuf_queue *q,
return 0;
}
/*
 * Make data DMA'd by the device visible to the CPU for a cached buffer:
 * sync the single mapping in the DMA_FROM_DEVICE direction.
 * Always returns 0.
 */
static int __videobuf_sync(struct videobuf_queue *q,
			   struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *memory = buf->priv;

	BUG_ON(!memory);
	MAGIC_CHECK(memory->magic, MAGIC_DC_MEM);

	dma_sync_single_for_cpu(q->dev, memory->dma_handle, memory->size,
				DMA_FROM_DEVICE);

	return 0;
}
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_buffer *buf,
struct vm_area_struct *vma)
......@@ -331,8 +281,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
struct videobuf_mapping *map;
int retval;
unsigned long size;
unsigned long pos, start = vma->vm_start;
struct page *page;
dev_dbg(q->dev, "%s\n", __func__);
......@@ -359,7 +307,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
size = vma->vm_end - vma->vm_start;
size = (size < mem->size) ? size : mem->size;
if (!mem->cached) {
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
retval = remap_pfn_range(vma, vma->vm_start,
mem->dma_handle >> PAGE_SHIFT,
......@@ -371,32 +318,6 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
mem->vaddr, mem->dma_handle);
goto error;
}
} else {
pos = (unsigned long)mem->vaddr;
while (size > 0) {
page = virt_to_page((void *)pos);
if (NULL == page) {
dev_err(q->dev, "mmap: virt_to_page failed\n");
__videobuf_dc_free(q->dev, mem);
goto error;
}
retval = vm_insert_page(vma, start, page);
if (retval) {
dev_err(q->dev, "mmap: insert failed with error %d\n",
retval);
__videobuf_dc_free(q->dev, mem);
goto error;
}
start += PAGE_SIZE;
pos += PAGE_SIZE;
if (size > PAGE_SIZE)
size -= PAGE_SIZE;
else
size = 0;
}
}
vma->vm_ops = &videobuf_vm_ops;
vma->vm_flags |= VM_DONTEXPAND;
......@@ -417,21 +338,12 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
/*
 * Queue-type operations for contiguous DMA buffers (uncached/coherent
 * mappings only, so no .sync callback is needed).
 */
static struct videobuf_qtype_ops qops = {
	.magic		= MAGIC_QTYPE_OPS,
	/* was __videobuf_alloc_uncached; duplicate initializer removed,
	 * only the current allocator is kept */
	.alloc_vb	= __videobuf_alloc,
	.iolock		= __videobuf_iolock,
	.mmap_mapper	= __videobuf_mmap_mapper,
	.vaddr		= __videobuf_to_vaddr,
};
/*
 * Queue-type operations for cached contiguous DMA buffers: same hooks as
 * the uncached table, plus a .sync callback so the CPU view can be made
 * coherent after device DMA (see __videobuf_sync).
 */
static struct videobuf_qtype_ops qops_cached = {
.magic = MAGIC_QTYPE_OPS,
.alloc_vb = __videobuf_alloc_cached,
.iolock = __videobuf_iolock,
.sync = __videobuf_sync,
.mmap_mapper = __videobuf_mmap_mapper,
.vaddr = __videobuf_to_vaddr,
};
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
......@@ -447,20 +359,6 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
/**
 * videobuf_queue_dma_contig_init_cached - initialize a videobuf queue
 * backed by cached contiguous DMA memory
 *
 * Identical to videobuf_queue_dma_contig_init() except that it installs
 * the cached ops table (qops_cached), whose mappings require an explicit
 * sync step after device DMA. All arguments are forwarded unchanged to
 * videobuf_queue_core_init().
 */
void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
void *priv, struct mutex *ext_lock)
{
videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
priv, &qops_cached, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init_cached);
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
......
......@@ -26,16 +26,6 @@ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
void *priv,
struct mutex *ext_lock);
void videobuf_queue_dma_contig_init_cached(struct videobuf_queue *q,
const struct videobuf_queue_ops *ops,
struct device *dev,
spinlock_t *irqlock,
enum v4l2_buf_type type,
enum v4l2_field field,
unsigned int msize,
void *priv,
struct mutex *ext_lock);
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
void videobuf_dma_contig_free(struct videobuf_queue *q,
struct videobuf_buffer *buf);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment