Commit f82ad60e authored by Laura Abbott, committed by Greg Kroah-Hartman

staging: android: ion: Get rid of map_dma/unmap_dma

The map_dma interface was designed to generate an sg_table. In
practice, every heap just creates the table at allocation time and
returns that one table, and unmap_dma does nothing. Get rid of the
API and assign the sg_table directly.
Signed-off-by: Laura Abbott <labbott@redhat.com>
Reviewed-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent da792ccb
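
For context, the shape of the change: heaps stop implementing a
boilerplate map_dma callback and instead assign buffer->sg_table in
their allocate op. A minimal before/after sketch, assuming a
hypothetical heap ("my_heap") whose heap-specific table construction
is stubbed out as build_my_table():

	/*
	 * Before: the heap stashed its table in priv_virt at allocation
	 * time and returned it from a map_dma callback that every heap
	 * implemented identically (unmap_dma was a no-op).
	 */
	static struct sg_table *my_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
	{
		return buffer->priv_virt;
	}

	/*
	 * After: ->allocate stores the table in buffer->sg_table and the
	 * ion core reads that field directly; the map_dma/unmap_dma
	 * callbacks (and their entries in struct ion_heap_ops) go away.
	 */
	static int my_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long len, unsigned long align,
				    unsigned long flags)
	{
		struct sg_table *table = build_my_table(len); /* hypothetical */

		if (!table)
			return -ENOMEM;
		buffer->sg_table = table;
		return 0;
	}

With the callbacks gone, ion_buffer_create() can sanity-check that the
heap filled in buffer->sg_table, as the first hunk below does.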
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -205,19 +205,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		goto err2;
 	}
 
-	buffer->dev = dev;
-	buffer->size = len;
-
-	table = heap->ops->map_dma(heap, buffer);
-	if (WARN_ONCE(table == NULL,
-		      "heap->ops->map_dma should return ERR_PTR on error"))
-		table = ERR_PTR(-EINVAL);
-	if (IS_ERR(table)) {
+	if (buffer->sg_table == NULL) {
+		WARN_ONCE(1, "This heap needs to set the sgtable");
 		ret = -EINVAL;
 		goto err1;
 	}
 
-	buffer->sg_table = table;
+	table = buffer->sg_table;
+	buffer->dev = dev;
+	buffer->size = len;
+
 	if (ion_buffer_fault_user_mappings(buffer)) {
 		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
 		struct scatterlist *sg;
@@ -226,7 +223,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
 		if (!buffer->pages) {
 			ret = -ENOMEM;
-			goto err;
+			goto err1;
 		}
 
 		for_each_sg(table->sgl, sg, table->nents, i) {
@@ -260,8 +257,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	mutex_unlock(&dev->buffer_lock);
 	return buffer;
 
-err:
-	heap->ops->unmap_dma(heap, buffer);
 err1:
 	heap->ops->free(buffer);
 err2:
@@ -273,7 +268,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
 {
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
 	buffer->heap->ops->free(buffer);
 	vfree(buffer->pages);
 	kfree(buffer);
@@ -1542,8 +1536,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
 	struct dentry *debug_file;
 
-	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
-	    !heap->ops->unmap_dma)
+	if (!heap->ops->allocate || !heap->ops->free)
 		pr_err("%s: can not add heap with invalid ops struct.\n",
 		       __func__);
 
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -82,7 +82,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
 	}
 
 	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 
 	return 0;
@@ -96,7 +96,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
 static void ion_carveout_heap_free(struct ion_buffer *buffer)
 {
 	struct ion_heap *heap = buffer->heap;
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct page *page = sg_page(table->sgl);
 	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
@@ -111,22 +111,9 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }
 
-static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
-						  struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
-					struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops carveout_heap_ops = {
 	.allocate = ion_carveout_heap_allocate,
 	.free = ion_carveout_heap_free,
-	.map_dma = ion_carveout_heap_map_dma,
-	.unmap_dma = ion_carveout_heap_unmap_dma,
 	.map_user = ion_heap_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -75,7 +75,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 		sg = sg_next(sg);
 	}
 
-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 	chunk_heap->allocated += allocated_size;
 	return 0;
 err:
@@ -95,7 +95,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	struct ion_heap *heap = buffer->heap;
 	struct ion_chunk_heap *chunk_heap =
 		container_of(heap, struct ion_chunk_heap, heap);
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct scatterlist *sg;
 	int i;
 	unsigned long allocated_size;
@@ -117,22 +117,9 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }
 
-static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
-					       struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
-				     struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops chunk_heap_ops = {
 	.allocate = ion_chunk_heap_allocate,
 	.free = ion_chunk_heap_free,
-	.map_dma = ion_chunk_heap_map_dma,
-	.unmap_dma = ion_chunk_heap_unmap_dma,
 	.map_user = ion_heap_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -78,6 +78,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 		goto free_table;
 	/* keep this for memory release */
 	buffer->priv_virt = info;
+	buffer->sg_table = info->table;
 	dev_dbg(dev, "Allocate buffer %p\n", buffer);
 	return 0;
@@ -105,19 +106,6 @@ static void ion_cma_free(struct ion_buffer *buffer)
 	kfree(info);
 }
 
-static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
-					     struct ion_buffer *buffer)
-{
-	struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-	return info->table;
-}
-
-static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
-				   struct ion_buffer *buffer)
-{
-}
-
 static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
 			struct vm_area_struct *vma)
 {
@@ -145,8 +133,6 @@ static void ion_cma_unmap_kernel(struct ion_heap *heap,
 static struct ion_heap_ops ion_cma_ops = {
 	.allocate = ion_cma_allocate,
 	.free = ion_cma_free,
-	.map_dma = ion_cma_heap_map_dma,
-	.unmap_dma = ion_cma_heap_unmap_dma,
 	.map_user = ion_cma_mmap,
 	.map_kernel = ion_cma_map_kernel,
 	.unmap_kernel = ion_cma_unmap_kernel,
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -86,8 +86,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
  * struct ion_heap_ops - ops to operate on a given heap
  * @allocate:		allocate memory
  * @free:		free memory
- * @map_dma		map the memory for dma to a scatterlist
- * @unmap_dma		unmap the memory for dma
  * @map_kernel		map memory to the kernel
  * @unmap_kernel	unmap memory to the kernel
  * @map_user		map memory to userspace
@@ -104,9 +102,6 @@ struct ion_heap_ops {
 			struct ion_buffer *buffer, unsigned long len,
 			unsigned long align, unsigned long flags);
 	void (*free)(struct ion_buffer *buffer);
-	struct sg_table * (*map_dma)(struct ion_heap *heap,
-				     struct ion_buffer *buffer);
-	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
 	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -164,7 +164,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		list_del(&page->lru);
 	}
 
-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 	return 0;
 
 free_table:
@@ -199,17 +199,6 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }
 
-static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-						struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_system_heap_unmap_dma(struct ion_heap *heap,
-				      struct ion_buffer *buffer)
-{
-}
-
 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 				  int nr_to_scan)
 {
@@ -243,8 +232,6 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 static struct ion_heap_ops system_heap_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
-	.map_dma = ion_system_heap_map_dma,
-	.unmap_dma = ion_system_heap_unmap_dma,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,
@@ -358,7 +345,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 	sg_set_page(table->sgl, page, len, 0);
-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 
 	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
@@ -375,7 +362,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 {
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct page *page = sg_page(table->sgl);
 	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
 	unsigned long i;
@@ -386,22 +373,9 @@ static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }
 
-static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
-						       struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
-					     struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops kmalloc_ops = {
 	.allocate = ion_system_contig_heap_allocate,
 	.free = ion_system_contig_heap_free,
-	.map_dma = ion_system_contig_heap_map_dma,
-	.unmap_dma = ion_system_contig_heap_unmap_dma,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,