Commit f63958d8 authored by Colin Cross, committed by Greg Kroah-Hartman

ion: fix sparse warnings

Fix sparse warnings in ion: mark file-local functions and variables static, return NULL rather than plain 0 from pointer-returning functions, make the flag-to-bool conversion in ion_page_pool_shrink() explicit with !!, and declare the allocation flag variables as gfp_t instead of unsigned int.
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5c6a4705
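These are the classes of warning sparse raises against this code when the kernel is built with make C=1 (check files being recompiled) or make C=2 (check every source file): file-local symbols that lack static, plain integer 0 used where a null pointer is expected, an implicit flag-to-bool conversion, and plain unsigned int mixed with the restricted gfp_t type. As a hypothetical, minimal sketch (names invented, not from the ion sources), the integer-as-NULL class fixed by the {0} to {NULL} and return 0 to return NULL hunks below looks like this:

/* Hypothetical sketch, not taken from the ion sources. */
#include <stddef.h>

static const char *names[4] = {0};      /* sparse: "Using plain integer as NULL pointer" */
static const char *fixed[4] = {NULL};   /* clean: the initializer really is a pointer */

static const char *lookup(unsigned int i)
{
        if (i >= 4)
                return 0;               /* same warning: 0 is a plain int, not a pointer */
        return fixed[i] ? fixed[i] : names[i];
}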
@@ -669,7 +669,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
struct ion_client *client = s->private;
struct rb_node *n;
size_t sizes[ION_NUM_HEAP_IDS] = {0};
-const char *names[ION_NUM_HEAP_IDS] = {0};
+const char *names[ION_NUM_HEAP_IDS] = {NULL};
int i;
mutex_lock(&client->lock);
@@ -887,7 +887,7 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
mutex_unlock(&buffer->lock);
}
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ion_buffer *buffer = vma->vm_private_data;
unsigned long pfn;
@@ -939,7 +939,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
mutex_unlock(&buffer->lock);
}
-struct vm_operations_struct ion_vma_ops = {
+static struct vm_operations_struct ion_vma_ops = {
.open = ion_vm_open,
.close = ion_vm_close,
.fault = ion_vm_fault,
@@ -1030,7 +1030,7 @@ static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
mutex_unlock(&buffer->lock);
}
-struct dma_buf_ops dma_buf_ops = {
+static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = ion_map_dma_buf,
.unmap_dma_buf = ion_unmap_dma_buf,
.mmap = ion_mmap,
......
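The static qualifiers added in ion.c above, and throughout the heap files below, address sparse's check for symbols that are defined in a .c file but declared in no header: anything referenced only inside its own translation unit, even if only through an ops table, should be static. A hypothetical sketch of the pattern and the warnings it silences (demo_* names are invented; assumes the usual kernel headers):

/* Without "static" on both definitions, sparse reports something like:
 *   warning: symbol 'demo_vm_fault' was not declared. Should it be static?
 *   warning: symbol 'demo_vma_ops' was not declared. Should it be static?
 * even though both are used only within this file.
 */
#include <linux/mm.h>

static int demo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;         /* placeholder handler body */
}

static struct vm_operations_struct demo_vma_ops = {
        .fault = demo_vm_fault,
};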
@@ -85,7 +85,7 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
buffer->priv_phys = ION_CARVEOUT_ALLOCATE_FAIL;
}
-struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct sg_table *table;
@@ -104,7 +104,7 @@ struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
return table;
}
-void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
sg_free_table(buffer->sg_table);
......
@@ -115,13 +115,13 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return;
......
@@ -44,7 +44,7 @@ struct ion_cma_buffer_info {
* This function could be replaced by dma_common_get_sgtable
* as soon as it will avalaible.
*/
-int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size)
{
struct page *page = virt_to_page(cpu_addr);
@@ -137,7 +137,7 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
return 0;
}
-struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
@@ -145,7 +145,7 @@ struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
return info->table;
}
-void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return;
@@ -162,7 +162,8 @@ static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
buffer->size);
}
-void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+struct ion_buffer *buffer)
{
struct ion_cma_buffer_info *info = buffer->priv_virt;
/* kernel memory mapping has been done at allocation time */
......
@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
struct page **tmp = pages;
if (!pages)
-return 0;
+return NULL;
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
@@ -193,7 +193,7 @@ size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
return total_drained;
}
-int ion_heap_deferred_free(void *data)
+static int ion_heap_deferred_free(void *data)
{
struct ion_heap *heap = data;
......
@@ -134,7 +134,7 @@ int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
int i;
bool high;
-high = gfp_mask & __GFP_HIGHMEM;
+high = !!(gfp_mask & __GFP_HIGHMEM);
if (nr_to_scan == 0)
return ion_page_pool_total(pool, high);
......
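In ion_page_pool_shrink() above, high is a bool while gfp_mask & __GFP_HIGHMEM evaluates to the raw bit value; the added !! collapses that value to 0 or 1 before the assignment, making the bool conversion explicit instead of relying on implicit truncation. A minimal, hypothetical illustration of the idiom (DEMO_* names are invented):

#include <stdbool.h>

#define DEMO_FLAG_HIGHMEM       0x02u   /* hypothetical flag bit */

static bool demo_wants_highmem(unsigned int mask)
{
        /* !! turns any non-zero masked value into exactly 1 */
        return !!(mask & DEMO_FLAG_HIGHMEM);
}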
@@ -26,11 +26,9 @@
#include "ion.h"
#include "ion_priv.h"
-static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
-__GFP_NOWARN | __GFP_NORETRY) &
-~__GFP_WAIT;
-static unsigned int low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
-__GFP_NOWARN);
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+__GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
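gfp_t is a sparse "restricted" type (declared with __bitwise in include/linux/types.h), so holding GFP_* flag combinations in a plain unsigned int, as the old declarations above did, makes sparse flag every use as mixing different base types; typing the file-local flag variables as gfp_t, as the new lines do, is the fix. A small hypothetical sketch of flag variables declared with the proper type (demo_* names are invented; assumes kernel headers):

#include <linux/gfp.h>

/* File-local allocation flags kept as gfp_t, so sparse does not complain
 * about mixing a restricted type with plain unsigned int. */
static gfp_t demo_gfp_flags = GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN;

static struct page *demo_alloc(unsigned int order)
{
        return alloc_pages(demo_gfp_flags, order);      /* alloc_pages() takes a gfp_t */
}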
@@ -76,12 +74,12 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
gfp_flags = high_order_gfp_flags;
page = alloc_pages(gfp_flags, order);
if (!page)
-return 0;
+return NULL;
ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
DMA_BIDIRECTIONAL);
}
if (!page)
-return 0;
+return NULL;
return page;
}
@@ -187,7 +185,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
return -ENOMEM;
}
-void ion_system_heap_free(struct ion_buffer *buffer)
+static void ion_system_heap_free(struct ion_buffer *buffer)
{
struct ion_heap *heap = buffer->heap;
struct ion_system_heap *sys_heap = container_of(heap,
@@ -211,13 +209,13 @@ void ion_system_heap_free(struct ion_buffer *buffer)
kfree(table);
}
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return;
@@ -403,7 +401,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
return ret;
}
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
struct sg_table *table = buffer->priv_virt;
struct page *page = sg_page(table->sgl);
@@ -427,13 +425,13 @@ static int ion_system_contig_heap_phys(struct ion_heap *heap,
return 0;
}
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
return buffer->priv_virt;
}
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
}
......