Commit 7e416174 authored by Sriram Raghunathan, committed by Greg Kroah-Hartman

staging: android: Fix checkpatch block comments warnings

This patch fixes the checkpatch warnings for block
comments in the staging/android driver.
Signed-off-by: Sriram Raghunathan <sriram@marirs.net.in>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 56b4c049
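For reference, the warnings being fixed: checkpatch.pl flags multi-line comments whose continuation lines lack a leading star, or whose closing marker shares a line with comment text (warnings along the lines of "Block comments use * on subsequent lines" and "Block comments use a trailing */ on a separate line"; they can be reproduced against a single file with scripts/checkpatch.pl -f <file>). A minimal before/after sketch of the transformation this patch applies throughout:

/* flagged style: the continuation line has no leading star
   and the closing marker shares the final text line */

/*
 * preferred style: the opener stands alone, each subsequent
 * line starts with a star, and the closing marker gets its
 * own line
 */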
 /*
  * drivers/staging/android/ion/compat_ion.h
  *
  * Copyright (C) 2013 Google, Inc.
...
 /*
  *
  * drivers/staging/android/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
...
@@ -244,14 +244,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	buffer->size = len;
 	INIT_LIST_HEAD(&buffer->vmas);
 	mutex_init(&buffer->lock);
-	/* this will set up dma addresses for the sglist -- it is not
-	   technically correct as per the dma api -- a specific
-	   device isn't really taking ownership here. However, in practice on
-	   our systems the only dma_address space is physical addresses.
-	   Additionally, we can't afford the overhead of invalidating every
-	   allocation via dma_map_sg. The implicit contract here is that
-	   memory coming from the heaps is ready for dma, ie if it has a
-	   cached mapping that mapping has been invalidated */
+	/*
+	 * this will set up dma addresses for the sglist -- it is not
+	 * technically correct as per the dma api -- a specific
+	 * device isn't really taking ownership here. However, in practice on
+	 * our systems the only dma_address space is physical addresses.
+	 * Additionally, we can't afford the overhead of invalidating every
+	 * allocation via dma_map_sg. The implicit contract here is that
+	 * memory coming from the heaps is ready for dma, ie if it has a
+	 * cached mapping that mapping has been invalidated
+	 */
 	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
 		sg_dma_address(sg) = sg_phys(sg);
 	mutex_lock(&dev->buffer_lock);
@@ -753,8 +755,10 @@ struct ion_client *ion_client_create(struct ion_device *dev,
 	get_task_struct(current->group_leader);
 	task_lock(current->group_leader);
 	pid = task_pid_nr(current->group_leader);
-	/* don't bother to store task struct for kernel threads,
-	   they can't be killed anyway */
+	/*
+	 * don't bother to store task struct for kernel threads,
+	 * they can't be killed anyway
+	 */
 	if (current->group_leader->flags & PF_KTHREAD) {
 		put_task_struct(current->group_leader);
 		task = NULL;
@@ -1521,8 +1525,10 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 	heap->dev = dev;
 	down_write(&dev->lock);
-	/* use negative heap->id to reverse the priority -- when traversing
-	   the list later attempt higher id numbers first */
+	/*
+	 * use negative heap->id to reverse the priority -- when traversing
+	 * the list later attempt higher id numbers first
+	 */
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
 	debug_file = debugfs_create_file(heap->name, 0664,
...
@@ -28,10 +28,12 @@ struct ion_mapper;
 struct ion_client;
 struct ion_buffer;

-/* This should be removed some day when phys_addr_t's are fully
-   plumbed in the kernel, and all instances of ion_phys_addr_t should
-   be converted to phys_addr_t. For the time being many kernel interfaces
-   do not accept phys_addr_t's that would have to */
+/*
+ * This should be removed some day when phys_addr_t's are fully
+ * plumbed in the kernel, and all instances of ion_phys_addr_t should
+ * be converted to phys_addr_t. For the time being many kernel interfaces
+ * do not accept phys_addr_t's that would have to
+ */
 #define ion_phys_addr_t unsigned long

 /**
...
@@ -180,8 +180,10 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
 		return ERR_PTR(-ENOMEM);
 	cma_heap->heap.ops = &ion_cma_ops;
-	/* get device from private heaps data, later it will be
-	 * used to make the link with reserved CMA memory */
+	/*
+	 * get device from private heaps data, later it will be
+	 * used to make the link with reserved CMA memory
+	 */
 	cma_heap->dev = data->priv;
 	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
 	return &cma_heap->heap;
...
@@ -346,7 +346,8 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
  * to keep a pool of pre allocated memory to use from your heap. Keeping
  * a pool of memory that is ready for dma, ie any cached mapping have been
  * invalidated from the cache, provides a significant performance benefit on
- * many systems */
+ * many systems
+ */

 /**
  * struct ion_page_pool - pagepool struct
...
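That comment documents the ion_page_pool interface declared in the same header. As a rough sketch of how a heap keeps a pool of dma-ready pages (the example_* names are hypothetical; ion_page_pool_create, ion_page_pool_alloc, and ion_page_pool_free are the functions declared alongside this comment):

static struct ion_page_pool *pool;	/* hypothetical heap-private pool */

static int example_pool_init(void)
{
	/* one pool of order-0 pages; gfp flags chosen for illustration */
	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 0);
	return pool ? 0 : -ENOMEM;
}

static struct page *example_alloc(void)
{
	/* reuse a pooled, already dma-ready page when one is cached */
	return ion_page_pool_alloc(pool);
}

static void example_free(struct page *page)
{
	/* return the page to the pool rather than to the system */
	ion_page_pool_free(pool, page);
}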
@@ -185,8 +185,11 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
 	struct scatterlist *sg;
 	int i;

-	/* uncached pages come from the page pools, zero them before returning
-	   for security purposes (other allocations are zerod at alloc time */
+	/*
+	 * uncached pages come from the page pools, zero them before returning
+	 * for security purposes (other allocations are zerod at
+	 * alloc time
+	 */
 	if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
 		ion_heap_buffer_zero(buffer);
...
@@ -40,8 +40,10 @@ enum ion_heap_type {
 	ION_HEAP_TYPE_CARVEOUT,
 	ION_HEAP_TYPE_CHUNK,
 	ION_HEAP_TYPE_DMA,
-	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
-				 are at the end of this enum */
+	ION_HEAP_TYPE_CUSTOM, /*
+			       * must be last so device specific heaps always
+			       * are at the end of this enum
+			       */
 	ION_NUM_HEAPS = 16,
 };
@@ -56,13 +58,18 @@ enum ion_heap_type {
  * allocation flags - the lower 16 bits are used by core ion, the upper 16
  * bits are reserved for use by the heaps themselves.
  */
-#define ION_FLAG_CACHED 1		/* mappings of this buffer should be
-					   cached, ion will do cache
-					   maintenance when the buffer is
-					   mapped for dma */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2	/* mappings of this buffer will created
-					   at mmap time, if this is set
-					   caches must be managed manually */
+#define ION_FLAG_CACHED 1	/*
+				 * mappings of this buffer should be
+				 * cached, ion will do cache
+				 * maintenance when the buffer is
+				 * mapped for dma
+				 */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2	/*
+					 * mappings of this buffer will created
+					 * at mmap time, if this is set
+					 * caches must be managed
+					 * manually
+					 */

 /**
  * DOC: Ion Userspace API
...
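To illustrate where these flags end up being used, a userspace client of this era might request a cached allocation as below. This is a hypothetical sketch, assuming the /dev/ion device node, the ION_IOC_ALLOC ioctl, and the ion_allocation_data layout from this version of the uapi header:

#include <fcntl.h>
#include <sys/ioctl.h>
#include "ion.h"	/* local copy of the uapi header patched above */

int alloc_cached(void)
{
	struct ion_allocation_data data = {
		.len = 4096,
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
		.flags = ION_FLAG_CACHED,	/* ion does cache maintenance for dma */
	};
	int fd = open("/dev/ion", O_RDWR);

	if (fd < 0 || ioctl(fd, ION_IOC_ALLOC, &data) < 0)
		return -1;
	/* data.handle now refers to the allocated buffer */
	return fd;
}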