Commit 8d21af81 authored by Philipp Guendisch, committed by Mauro Carvalho Chehab

media: staging: atomisp: hmm: Alignment code (rebased)

This patch fixes code alignment to the open parenthesis.
Semantics should not be affected by this patch.

It has been rebased on top of the media_tree atomisp branch.
Signed-off-by: Philipp Guendisch <philipp.guendisch@fau.de>
Signed-off-by: Chris Baller <chris.baller@gmx.de>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab@s-opensource.com>
parent cd31dae7
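For readers unfamiliar with the style rule being applied: checkpatch.pl prefers that continuation lines of a parameter list line up with the character after the opening parenthesis, and that quoted strings not be split across source lines. A minimal sketch of both conventions, using a hypothetical helper that is not part of the atomisp driver:

#include <linux/device.h>

/* Hypothetical example, not taken from hmm.c.  The second parameter line
 * is aligned to the open parenthesis of the declaration above it.
 */
static void example_report(struct device *dev, unsigned long count,
                           unsigned long kilobytes)
{
	/* The format string stays on one line instead of being split into
	 * several quoted fragments, matching the dev_err() cleanups in the
	 * diff below.
	 */
	dev_info(dev, "%lu buffer objects: %lu KB\n", count, kilobytes);
}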
@@ -93,7 +93,8 @@ static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
 		ret = scnprintf(buf + index1 + index2,
 				PAGE_SIZE - index1 - index2,
 				"%ld %c buffer objects: %ld KB\n",
-				count[i], hmm_bo_type_string[i], total[i] * 4);
+				count[i], hmm_bo_type_string[i],
+				total[i] * 4);
 		if (ret > 0)
 			index2 += ret;
 	}
@@ -103,15 +104,13 @@ static ssize_t bo_show(struct device *dev, struct device_attribute *attr,
 	return index1 + index2 + 1;
 }
 
-static ssize_t active_bo_show(struct device *dev,
-                              struct device_attribute *attr,
+static ssize_t active_bo_show(struct device *dev, struct device_attribute *attr,
                               char *buf)
 {
 	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, true);
 }
 
-static ssize_t free_bo_show(struct device *dev,
-                            struct device_attribute *attr,
+static ssize_t free_bo_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
 	return bo_show(dev, attr, buf, &bo_device.entire_bo_list, false);
@@ -250,8 +249,7 @@ ia_css_ptr hmm_alloc(size_t bytes, enum hmm_bo_type type,
 	/* Allocate pages for memory */
 	ret = hmm_bo_alloc_pages(bo, type, from_highmem, userptr, cached);
 	if (ret) {
-		dev_err(atomisp_dev,
-			"hmm_bo_alloc_pages failed.\n");
+		dev_err(atomisp_dev, "hmm_bo_alloc_pages failed.\n");
 		goto alloc_page_err;
 	}
 
@@ -284,8 +282,8 @@ void hmm_free(ia_css_ptr virt)
 
 	if (!bo) {
 		dev_err(atomisp_dev,
-			"can not find buffer object start with "
-			"address 0x%x\n", (unsigned int)virt);
+			"can not find buffer object start with address 0x%x\n",
+			(unsigned int)virt);
 		return;
 	}
 
@@ -300,8 +298,8 @@ static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
 {
 	if (!bo) {
 		dev_err(atomisp_dev,
-			"can not find buffer object contains "
-			"address 0x%x\n", ptr);
+			"can not find buffer object contains address 0x%x\n",
+			ptr);
 		return -EINVAL;
 	}
 
@@ -313,8 +311,7 @@ static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
 
 	if (!hmm_bo_allocated(bo)) {
 		dev_err(atomisp_dev,
-			"buffer object has no virtual address"
-			" space allocated.\n");
+			"buffer object has no virtual address space allocated.\n");
 		return -EINVAL;
 	}
 
@@ -322,7 +319,8 @@ static inline int hmm_check_bo(struct hmm_buffer_object *bo, unsigned int ptr)
 }
 
 /* Read function in ISP memory management */
-static int load_and_flush_by_kmap(ia_css_ptr virt, void *data, unsigned int bytes)
+static int load_and_flush_by_kmap(ia_css_ptr virt, void *data,
+                                  unsigned int bytes)
 {
 	struct hmm_buffer_object *bo;
 	unsigned int idx, offset, len;
@@ -462,8 +460,8 @@ int hmm_store(ia_css_ptr virt, const void *data, unsigned int bytes)
 
 		if (!des) {
 			dev_err(atomisp_dev,
-				"kmap buffer object page failed: "
-				"pg_idx = %d\n", idx);
+				"kmap buffer object page failed: pg_idx = %d\n",
+				idx);
 			return -EINVAL;
 		}
 
@@ -645,8 +643,7 @@ void hmm_vunmap(ia_css_ptr virt)
 	hmm_bo_vunmap(bo);
 }
 
-int hmm_pool_register(unsigned int pool_size,
-                      enum hmm_pool_type pool_type)
+int hmm_pool_register(unsigned int pool_size, enum hmm_pool_type pool_type)
 {
 	switch (pool_type) {
 	case HMM_POOL_TYPE_RESERVED: