Commit 447a3621 authored by Julian Merida, committed by Greg Kroah-Hartman

staging: erofs: fix parenthesis alignment

Fix all checkpatch issues: "CHECK: Alignment should match open parenthesis"
Signed-off-by: Julian Merida <julianmr97@gmail.com>
Reviewed-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e54c2b0a
......@@ -260,7 +260,8 @@ static inline struct inode *erofs_iget_locked(struct super_block *sb,
}
struct inode *erofs_iget(struct super_block *sb,
erofs_nid_t nid, bool isdir)
erofs_nid_t nid,
bool isdir)
{
struct inode *inode = erofs_iget_locked(sb, nid);
......
......@@ -211,7 +211,8 @@ int erofs_namei(struct inode *dir,
/* NOTE: i_mutex is already held by vfs */
static struct dentry *erofs_lookup(struct inode *dir,
struct dentry *dentry, unsigned int flags)
struct dentry *dentry,
unsigned int flags)
{
int err;
erofs_nid_t nid;
......
......@@ -34,7 +34,8 @@ static int __init erofs_init_inode_cache(void)
{
erofs_inode_cachep = kmem_cache_create("erofs_inode",
sizeof(struct erofs_vnode), 0,
SLAB_RECLAIM_ACCOUNT, init_once);
SLAB_RECLAIM_ACCOUNT,
init_once);
return erofs_inode_cachep != NULL ? 0 : -ENOMEM;
}
......@@ -313,7 +314,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
}
static void managed_cache_invalidatepage(struct page *page,
unsigned int offset, unsigned int length)
unsigned int offset,
unsigned int length)
{
const unsigned int stop = length + offset;
......@@ -352,7 +354,8 @@ static struct inode *erofs_init_managed_cache(struct super_block *sb)
#endif
static int erofs_read_super(struct super_block *sb,
const char *dev_name, void *data, int silent)
const char *dev_name,
void *data, int silent)
{
struct inode *inode;
struct erofs_sb_info *sbi;
......
......@@ -464,8 +464,7 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
grp->obj.index = f->idx;
grp->llen = map->m_llen;
z_erofs_vle_set_workgrp_fmt(grp,
(map->m_flags & EROFS_MAP_ZIPPED) ?
z_erofs_vle_set_workgrp_fmt(grp, (map->m_flags & EROFS_MAP_ZIPPED) ?
Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
......@@ -554,7 +553,8 @@ static int z_erofs_vle_work_iter_begin(struct z_erofs_vle_work_builder *builder,
return PTR_ERR(work);
got_it:
z_erofs_pagevec_ctor_init(&builder->vector,
Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, work->vcnt);
Z_EROFS_VLE_INLINE_PAGEVECS,
work->pagevec, work->vcnt);
if (builder->role >= Z_EROFS_VLE_WORK_PRIMARY) {
/* enable possibly in-place decompression */
......@@ -594,7 +594,8 @@ void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
call_rcu(&work->rcu, z_erofs_rcu_callback);
}
static void __z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
static void
__z_erofs_vle_work_release(struct z_erofs_vle_workgroup *grp,
struct z_erofs_vle_work *work __maybe_unused)
{
erofs_workgroup_put(&grp->obj);
......@@ -781,8 +782,8 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
struct page *const newpage =
__stagingpage_alloc(page_pool, GFP_NOFS);
err = z_erofs_vle_work_add_page(builder,
newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
err = z_erofs_vle_work_add_page(builder, newpage,
Z_EROFS_PAGE_TYPE_EXCLUSIVE);
if (likely(!err))
goto retry;
}
......@@ -923,8 +924,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
pages = z_pagemap_global;
else {
repeat:
pages = kvmalloc_array(nr_pages,
sizeof(struct page *), GFP_KERNEL);
pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL);
/* fallback to global pagemap for the lowmem scenario */
if (unlikely(!pages)) {
......@@ -940,8 +941,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
for (i = 0; i < nr_pages; ++i)
pages[i] = NULL;
z_erofs_pagevec_ctor_init(&ctor,
Z_EROFS_VLE_INLINE_PAGEVECS, work->pagevec, 0);
z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_VLE_INLINE_PAGEVECS,
work->pagevec, 0);
for (i = 0; i < work->vcnt; ++i) {
unsigned int pagenr;
......@@ -1030,8 +1031,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
skip_allocpage:
vout = erofs_vmap(pages, nr_pages);
err = z_erofs_vle_unzip_vmap(compressed_pages,
clusterpages, vout, llen, work->pageofs, overlapped);
err = z_erofs_vle_unzip_vmap(compressed_pages, clusterpages, vout,
llen, work->pageofs, overlapped);
erofs_vunmap(vout, nr_pages);
......
......@@ -122,8 +122,8 @@ static int init_inode_xattrs(struct inode *inode)
BUG_ON(it.ofs != EROFS_BLKSIZ);
xattr_iter_end(&it, atomic_map);
it.page = erofs_get_meta_page(sb,
++it.blkaddr, S_ISDIR(inode->i_mode));
it.page = erofs_get_meta_page(sb, ++it.blkaddr,
S_ISDIR(inode->i_mode));
if (IS_ERR(it.page)) {
kfree(vi->xattr_shared_xattrs);
vi->xattr_shared_xattrs = NULL;
......@@ -217,7 +217,8 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
* `ofs' pointing to the next xattr item rather than an arbitrary position.
*/
static int xattr_foreach(struct xattr_iter *it,
const struct xattr_iter_handlers *op, unsigned int *tlimit)
const struct xattr_iter_handlers *op,
unsigned int *tlimit)
{
struct erofs_xattr_entry entry;
unsigned int value_sz, processed, slice;
......@@ -348,7 +349,8 @@ static int xattr_checkbuffer(struct xattr_iter *_it,
}
static void xattr_copyvalue(struct xattr_iter *_it,
unsigned int processed, char *buf, unsigned int len)
unsigned int processed,
char *buf, unsigned int len)
{
struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment