Commit 24c198e9 authored by Oleg Drokin, committed by Greg Kroah-Hartman

staging/lustre: Make alignment match open parenthesis

This patch fixes most of the checkpatch occurrences of
"CHECK: Alignment should match open parenthesis"
in Lustre code.
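
For illustration, a minimal sketch of the style this rule enforces (hypothetical declaration; example_handler and struct foo are not part of this patch). Running scripts/checkpatch.pl --strict -f <file> reports the CHECK for the first form and accepts the second:

	struct foo;

	/* Flagged: the continuation line is not aligned under the '(' */
	int example_handler(struct foo *foo, int flags,
		const char *name);

	/* Clean: the first argument on the continuation line sits directly
	 * under the character following the open parenthesis (kernel style
	 * indents with tabs, then spaces, to reach that column)
	 */
	int example_handler(struct foo *foo, int flags,
			    const char *name);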
Signed-off-by: Emoly Liu <emoly.liu@intel.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cd94f231
@@ -138,8 +138,8 @@ struct lnet_debugfs_symlink_def {
 void lustre_insert_debugfs(struct ctl_table *table,
                            const struct lnet_debugfs_symlink_def *symlinks);
 int lprocfs_call_handler(void *data, int write, loff_t *ppos,
-                        void __user *buffer, size_t *lenp,
-                        int (*handler)(void *data, int write,
-                        loff_t pos, void __user *buffer, int len));
+                         void __user *buffer, size_t *lenp,
+                         int (*handler)(void *data, int write, loff_t pos,
+                                        void __user *buffer, int len));
 #endif /* _LIBCFS_H */
@@ -247,19 +247,19 @@ do { \
 #define LCONSOLE_EMERG(format, ...) CDEBUG(D_CONSOLE | D_EMERG, format, ## __VA_ARGS__)
 int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
-                    const char *format1, ...)
+                     const char *format1, ...)
         __printf(2, 3);
 int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
-                      const char *format1,
-                      va_list args, const char *format2, ...)
+                       const char *format1,
+                       va_list args, const char *format2, ...)
         __printf(4, 5);
 /* other external symbols that tracefile provides: */
 int cfs_trace_copyin_string(char *knl_buffer, int knl_buffer_nob,
-                           const char __user *usr_buffer, int usr_buffer_nob);
+                            const char __user *usr_buffer, int usr_buffer_nob);
 int cfs_trace_copyout_string(char __user *usr_buffer, int usr_buffer_nob,
-                            const char *knl_buffer, char *append);
+                             const char *knl_buffer, char *append);
 #define LIBCFS_DEBUG_FILE_PATH_DEFAULT "/tmp/lustre-log"
@@ -618,7 +618,7 @@ static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
 }
 struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid,
-                                   int state, int version)
+                                    int state, int version)
 {
         /*
          * CAVEAT EMPTOR:
@@ -41,9 +41,10 @@
 static void kiblnd_peer_alive(struct kib_peer *peer);
 static void kiblnd_peer_connect_failed(struct kib_peer *peer, int active, int error);
 static void kiblnd_init_tx_msg(lnet_ni_t *ni, struct kib_tx *tx,
-                              int type, int body_nob);
+                               int type, int body_nob);
 static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
-                           int resid, struct kib_rdma_desc *dstrd, __u64 dstcookie);
+                            int resid, struct kib_rdma_desc *dstrd,
+                            __u64 dstcookie);
 static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
 static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
 static void kiblnd_unmap_tx(lnet_ni_t *ni, struct kib_tx *tx);
@@ -631,7 +631,7 @@ int ksocknal_close_peer_conns_locked(struct ksock_peer *peer,
 int ksocknal_close_conn_and_siblings(struct ksock_conn *conn, int why);
 int ksocknal_close_matching_conns(lnet_process_id_t id, __u32 ipaddr);
 struct ksock_conn *ksocknal_find_conn_locked(struct ksock_peer *peer,
-                                            struct ksock_tx *tx, int nonblk);
+                                             struct ksock_tx *tx, int nonblk);
 int ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx,
                            lnet_process_id_t id);
@@ -95,8 +95,8 @@ static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
                 err = crypto_ahash_setkey(tfm, key, key_len);
         else if ((*type)->cht_key != 0)
                 err = crypto_ahash_setkey(tfm,
-                                         (unsigned char *)&((*type)->cht_key),
-                                         (*type)->cht_size);
+                                          (unsigned char *)&((*type)->cht_key),
+                                          (*type)->cht_size);
         if (err != 0) {
                 ahash_request_free(*req);
@@ -323,7 +323,7 @@ struct cl_object_operations {
          * to be used instead of newly created.
          */
         int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
-                            struct cl_page *page, pgoff_t index);
+                             struct cl_page *page, pgoff_t index);
         /**
          * Initialize lock slice for this layer. Called top-to-bottom through
          * every object layer when a new cl_lock is instantiated. Layer
@@ -221,7 +221,7 @@ void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
 }
 void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                     ldlm_policy_data_t *lpolicy)
+                                      ldlm_policy_data_t *lpolicy)
 {
         memset(lpolicy, 0, sizeof(*lpolicy));
         lpolicy->l_extent.start = wpolicy->l_extent.start;
@@ -230,7 +230,7 @@ void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
 }
 void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-                                     ldlm_wire_policy_data_t *wpolicy)
+                                      ldlm_wire_policy_data_t *wpolicy)
 {
         memset(wpolicy, 0, sizeof(*wpolicy));
         wpolicy->l_extent.start = lpolicy->l_extent.start;
@@ -166,7 +166,7 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
          */
         list_for_each(tmp, &res->lr_granted) {
                 lock = list_entry(tmp, struct ldlm_lock,
-                                 l_res_link);
+                                  l_res_link);
                 if (ldlm_same_flock_owner(lock, req)) {
                         ownlocks = tmp;
                         break;
@@ -182,7 +182,7 @@ static int ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
          */
         list_for_each(tmp, &res->lr_granted) {
                 lock = list_entry(tmp, struct ldlm_lock,
-                                 l_res_link);
+                                  l_res_link);
                 if (ldlm_same_flock_owner(lock, req)) {
                         if (!ownlocks)
@@ -100,8 +100,8 @@ enum {
 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
                     enum ldlm_cancel_flags sync, int flags);
 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
-                         struct list_head *cancels, int count, int max,
-                         enum ldlm_cancel_flags cancel_flags, int flags);
+                          struct list_head *cancels, int count, int max,
+                          enum ldlm_cancel_flags cancel_flags, int flags);
 extern int ldlm_enqueue_min;
 /* ldlm_resource.c */
@@ -200,8 +200,7 @@ ldlm_interval_extent(struct ldlm_interval *node)
         LASSERT(!list_empty(&node->li_group));
-        lock = list_entry(node->li_group.next, struct ldlm_lock,
-                         l_sl_policy);
+        lock = list_entry(node->li_group.next, struct ldlm_lock, l_sl_policy);
         return &lock->l_policy_data.l_extent;
 }
@@ -302,7 +301,7 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
         lock_res_and_lock(lock);
         if ((lock->l_req_mode == lock->l_granted_mode) &&
-             !ldlm_is_cp_reqd(lock))
+            !ldlm_is_cp_reqd(lock))
                 ret = 1;
         else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
                 ret = 1;
@@ -326,13 +325,13 @@ void ldlm_ibits_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
 void ldlm_ibits_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                      ldlm_wire_policy_data_t *wpolicy);
 void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                     ldlm_policy_data_t *lpolicy);
+                                      ldlm_policy_data_t *lpolicy);
 void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
-                                     ldlm_wire_policy_data_t *wpolicy);
+                                      ldlm_wire_policy_data_t *wpolicy);
 void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                      ldlm_policy_data_t *lpolicy);
+                                       ldlm_policy_data_t *lpolicy);
 void ldlm_flock_policy_wire21_to_local(const ldlm_wire_policy_data_t *wpolicy,
-                                      ldlm_policy_data_t *lpolicy);
+                                       ldlm_policy_data_t *lpolicy);
 void ldlm_flock_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
                                      ldlm_wire_policy_data_t *wpolicy);
@@ -82,7 +82,7 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
                         if (priority) {
                                 list_del(&item->oic_item);
                                 list_add(&item->oic_item,
-                                        &imp->imp_conn_list);
+                                         &imp->imp_conn_list);
                                 item->oic_last_attempt = 0;
                         }
                         CDEBUG(D_HA, "imp %p@%s: found existing conn %s%s\n",
@@ -102,7 +102,7 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
                         list_add(&imp_conn->oic_item, &imp->imp_conn_list);
                 else
                         list_add_tail(&imp_conn->oic_item,
-                                     &imp->imp_conn_list);
+                                      &imp->imp_conn_list);
                 CDEBUG(D_HA, "imp %p@%s: add connection %s at %s\n",
                        imp, imp->imp_obd->obd_name, uuid->uuid,
                        (priority ? "head" : "tail"));
@@ -692,7 +692,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
                 if (rs->rs_transno > exp->exp_last_committed) {
                         /* not committed already */
                         list_add_tail(&rs->rs_obd_list,
-                                     &exp->exp_uncommitted_replies);
+                                      &exp->exp_uncommitted_replies);
                 }
                 spin_unlock(&exp->exp_uncommitted_replies_lock);
@@ -797,7 +797,7 @@ void ldlm_dump_export_locks(struct obd_export *exp)
                 CERROR("dumping locks for export %p,ignore if the unmount doesn't hang\n",
                        exp);
                 list_for_each_entry(lock, &exp->exp_locks_list,
-                                   l_exp_refs_link)
+                                    l_exp_refs_link)
                         LDLM_ERROR(lock, "lock:");
         }
         spin_unlock(&exp->exp_locks_list_guard);
@@ -937,7 +937,7 @@ static void search_granted_lock(struct list_head *queue,
                         /* go to next policy group within mode group */
                         tmp = policy_end->l_res_link.next;
                         lock = list_entry(tmp, struct ldlm_lock,
-                                         l_res_link);
+                                          l_res_link);
                 } /* loop over policy groups within the mode group */
                 /* insert point is last lock of the mode group,
@@ -1116,7 +1116,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
                  * of bits.
                  */
                 if (lock->l_resource->lr_type == LDLM_IBITS &&
-                     ((lock->l_policy_data.l_inodebits.bits &
+                    ((lock->l_policy_data.l_inodebits.bits &
                       policy->l_inodebits.bits) !=
                       policy->l_inodebits.bits))
                         continue;
@@ -1376,12 +1376,12 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
                 if (size == sizeof(struct ost_lvb)) {
                         if (loc == RCL_CLIENT)
                                 lvb = req_capsule_client_swab_get(pill,
-                                                &RMF_DLM_LVB,
-                                                lustre_swab_ost_lvb);
+                                                                  &RMF_DLM_LVB,
+                                                                  lustre_swab_ost_lvb);
                         else
                                 lvb = req_capsule_server_swab_get(pill,
-                                                &RMF_DLM_LVB,
-                                                lustre_swab_ost_lvb);
+                                                                  &RMF_DLM_LVB,
+                                                                  lustre_swab_ost_lvb);
                         if (unlikely(!lvb)) {
                                 LDLM_ERROR(lock, "no LVB");
                                 return -EPROTO;
@@ -1393,8 +1393,8 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
                         if (loc == RCL_CLIENT)
                                 lvb = req_capsule_client_swab_get(pill,
-                                                &RMF_DLM_LVB,
-                                                lustre_swab_ost_lvb_v1);
+                                                                  &RMF_DLM_LVB,
+                                                                  lustre_swab_ost_lvb_v1);
                         else
                                 lvb = req_capsule_server_sized_swab_get(pill,
                                                 &RMF_DLM_LVB, size,
@@ -1418,12 +1418,12 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
                 if (size == sizeof(struct lquota_lvb)) {
                         if (loc == RCL_CLIENT)
                                 lvb = req_capsule_client_swab_get(pill,
-                                                &RMF_DLM_LVB,
-                                                lustre_swab_lquota_lvb);
+                                                                  &RMF_DLM_LVB,
+                                                                  lustre_swab_lquota_lvb);
                         else
                                 lvb = req_capsule_server_swab_get(pill,
-                                                &RMF_DLM_LVB,
-                                                lustre_swab_lquota_lvb);
+                                                                  &RMF_DLM_LVB,
+                                                                  lustre_swab_lquota_lvb);
                         if (unlikely(!lvb)) {
                                 LDLM_ERROR(lock, "no LVB");
                                 return -EPROTO;
@@ -1709,7 +1709,7 @@ static int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
                 return -ENOENT;
         gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
-                            gl_list);
+                             gl_list);
         list_del_init(&gl_work->gl_list);
         lock = gl_work->gl_lock;
@@ -706,12 +706,12 @@ static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
         if (!list_empty(&blp->blp_list) &&
             (list_empty(&blp->blp_prio_list) || num_bl == 0))
                 blwi = list_entry(blp->blp_list.next,
-                                 struct ldlm_bl_work_item, blwi_entry);
+                                  struct ldlm_bl_work_item, blwi_entry);
         else
                 if (!list_empty(&blp->blp_prio_list))
                         blwi = list_entry(blp->blp_prio_list.next,
-                                         struct ldlm_bl_work_item,
-                                         blwi_entry);
+                                          struct ldlm_bl_work_item,
+                                          blwi_entry);
         if (blwi) {
                 if (++num_bl >= atomic_read(&blp->blp_num_threads))
@@ -741,7 +741,7 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
         init_completion(&bltd.bltd_comp);
         bltd.bltd_num = atomic_read(&blp->blp_num_threads);
         snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
-                "ldlm_bl_%02d", bltd.bltd_num);
+                 "ldlm_bl_%02d", bltd.bltd_num);
         task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
         if (IS_ERR(task)) {
                 CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
@@ -786,8 +786,8 @@ static int ldlm_bl_thread_main(void *arg)
                 if (!blwi) {
                         atomic_dec(&blp->blp_busy_threads);
                         l_wait_event_exclusive(blp->blp_waitq,
-                                              (blwi = ldlm_bl_get_work(blp)),
-                                              &lwi);
+                                               (blwi = ldlm_bl_get_work(blp)),
+                                               &lwi);
                         busy = atomic_inc_return(&blp->blp_busy_threads);
                 } else {
                         busy = atomic_read(&blp->blp_busy_threads);
@@ -1094,16 +1094,17 @@ int ldlm_init(void)
                 return -ENOMEM;
         ldlm_lock_slab = kmem_cache_create("ldlm_locks",
-                                          sizeof(struct ldlm_lock), 0,
-                                          SLAB_HWCACHE_ALIGN | SLAB_DESTROY_BY_RCU, NULL);
+                                           sizeof(struct ldlm_lock), 0,
+                                           SLAB_HWCACHE_ALIGN |
+                                           SLAB_DESTROY_BY_RCU, NULL);
         if (!ldlm_lock_slab) {
                 kmem_cache_destroy(ldlm_resource_slab);
                 return -ENOMEM;
         }
         ldlm_interval_slab = kmem_cache_create("interval_node",
-                                              sizeof(struct ldlm_interval),
-                                              0, SLAB_HWCACHE_ALIGN, NULL);
+                                               sizeof(struct ldlm_interval),
+                                               0, SLAB_HWCACHE_ALIGN, NULL);
         if (!ldlm_interval_slab) {
                 kmem_cache_destroy(ldlm_resource_slab);
                 kmem_cache_destroy(ldlm_lock_slab);
@@ -995,7 +995,7 @@ static int ldlm_pools_thread_main(void *arg)
         wake_up(&thread->t_ctl_waitq);
         CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
-                "ldlm_poold", current_pid());
+               "ldlm_poold", current_pid());
         while (1) {
                 struct l_wait_info lwi;
@@ -1025,7 +1025,7 @@ static int ldlm_pools_thread_main(void *arg)
         wake_up(&thread->t_ctl_waitq);
         CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
-                "ldlm_poold", current_pid());
+               "ldlm_poold", current_pid());
         complete_and_exit(&ldlm_pools_comp, 0);
 }
@@ -1178,8 +1178,7 @@ static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
         slv = ldlm_pool_get_slv(pl);
         lvf = ldlm_pool_get_lvf(pl);
-        la = cfs_duration_sec(cfs_time_sub(cur,
-                              lock->l_last_used));
+        la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
         lv = lvf * la * unused;
         /* Inform pool about current CLV to see it via debugfs. */
@@ -1372,7 +1371,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
                         break;
         list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
-                                l_lru) {
+                                 l_lru) {
                 /* No locks which got blocking requests. */
                 LASSERT(!ldlm_is_bl_ast(lock));
@@ -1608,8 +1607,7 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
          */
         while (count > 0) {
                 LASSERT(!list_empty(cancels));
-                lock = list_entry(cancels->next, struct ldlm_lock,
-                                 l_bl_ast);
+                lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
                 LASSERT(lock->l_conn_export);
                 if (exp_connect_cancelset(lock->l_conn_export)) {
@@ -2008,7 +2006,7 @@ static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
                                            LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
         CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
-                canceled, ldlm_ns_name(ns));
+               canceled, ldlm_ns_name(ns));
 }
 int ldlm_replay_locks(struct obd_import *imp)
@@ -758,8 +758,7 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
          */
         lock_res(res);
         list_for_each(tmp, q) {
-                lock = list_entry(tmp, struct ldlm_lock,
-                                 l_res_link);
+                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
                 if (ldlm_is_cleaned(lock)) {
                         lock = NULL;
                         continue;
@@ -1381,7 +1380,7 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
         if (!list_empty(&res->lr_granted)) {
                 CDEBUG(level, "Granted locks (in reverse order):\n");
                 list_for_each_entry_reverse(lock, &res->lr_granted,
-                                           l_res_link) {
+                                            l_res_link) {
                         LDLM_DEBUG_LIMIT(level, lock, "###");
                         if (!(level & D_CANTMASK) &&
                             ++granted > ldlm_dump_granted_max) {
@@ -1357,8 +1357,8 @@ static int ll_setattr_done_writing(struct inode *inode,
                 rc = ll_som_update(inode, op_data);
         else if (rc) {
                 CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
-                        ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
-                        PFID(ll_inode2fid(inode)), rc);
+                       ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
+                       PFID(ll_inode2fid(inode)), rc);
         }
         return rc;
 }
@@ -648,8 +648,8 @@ static void ras_update_stride_detector(struct ll_readahead_state *ras,
 {
         unsigned long stride_gap = index - ras->ras_last_readpage - 1;
-        if (!stride_io_mode(ras) && (stride_gap != 0 ||
-             ras->ras_consecutive_stride_requests == 0)) {
+        if ((stride_gap != 0 || ras->ras_consecutive_stride_requests == 0) &&
+            !stride_io_mode(ras)) {
                 ras->ras_stride_pages = ras->ras_consecutive_pages;
                 ras->ras_stride_length = ras->ras_consecutive_pages +
                                          stride_gap;
@@ -918,7 +918,8 @@ static void ll_statahead_one(struct dentry *parent, const char *entry_name,
         if (rc) {
                 rc1 = ll_sa_entry_to_stated(sai, entry,
-                                           rc < 0 ? SA_ENTRY_INVA : SA_ENTRY_SUCC);
+                                            rc < 0 ? SA_ENTRY_INVA :
+                                            SA_ENTRY_SUCC);
                 if (rc1 == 0 && entry->se_index == sai->sai_index_wait)
                         wake_up(&sai->sai_waitq);
         } else {
@@ -249,7 +249,7 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
                 set_bit(AS_EIO, &inode->i_mapping->flags);
                 if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
-                     obj->vob_discard_page_warned == 0) {
+                    obj->vob_discard_page_warned == 0) {
                         obj->vob_discard_page_warned = 1;
                         ll_dirty_page_discard_warn(vmpage, ioret);
                 }
@@ -549,7 +549,7 @@ static const struct cl_page_operations vvp_transient_page_ops = {
 };
 int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
-                struct cl_page *page, pgoff_t index)
+                  struct cl_page *page, pgoff_t index)
 {
         struct vvp_page *vpg = cl_object_page_slice(obj, page);
         struct page *vmpage = page->cp_vmpage;
@@ -2162,8 +2162,8 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
                                                   &mgi->group, set);
                 } else if (next_id) {
                         err = obd_set_info_async(env, tgt->ltd_exp,
-                                        keylen, key, vallen,
-                                        ((struct obd_id_info *)val)->data, set);
+                                                 keylen, key, vallen,
+                                                 ((struct obd_id_info *)val)->data, set);
                 } else {
                         /* Only want a specific OSC */
                         if (check_uuid &&
@@ -665,7 +665,8 @@ static int mdc_finish_enqueue(struct obd_export *exp,
                 lvb_len = req_capsule_get_size(pill, &RMF_DLM_LVB, RCL_SERVER);
                 if (lvb_len > 0) {
                         lvb_data = req_capsule_server_sized_get(pill,
-                                                        &RMF_DLM_LVB, lvb_len);
+                                                                &RMF_DLM_LVB,
+                                                                lvb_len);
                         if (!lvb_data)
                                 return -EPROTO;
                 }
@@ -68,7 +68,7 @@ static void (*kill_super_cb)(struct super_block *sb);
  * this log, and is added to the mgc's list of logs to follow.
  */
 int lustre_process_log(struct super_block *sb, char *logname,
-                        struct config_llog_instance *cfg)
+                       struct config_llog_instance *cfg)
 {
         struct lustre_cfg *lcfg;
         struct lustre_cfg_bufs *bufs;
@@ -958,8 +958,8 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
         rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
         if (rc == -ETIMEDOUT) {
                 OSC_EXTENT_DUMP(D_ERROR, ext,
-                        "%s: wait ext to %d timedout, recovery in progress?\n",
-                        osc_export(obj)->exp_obd->obd_name, state);
+                                "%s: wait ext to %d timedout, recovery in progress?\n",
+                                osc_export(obj)->exp_obd->obd_name, state);
                 lwi = LWI_INTR(NULL, NULL);
                 rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
@@ -398,7 +398,8 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
         lustre_msg_set_status(req->rq_repmsg,
                               ptlrpc_status_hton(req->rq_status));
         lustre_msg_set_opc(req->rq_repmsg,
-                req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);
+                           req->rq_reqmsg ?
+                           lustre_msg_get_opc(req->rq_reqmsg) : 0);
         target_pack_pool_reply(req);
@@ -1203,8 +1203,9 @@ __u32 lustre_msg_calc_cksum(struct lustre_msg *msg)
                 unsigned int hsize = 4;
                 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32, (unsigned char *)pb,
-                                   lustre_msg_buflen(msg, MSG_PTLRPC_BODY_OFF),
-                                   NULL, 0, (unsigned char *)&crc, &hsize);
+                                       lustre_msg_buflen(msg,
+                                                         MSG_PTLRPC_BODY_OFF),
+                                       NULL, 0, (unsigned char *)&crc, &hsize);
                 return crc;
         }
         default:
@@ -523,8 +523,9 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
         for (i = 0; i < desc->bd_iov_count; i++) {
                 cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].bv_page,
-                                  desc->bd_iov[i].bv_offset & ~PAGE_MASK,
-                                  desc->bd_iov[i].bv_len);
+                                            desc->bd_iov[i].bv_offset &
+                                            ~PAGE_MASK,
+                                            desc->bd_iov[i].bv_len);
         }
         if (hashsize > buflen) {
@@ -249,9 +249,12 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
                 unsigned int hsize = 4;
                 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
-                                lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
-                                lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
-                                NULL, 0, (unsigned char *)&cksum, &hsize);
+                                       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
+                                                      0),
+                                       lustre_msg_buflen(msg,
+                                                         PLAIN_PACK_MSG_OFF),
+                                       NULL, 0, (unsigned char *)&cksum,
+                                       &hsize);
                 if (cksum != msg->lm_cksum) {
                         CDEBUG(D_SEC,
                                "early reply checksum mismatch: %08x != %08x\n",
@@ -869,9 +872,12 @@ int plain_authorize(struct ptlrpc_request *req)
                 unsigned int hsize = 4;
                 cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
-                        lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
-                        lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
-                        NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
+                                       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF,
+                                                      0),
+                                       lustre_msg_buflen(msg,
+                                                         PLAIN_PACK_MSG_OFF),
+                                       NULL, 0, (unsigned char *)&msg->lm_cksum,
+                                       &hsize);
                 req->rq_reply_off = 0;
         }
@@ -1990,11 +1990,12 @@ ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
         cond_resched();
         l_wait_event_exclusive_head(svcpt->scp_waitq,
-                                ptlrpc_thread_stopping(thread) ||
-                                ptlrpc_server_request_incoming(svcpt) ||
-                                ptlrpc_server_request_pending(svcpt, false) ||
-                                ptlrpc_rqbd_pending(svcpt) ||
-                                ptlrpc_at_check(svcpt), &lwi);
+                                    ptlrpc_thread_stopping(thread) ||
+                                    ptlrpc_server_request_incoming(svcpt) ||
+                                    ptlrpc_server_request_pending(svcpt,
+                                                                  false) ||
+                                    ptlrpc_rqbd_pending(svcpt) ||
+                                    ptlrpc_at_check(svcpt), &lwi);
         if (ptlrpc_thread_stopping(thread))
                 return -EINTR;
@@ -2357,7 +2358,7 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
         while (!list_empty(&zombie)) {
                 thread = list_entry(zombie.next,
-                                   struct ptlrpc_thread, t_link);
+                                    struct ptlrpc_thread, t_link);
                 list_del(&thread->t_link);
                 kfree(thread);
         }
@@ -2547,8 +2548,8 @@ int ptlrpc_hr_init(void)
                 LASSERT(hrp->hrp_nthrs > 0);
                 hrp->hrp_thrs =
                         kzalloc_node(hrp->hrp_nthrs * sizeof(*hrt), GFP_NOFS,
-                                cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
-                                                    i));
+                                     cfs_cpt_spread_node(ptlrpc_hr.hr_cpt_table,
+                                                         i));
                 if (!hrp->hrp_thrs) {
                         rc = -ENOMEM;
                         goto out;
@@ -2601,7 +2602,8 @@ static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt)
                                                      NULL, NULL);
                 rc = l_wait_event(svcpt->scp_waitq,
-                        atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
+                                  atomic_read(&svcpt->scp_nreps_difficult) == 0,
+                                  &lwi);
                 if (rc == 0)
                         break;
                 CWARN("Unexpectedly long timeout %s %p\n",
@@ -2647,7 +2649,7 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc)
                  * event with its 'unlink' flag set for each posted rqbd
                  */
                 list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
-                                   rqbd_list) {
+                                    rqbd_list) {
                         rc = LNetMDUnlink(rqbd->rqbd_md_h);
                         LASSERT(rc == 0 || rc == -ENOENT);
                 }