Commit 3949015e authored by Kristina Martsenko, committed by Greg Kroah-Hartman

staging: lustre: ptlrpc: clean up whitespace around parentheses

Fix the following types of checkpatch errors and warnings to comply with
the kernel coding style:

ERROR: space required before the open parenthesis '('
ERROR: space prohibited after that open parenthesis '('
ERROR: space prohibited before that close parenthesis ')'
WARNING: space prohibited between function name and open parenthesis '('
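
For illustration, each diagnostic maps onto a whitespace change of the
following shape (a minimal made-up sketch, not code from this patch;
foo(), bad(), good(), and buf are placeholder names):

    #include <string.h>

    static void foo(char *buf, int count) { (void)buf; (void)count; }

    /* Before: each line triggers one of the messages listed above */
    static void bad(char *buf, int count)
    {
            while(count > 0)                /* space required before '(' */
                    foo( buf, count-- );    /* space after '(' / before ')' */
            memset (buf, 0, sizeof (*buf)); /* space between name and '(' */
    }

    /* After: parenthesis spacing as checkpatch expects */
    static void good(char *buf, int count)
    {
            while (count > 0)
                    foo(buf, count--);
            memset(buf, 0, sizeof(*buf));
    }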
Signed-off-by: Kristina Martsenko <kristina.martsenko@gmail.com>
Reviewed-by: Sarah Sharp <sarah.a.sharp@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0d100bfc
@@ -459,7 +459,7 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
 {
         struct ptlrpc_request_pool *pool;
-        OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
+        OBD_ALLOC(pool, sizeof(struct ptlrpc_request_pool));
         if (!pool)
                 return NULL;
@@ -475,7 +475,7 @@ ptlrpc_init_rq_pool(int num_rq, int msgsize,
         if (list_empty(&pool->prp_req_list)) {
                 /* have not allocated a single request for the pool */
-                OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
+                OBD_FREE(pool, sizeof(struct ptlrpc_request_pool));
                 pool = NULL;
         }
         return pool;
@@ -881,7 +881,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
         /* Requests on the set should either all be completed, or all be new */
         expected_phase = (atomic_read(&set->set_remaining) == 0) ?
                          RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
-        list_for_each (tmp, &set->set_requests) {
+        list_for_each(tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
                         list_entry(tmp, struct ptlrpc_request,
                                    rq_set_chain);
@@ -912,7 +912,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
                         req->rq_invalid_rqset = 0;
                         spin_unlock(&req->rq_lock);
-                ptlrpc_req_finished (req);
+                ptlrpc_req_finished(req);
         }
         LASSERT(atomic_read(&set->set_remaining) == 0);
@@ -1020,7 +1020,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
 {
         int delay = 0;
-        LASSERT (status != NULL);
+        LASSERT(status != NULL);
         *status = 0;
         if (req->rq_ctx_init || req->rq_ctx_fini) {
@@ -1907,7 +1907,7 @@ int ptlrpc_expired_set(void *data)
         /*
          * A timeout expired. See which reqs it applies to...
          */
-        list_for_each (tmp, &set->set_requests) {
+        list_for_each(tmp, &set->set_requests) {
                 struct ptlrpc_request *req =
                         list_entry(tmp, struct ptlrpc_request,
                                    rq_set_chain);
@@ -2688,7 +2688,7 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
         LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
-        LASSERT (sizeof (*aa) <= sizeof (req->rq_async_args));
+        LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
         aa = ptlrpc_req_async_args(req);
         memset(aa, 0, sizeof(*aa));
@@ -2962,7 +2962,7 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
         init_waitqueue_head(&req->rq_set_waitq);
         atomic_set(&req->rq_refcount, 1);
-        CLASSERT (sizeof(*args) <= sizeof(req->rq_async_args));
+        CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args));
         args = ptlrpc_req_async_args(req);
         args->magic = PTLRPC_WORK_MAGIC;
         args->cb = cb;
@@ -56,9 +56,9 @@ void request_out_callback(lnet_event_t *ev)
         struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
         struct ptlrpc_request *req = cbid->cbid_arg;
-        LASSERT (ev->type == LNET_EVENT_SEND ||
-                 ev->type == LNET_EVENT_UNLINK);
-        LASSERT (ev->unlinked);
+        LASSERT(ev->type == LNET_EVENT_SEND ||
+                ev->type == LNET_EVENT_UNLINK);
+        LASSERT(ev->unlinked);
         DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
@@ -90,9 +90,9 @@ void reply_in_callback(lnet_event_t *ev)
         DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
-        LASSERT (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
-        LASSERT (ev->md.start == req->rq_repbuf);
-        LASSERT (ev->offset + ev->mlength <= req->rq_repbuf_len);
+        LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
+        LASSERT(ev->md.start == req->rq_repbuf);
+        LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
         /* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
            for adaptive timeouts' early reply. */
         LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
@@ -113,7 +113,7 @@ void reply_in_callback(lnet_event_t *ev)
                 goto out_wake;
         }
-        if (ev->mlength < ev->rlength ) {
+        if (ev->mlength < ev->rlength) {
                 CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
                        req->rq_replen, ev->rlength, ev->offset);
                 req->rq_reply_truncate = 1;
@@ -167,18 +167,18 @@ void reply_in_callback(lnet_event_t *ev)
 /*
  * Client's bulk has been written/read
  */
-void client_bulk_callback (lnet_event_t *ev)
+void client_bulk_callback(lnet_event_t *ev)
 {
         struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
         struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
         struct ptlrpc_request *req;
-        LASSERT ((desc->bd_type == BULK_PUT_SINK &&
-                  ev->type == LNET_EVENT_PUT) ||
-                 (desc->bd_type == BULK_GET_SOURCE &&
-                  ev->type == LNET_EVENT_GET) ||
-                 ev->type == LNET_EVENT_UNLINK);
-        LASSERT (ev->unlinked);
+        LASSERT((desc->bd_type == BULK_PUT_SINK &&
+                 ev->type == LNET_EVENT_PUT) ||
+                (desc->bd_type == BULK_GET_SOURCE &&
+                 ev->type == LNET_EVENT_GET) ||
+                ev->type == LNET_EVENT_UNLINK);
+        LASSERT(ev->unlinked);
         if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
                 ev->status = -EIO;
@@ -283,11 +283,11 @@ void request_in_callback(lnet_event_t *ev)
         struct ptlrpc_service *service = svcpt->scp_service;
         struct ptlrpc_request *req;
-        LASSERT (ev->type == LNET_EVENT_PUT ||
-                 ev->type == LNET_EVENT_UNLINK);
-        LASSERT ((char *)ev->md.start >= rqbd->rqbd_buffer);
-        LASSERT ((char *)ev->md.start + ev->offset + ev->mlength <=
-                 rqbd->rqbd_buffer + service->srv_buf_size);
+        LASSERT(ev->type == LNET_EVENT_PUT ||
+                ev->type == LNET_EVENT_UNLINK);
+        LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
+        LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
+                rqbd->rqbd_buffer + service->srv_buf_size);
         CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
                "event type %d, status %d, service %s\n",
@@ -300,9 +300,9 @@ void request_in_callback(lnet_event_t *ev)
                  * we'd have to re-post the rqbd, which we can't do in this
                  * context. */
                 req = &rqbd->rqbd_req;
-                memset(req, 0, sizeof (*req));
+                memset(req, 0, sizeof(*req));
         } else {
-                LASSERT (ev->type == LNET_EVENT_PUT);
+                LASSERT(ev->type == LNET_EVENT_PUT);
                 if (ev->status != 0) {
                         /* We moaned above already... */
                         return;
@@ -381,19 +381,19 @@ void reply_out_callback(lnet_event_t *ev)
         struct ptlrpc_reply_state *rs = cbid->cbid_arg;
         struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
-        LASSERT (ev->type == LNET_EVENT_SEND ||
-                 ev->type == LNET_EVENT_ACK ||
-                 ev->type == LNET_EVENT_UNLINK);
+        LASSERT(ev->type == LNET_EVENT_SEND ||
+                ev->type == LNET_EVENT_ACK ||
+                ev->type == LNET_EVENT_UNLINK);
         if (!rs->rs_difficult) {
                 /* 'Easy' replies have no further processing so I drop the
                  * net's ref on 'rs' */
-                LASSERT (ev->unlinked);
+                LASSERT(ev->unlinked);
                 ptlrpc_rs_decref(rs);
                 return;
         }
-        LASSERT (rs->rs_on_net);
+        LASSERT(rs->rs_on_net);
         if (ev->unlinked) {
                 /* Last network callback. The net's ref on 'rs' stays put
@@ -419,18 +419,17 @@ static void ptlrpc_master_callback(lnet_event_t *ev)
         void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;
         /* Honestly, it's best to find out early. */
-        LASSERT (cbid->cbid_arg != LP_POISON);
-        LASSERT (callback == request_out_callback ||
-                 callback == reply_in_callback ||
-                 callback == client_bulk_callback ||
-                 callback == request_in_callback ||
-                 callback == reply_out_callback
-                 );
-        callback (ev);
+        LASSERT(cbid->cbid_arg != LP_POISON);
+        LASSERT(callback == request_out_callback ||
+                callback == reply_in_callback ||
+                callback == client_bulk_callback ||
+                callback == request_in_callback ||
+                callback == reply_out_callback);
+        callback(ev);
 }
-int ptlrpc_uuid_to_peer (struct obd_uuid *uuid,
+int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
                         lnet_process_id_t *peer, lnet_nid_t *self)
 {
         int best_dist = 0;
@@ -538,7 +537,7 @@ int ptlrpc_ni_init(void)
         /* We're not passing any limits yet... */
         rc = LNetNIInit(pid);
         if (rc < 0) {
-                CDEBUG (D_NET, "Can't init network interface: %d\n", rc);
+                CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
                 return (-ENOENT);
         }
@@ -552,7 +551,7 @@ int ptlrpc_ni_init(void)
         if (rc == 0)
                 return 0;
-        CERROR ("Failed to allocate event queue: %d\n", rc);
+        CERROR("Failed to allocate event queue: %d\n", rc);
         LNetNIFini();
         return (-ENOMEM);
@@ -165,7 +165,7 @@ void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
         init_timer(timer);
         timer->expires = timeout;
-        timer->data = (unsigned long ) ctx;
+        timer->data = (unsigned long) ctx;
         timer->function = ctx_upcall_timeout_kr;
         add_timer(timer);
@@ -1276,7 +1276,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
 arc4_out_key:
                 rawobj_free(&arc4_keye);
 arc4_out:
-                do {} while(0); /* just to avoid compile warning */
+                do {} while (0); /* just to avoid compile warning */
         } else {
                 rc = krb5_encrypt_rawobjs(kctx->kc_keye.kb_tfm, 0,
                                           3, data_desc, &cipher, 1);
@@ -483,7 +483,7 @@ int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
                 memset(window, 0, win_size / 8);
                 *max_seq = seq_num;
         } else {
-                while(*max_seq < seq_num) {
+                while (*max_seq < seq_num) {
                         (*max_seq)++;
                         __clear_bit((*max_seq) % win_size, window);
                 }
@@ -2369,7 +2369,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
         if (swabbed)
                 gss_header_swabber(ghdr);
-        switch(ghdr->gh_proc) {
+        switch (ghdr->gh_proc) {
         case PTLRPC_GSS_PROC_INIT:
         case PTLRPC_GSS_PROC_CONTINUE_INIT:
                 rc = gss_svc_handle_init(req, gw);
@@ -2388,7 +2388,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
         switch (rc) {
         case SECSVC_OK:
-                LASSERT (grctx->src_ctx);
+                LASSERT(grctx->src_ctx);
                 req->rq_auth_gss = 1;
                 req->rq_auth_remote = grctx->src_ctx->gsc_remote;
@@ -81,14 +81,14 @@ do { \
                        ptlrpc_import_state_name(state)); \
                 __import_set_state(imp, state); \
         } \
-} while(0)
+} while (0)
 #define IMPORT_SET_STATE(imp, state) \
 do { \
         spin_lock(&imp->imp_lock); \
         IMPORT_SET_STATE_NOLOCK(imp, state); \
         spin_unlock(&imp->imp_lock); \
-} while(0)
+} while (0)
 static int ptlrpc_connect_interpret(const struct lu_env *env,
@@ -680,7 +680,7 @@ int ptlrpc_connect_import(struct obd_import *imp)
         ptlrpc_request_set_replen(request);
         request->rq_interpret_reply = ptlrpc_connect_interpret;
-        CLASSERT(sizeof (*aa) <= sizeof (request->rq_async_args));
+        CLASSERT(sizeof(*aa) <= sizeof(request->rq_async_args));
         aa = ptlrpc_req_async_args(request);
         memset(aa, 0, sizeof(*aa));
@@ -859,7 +859,7 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
         if (MSG_CONNECT_RECONNECT & msg_flags) {
                 memset(&old_hdl, 0, sizeof(old_hdl));
                 if (!memcmp(&old_hdl, lustre_msg_get_handle(request->rq_repmsg),
-                            sizeof (old_hdl))) {
+                            sizeof(old_hdl))) {
                         LCONSOLE_WARN("Reconnect to %s (at @%s) failed due "
                                       "bad handle "LPX64"\n",
                                       obd2cli_tgt(imp->imp_obd),
@@ -1507,7 +1507,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
                 at->at_worst_time = now;
                 at->at_hist[0] = val;
                 at->at_binstart = now;
-        } else if (now - at->at_binstart < binlimit ) {
+        } else if (now - at->at_binstart < binlimit) {
                 /* in bin 0 */
                 at->at_hist[0] = max(val, at->at_hist[0]);
                 at->at_current = max(val, at->at_current);
@@ -1517,7 +1517,7 @@ int at_measured(struct adaptive_timeout *at, unsigned int val)
                 /* move bins over */
                 shift = (now - at->at_binstart) / binlimit;
                 LASSERT(shift > 0);
-                for(i = AT_BINS - 1; i >= 0; i--) {
+                for (i = AT_BINS - 1; i >= 0; i--) {
                         if (i >= shift) {
                                 at->at_hist[i] = at->at_hist[i - shift];
                                 maxv = max(maxv, at->at_hist[i]);
@@ -1808,7 +1808,7 @@ swabber_dumper_helper(struct req_capsule *pill,
                       const struct req_msg_field *field,
                       enum req_location loc,
                       int offset,
-                      void *value, int len, int dump, void (*swabber)( void *))
+                      void *value, int len, int dump, void (*swabber)(void *))
 {
         void *p;
         int i;
@@ -1883,7 +1883,7 @@ swabber_dumper_helper(struct req_capsule *pill,
 static void *__req_capsule_get(struct req_capsule *pill,
                                const struct req_msg_field *field,
                                enum req_location loc,
-                               void (*swabber)( void *),
+                               void (*swabber)(void *),
                                int dump)
 {
         const struct req_format *fmt;
@@ -63,7 +63,7 @@
                 return (-EINVAL); \
         } \
         mutex_unlock(&ctxt->loc_mutex); \
-} while(0)
+} while (0)
 #define LLOG_CLIENT_EXIT(ctxt, imp) do { \
         mutex_lock(&ctxt->loc_mutex); \
@@ -72,7 +72,7 @@
                           ctxt->loc_imp, imp); \
         class_import_put(imp); \
         mutex_unlock(&ctxt->loc_mutex); \
-} while(0)
+} while (0)
 /* This is a callback from the llog_* functions.
  * Assumes caller has already pushed us into the kernel context. */
@@ -302,7 +302,7 @@ static int llog_client_read_header(const struct lu_env *env,
         if (hdr == NULL)
                 GOTO(out, rc =-EFAULT);
-        memcpy(handle->lgh_hdr, hdr, sizeof (*hdr));
+        memcpy(handle->lgh_hdr, hdr, sizeof(*hdr));
         handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
         /* sanity checks */
@@ -221,7 +221,7 @@ void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
         for (i = 0; i < EXTRA_LAST_OPC; i++) {
                 char *units;
-                switch(i) {
+                switch (i) {
                 case BRW_WRITE_BYTES:
                 case BRW_READ_BYTES:
                         units = "bytes";
@@ -47,17 +47,17 @@
  * over \a conn connection to portal \a portal.
  * Returns 0 on success or error code.
  */
-static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
-                         lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
-                         struct ptlrpc_connection *conn, int portal, __u64 xid,
-                         unsigned int offset)
+static int ptl_send_buf(lnet_handle_md_t *mdh, void *base, int len,
+                        lnet_ack_req_t ack, struct ptlrpc_cb_id *cbid,
+                        struct ptlrpc_connection *conn, int portal, __u64 xid,
+                        unsigned int offset)
 {
         int rc;
         lnet_md_t md;
-        LASSERT (portal != 0);
-        LASSERT (conn != NULL);
-        CDEBUG (D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
+        LASSERT(portal != 0);
+        LASSERT(conn != NULL);
+        CDEBUG(D_INFO, "conn=%p id %s\n", conn, libcfs_id2str(conn->c_peer));
         md.start = base;
         md.length = len;
         md.threshold = (ack == LNET_ACK_REQ) ? 2 : 1;
@@ -71,18 +71,18 @@ static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
                 ack = LNET_NOACK_REQ;
         }
-        rc = LNetMDBind (md, LNET_UNLINK, mdh);
+        rc = LNetMDBind(md, LNET_UNLINK, mdh);
         if (unlikely(rc != 0)) {
-                CERROR ("LNetMDBind failed: %d\n", rc);
-                LASSERT (rc == -ENOMEM);
+                CERROR("LNetMDBind failed: %d\n", rc);
+                LASSERT(rc == -ENOMEM);
                 return -ENOMEM;
         }
         CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
                len, portal, xid, offset);
-        rc = LNetPut (conn->c_self, *mdh, ack,
-                      conn->c_peer, portal, xid, offset, 0);
+        rc = LNetPut(conn->c_self, *mdh, ack,
+                     conn->c_peer, portal, xid, offset, 0);
         if (unlikely(rc != 0)) {
                 int rc2;
                 /* We're going to get an UNLINK event when I unlink below,
@@ -363,14 +363,14 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
          * request, or a saved copy if this is a req saved in
          * target_queue_final_reply().
          */
-        LASSERT (req->rq_no_reply == 0);
-        LASSERT (req->rq_reqbuf != NULL);
-        LASSERT (rs != NULL);
-        LASSERT ((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
-        LASSERT (req->rq_repmsg != NULL);
-        LASSERT (req->rq_repmsg == rs->rs_msg);
-        LASSERT (rs->rs_cb_id.cbid_fn == reply_out_callback);
-        LASSERT (rs->rs_cb_id.cbid_arg == rs);
+        LASSERT(req->rq_no_reply == 0);
+        LASSERT(req->rq_reqbuf != NULL);
+        LASSERT(rs != NULL);
+        LASSERT((flags & PTLRPC_REPLY_MAYBE_DIFFICULT) || !rs->rs_difficult);
+        LASSERT(req->rq_repmsg != NULL);
+        LASSERT(req->rq_repmsg == rs->rs_msg);
+        LASSERT(rs->rs_cb_id.cbid_fn == reply_out_callback);
+        LASSERT(rs->rs_cb_id.cbid_arg == rs);
         /* There may be no rq_export during failover */
@@ -423,12 +423,12 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
         req->rq_sent = cfs_time_current_sec();
-        rc = ptl_send_buf (&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
-                           (rs->rs_difficult && !rs->rs_no_ack) ?
-                           LNET_ACK_REQ : LNET_NOACK_REQ,
-                           &rs->rs_cb_id, conn,
-                           ptlrpc_req2svc(req)->srv_rep_portal,
-                           req->rq_xid, req->rq_reply_off);
+        rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
+                          (rs->rs_difficult && !rs->rs_no_ack) ?
+                          LNET_ACK_REQ : LNET_NOACK_REQ,
+                          &rs->rs_cb_id, conn,
+                          ptlrpc_req2svc(req)->srv_rep_portal,
+                          req->rq_xid, req->rq_reply_off);
 out:
         if (unlikely(rc != 0))
                 ptlrpc_req_drop_rs(req);
@@ -437,7 +437,7 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
 }
 EXPORT_SYMBOL(ptlrpc_send_reply);
-int ptlrpc_reply (struct ptlrpc_request *req)
+int ptlrpc_reply(struct ptlrpc_request *req)
 {
         if (req->rq_no_reply)
                 return 0;
@@ -537,13 +537,13 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
         /* bulk register should be done after wrap_request() */
         if (request->rq_bulk != NULL) {
-                rc = ptlrpc_register_bulk (request);
+                rc = ptlrpc_register_bulk(request);
                 if (rc != 0)
                         GOTO(out, rc);
         }
         if (!noreply) {
-                LASSERT (request->rq_replen != 0);
+                LASSERT(request->rq_replen != 0);
                 if (request->rq_repbuf == NULL) {
                         LASSERT(request->rq_repdata == NULL);
                         LASSERT(request->rq_repmsg == NULL);
@@ -566,7 +566,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                                   LNET_UNLINK, LNET_INS_AFTER, &reply_me_h);
                 if (rc != 0) {
                         CERROR("LNetMEAttach failed: %d\n", rc);
-                        LASSERT (rc == -ENOMEM);
+                        LASSERT(rc == -ENOMEM);
                         GOTO(cleanup_bulk, rc = -ENOMEM);
                 }
         }
@@ -604,7 +604,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
                                   &request->rq_reply_md_h);
                 if (rc != 0) {
                         CERROR("LNetMDAttach failed: %d\n", rc);
-                        LASSERT (rc == -ENOMEM);
+                        LASSERT(rc == -ENOMEM);
                         spin_lock(&request->rq_lock);
                         /* ...but the MD attach didn't succeed... */
                         request->rq_receiving_reply = 0;
@@ -655,7 +655,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
          * nobody apart from the PUT's target has the right nid+XID to
          * access the reply buffer. */
         rc2 = LNetMEUnlink(reply_me_h);
-        LASSERT (rc2 == 0);
+        LASSERT(rc2 == 0);
         /* UNLINKED callback called synchronously */
         LASSERT(!request->rq_receiving_reply);
@@ -715,9 +715,9 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
                 return (0);
         CERROR("LNetMDAttach failed: %d; \n", rc);
-        LASSERT (rc == -ENOMEM);
-        rc = LNetMEUnlink (me_h);
-        LASSERT (rc == 0);
+        LASSERT(rc == -ENOMEM);
+        rc = LNetMEUnlink(me_h);
+        LASSERT(rc == 0);
         rqbd->rqbd_refcount = 0;
         return (-ENOMEM);
@@ -77,13 +77,13 @@ void ptlrpc_lprocfs_register_service(struct proc_dir_entry *proc_entry,
                                      struct ptlrpc_service *svc);
 void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc);
 void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req, long amount);
-void ptlrpc_lprocfs_do_request_stat (struct ptlrpc_request *req,
+void ptlrpc_lprocfs_do_request_stat(struct ptlrpc_request *req,
                                     long q_usec, long work_usec);
 #else
-#define ptlrpc_lprocfs_register_service(params...) do{}while(0)
-#define ptlrpc_lprocfs_unregister_service(params...) do{}while(0)
-#define ptlrpc_lprocfs_rpc_sent(params...) do{}while(0)
-#define ptlrpc_lprocfs_do_request_stat(params...) do{}while(0)
+#define ptlrpc_lprocfs_register_service(params...) do{}while (0)
+#define ptlrpc_lprocfs_unregister_service(params...) do{}while (0)
+#define ptlrpc_lprocfs_rpc_sent(params...) do{}while (0)
+#define ptlrpc_lprocfs_do_request_stat(params...) do{}while (0)
 #endif /* LPROCFS */
 /* NRS */
@@ -112,7 +112,7 @@ __init int ptlrpc_init(void)
         return 0;
 cleanup:
-        switch(cleanup_phase) {
+        switch (cleanup_phase) {
         case 8:
                 ptlrpc_nrs_fini();
         case 7:
@@ -386,7 +386,7 @@ ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
 {
         LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
         LASSERT(spin_is_locked(&rs->rs_lock));
-        LASSERT (rs->rs_difficult);
+        LASSERT(rs->rs_difficult);
         rs->rs_scheduled_ever = 1;  /* flag any notification attempt */
         if (rs->rs_scheduled) {     /* being set up or already notified */
@@ -412,7 +412,7 @@ void ptlrpc_commit_replies(struct obd_export *exp)
         spin_lock(&exp->exp_uncommitted_replies_lock);
         list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
                                  rs_obd_list) {
-                LASSERT (rs->rs_difficult);
+                LASSERT(rs->rs_difficult);
                 /* VBR: per-export last_committed */
                 LASSERT(rs->rs_export);
                 if (rs->rs_transno <= exp->exp_last_committed) {
@@ -796,7 +796,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
         LASSERT(rc == 0);
         mutex_lock(&ptlrpc_all_services_mutex);
-        list_add (&service->srv_list, &ptlrpc_all_services);
+        list_add(&service->srv_list, &ptlrpc_all_services);
         mutex_unlock(&ptlrpc_all_services_mutex);
         if (proc_entry != NULL)
@@ -1777,9 +1777,9 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
         if (rc) {
-                CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
-                        LPU64"\n", svc->srv_req_portal,
-                        libcfs_id2str(req->rq_peer), req->rq_xid);
+                CERROR("error unpacking ptlrpc body: ptl %d from %s x"
+                       LPU64"\n", svc->srv_req_portal,
+                       libcfs_id2str(req->rq_peer), req->rq_xid);
                 goto err_req;
         }
@@ -1798,7 +1798,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
                 goto err_req;
         }
-        switch(lustre_msg_get_opc(req->rq_reqmsg)) {
+        switch (lustre_msg_get_opc(req->rq_reqmsg)) {
         case MDS_WRITEPAGE:
         case OST_WRITE:
                 req->rq_bulk_write = 1;
@@ -1895,7 +1895,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
         ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET);
-        if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
+        if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
                 libcfs_debug_dumplog();
         do_gettimeofday(&work_start);
@@ -2037,13 +2037,13 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
         exp = rs->rs_export;
-        LASSERT (rs->rs_difficult);
-        LASSERT (rs->rs_scheduled);
-        LASSERT (list_empty(&rs->rs_list));
+        LASSERT(rs->rs_difficult);
+        LASSERT(rs->rs_scheduled);
+        LASSERT(list_empty(&rs->rs_list));
         spin_lock(&exp->exp_lock);
         /* Noop if removed already */
-        list_del_init (&rs->rs_exp_list);
+        list_del_init(&rs->rs_exp_list);
         spin_unlock(&exp->exp_lock);
         /* The disk commit callback holds exp_uncommitted_replies_lock while it
@@ -2113,9 +2113,9 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
         /* Off the net */
         spin_unlock(&rs->rs_lock);
-        class_export_put (exp);
+        class_export_put(exp);
         rs->rs_export = NULL;
-        ptlrpc_rs_decref (rs);
+        ptlrpc_rs_decref(rs);
         if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
             svc->srv_is_stopping)
                 wake_up_all(&svcpt->scp_waitq);