Commit 0a3bdb00 authored by Greg Kroah-Hartman

staging: lustre: remove RETURN macro

We have a kernel-wide function tracing system, so use that instead of
rolling a custom one just for one filesystem.

Cc: Peng Tao <tao.peng@emc.com>
Cc: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 23f14e79
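
For context, here is a minimal sketch of what the change means for callers and for tracing. It is an editor's illustration, not part of the commit: the function and struct names below are made up, and only the RETURN()/CDEBUG() pattern and the ftrace debugfs paths reflect real interfaces.

/* Before: every exit point went through the Lustre macro, which emitted a
 * "Process leaving" message via CDEBUG(D_TRACE, ...) and then returned. */
static int example_lookup(struct example_obj *obj)	/* illustrative name */
{
	if (obj == NULL)
		RETURN(-EINVAL);	/* macro: debug message + return */
	RETURN(0);
}

/* After this commit: plain returns. Entry/exit tracing comes from the
 * kernel-wide function tracer instead of per-callsite debug messages, e.g.
 *
 *	echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	echo 'ldlm_*'       > /sys/kernel/debug/tracing/set_ftrace_filter
 *	cat /sys/kernel/debug/tracing/trace
 */
static int example_lookup(struct example_obj *obj)
{
	if (obj == NULL)
		return -EINVAL;
	return 0;
}
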
......@@ -52,11 +52,11 @@ cfs_bitmap_t *CFS_ALLOCATE_BITMAP(int size)
OBD_ALLOC(ptr, CFS_BITMAP_SIZE(size));
if (ptr == NULL)
RETURN(ptr);
return ptr;
ptr->size = size;
RETURN (ptr);
return ptr;
}
#define CFS_FREE_BITMAP(ptr) OBD_FREE(ptr, CFS_BITMAP_SIZE(ptr->size))
......
......@@ -80,20 +80,8 @@ static inline int __is_po2(unsigned long long val)
#define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \
((hexnum) >> 8 & 0xf))
/*
* Some (nomina odiosa sunt) platforms define NULL as naked 0. This confuses
* the Lustre RETURN(NULL) macro.
*/
#if defined(NULL)
#undef NULL
#endif
#define NULL ((void *)0)
#define LUSTRE_SRV_LNET_PID LUSTRE_LNET_PID
#include <linux/list.h>
#ifndef cfs_for_each_possible_cpu
......
......@@ -262,53 +262,6 @@ do { \
} while (0)
/*
* if rc == NULL, we need to code as RETURN((void *)NULL), otherwise
* there will be a warning in osx.
*/
#if defined(__GNUC__)
long libcfs_log_return(struct libcfs_debug_msg_data *, long rc);
#if BITS_PER_LONG > 32
#define RETURN(rc) \
do { \
if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
return (typeof(rc))libcfs_log_return(&msgdata, \
(long)(rc)); \
} \
\
return (rc); \
} while (0)
#else /* BITS_PER_LONG == 32 */
/* We need an on-stack variable, because we cannot cast a 32-bit pointer
* directly to (long long) without generating a compiler warning/error, yet
* casting directly to (long) will truncate 64-bit return values. The log
* values will print as 32-bit values, but they always have been. LU-1436
*/
#define RETURN(rc) \
do { \
if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
typeof(rc) __rc = (rc); \
LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
libcfs_log_return(&msgdata, (long_ptr_t)__rc); \
return __rc; \
} \
\
return (rc); \
} while (0)
#endif /* BITS_PER_LONG > 32 */
#elif defined(_MSC_VER)
#define RETURN(rc) \
do { \
CDEBUG(D_TRACE, "Process leaving.\n"); \
return (rc); \
} while (0)
#else
# error "Unknown compiler"
#endif /* __GNUC__ */
extern int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
const char *format1, ...)
__attribute__ ((format (printf, 2, 3)));
......
......@@ -380,7 +380,7 @@ ksocknal_receive (ksock_conn_t *conn)
}
ksocknal_connsock_decref(conn);
RETURN (rc);
return rc;
}
void
......
......@@ -247,7 +247,7 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
/* must be called with lnet_eq_wait_lock held */
if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
RETURN(0);
return 0;
/* We've got a new event... */
*ev = *new_event;
......@@ -267,7 +267,7 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
}
eq->eq_deq_seq = new_event->sequence + 1;
RETURN(rc);
return rc;
}
/**
......@@ -404,7 +404,7 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
LASSERT (the_lnet.ln_refcount > 0);
if (neq < 1)
RETURN(-ENOENT);
return -ENOENT;
lnet_eq_wait_lock();
......@@ -414,14 +414,14 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
if (eq == NULL) {
lnet_eq_wait_unlock();
RETURN(-ENOENT);
return -ENOENT;
}
rc = lnet_eq_dequeue_event(eq, event);
if (rc != 0) {
lnet_eq_wait_unlock();
*which = i;
RETURN(rc);
return rc;
}
}
......@@ -441,5 +441,5 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
}
lnet_eq_wait_unlock();
RETURN(0);
return 0;
}
......@@ -120,7 +120,7 @@ init_lnet(void)
rc = LNetInit();
if (rc != 0) {
CERROR("LNetInit: error %d\n", rc);
RETURN(rc);
return rc;
}
rc = libcfs_register_ioctl(&lnet_ioctl_handler);
......@@ -132,7 +132,7 @@ init_lnet(void)
(void) kthread_run(lnet_configure, NULL, "lnet_initd");
}
RETURN(0);
return 0;
}
void
......
......@@ -67,7 +67,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
LUSTRE_MDS_VERSION, SEQ_QUERY);
if (req == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
/* Init operation code */
op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
......@@ -153,14 +153,14 @@ int seq_client_alloc_super(struct lu_client_seq *seq,
* setup (lcs_exp != NULL) */
if (seq->lcs_exp == NULL) {
mutex_unlock(&seq->lcs_mutex);
RETURN(-EINPROGRESS);
return -EINPROGRESS;
}
rc = seq_client_rpc(seq, &seq->lcs_space,
SEQ_ALLOC_SUPER, "super");
}
mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
return rc;
}
/* Request sequence-controller node to allocate new meta-sequence. */
......@@ -182,7 +182,7 @@ static int seq_client_alloc_meta(const struct lu_env *env,
} while (rc == -EINPROGRESS || rc == -EAGAIN);
}
RETURN(rc);
return rc;
}
/* Allocate new sequence for client. */
......@@ -198,7 +198,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
if (rc) {
CERROR("%s: Can't allocate new meta-sequence,"
"rc %d\n", seq->lcs_name, rc);
RETURN(rc);
return rc;
} else {
CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
seq->lcs_name, PRANGE(&seq->lcs_space));
......@@ -214,7 +214,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
CDEBUG(D_INFO, "%s: Allocated sequence ["LPX64"]\n", seq->lcs_name,
*seqnr);
RETURN(rc);
return rc;
}
static int seq_fid_alloc_prep(struct lu_client_seq *seq,
......@@ -333,7 +333,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
"rc %d\n", seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
return rc;
}
CDEBUG(D_INFO, "%s: Switch to sequence "
......@@ -357,7 +357,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
mutex_unlock(&seq->lcs_mutex);
CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(seq_client_alloc_fid);
......@@ -422,7 +422,7 @@ static int seq_client_proc_init(struct lu_client_seq *seq)
CERROR("%s: LProcFS failed in seq-init\n",
seq->lcs_name);
rc = PTR_ERR(seq->lcs_proc_dir);
RETURN(rc);
return rc;
}
rc = lprocfs_add_vars(seq->lcs_proc_dir,
......@@ -433,7 +433,7 @@ static int seq_client_proc_init(struct lu_client_seq *seq)
GOTO(out_cleanup, rc);
}
RETURN(0);
return 0;
out_cleanup:
seq_client_proc_fini(seq);
......@@ -479,7 +479,7 @@ int seq_client_init(struct lu_client_seq *seq,
rc = seq_client_proc_init(seq);
if (rc)
seq_client_fini(seq);
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(seq_client_init);
......@@ -505,7 +505,7 @@ int client_fid_init(struct obd_device *obd,
OBD_ALLOC_PTR(cli->cl_seq);
if (cli->cl_seq == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
if (prefix == NULL)
......@@ -519,7 +519,7 @@ int client_fid_init(struct obd_device *obd,
if (rc)
GOTO(out_free_seq, rc);
RETURN(rc);
return rc;
out_free_seq:
OBD_FREE_PTR(cli->cl_seq);
cli->cl_seq = NULL;
......@@ -537,7 +537,7 @@ int client_fid_fini(struct obd_device *obd)
cli->cl_seq = NULL;
}
RETURN(0);
return 0;
}
EXPORT_SYMBOL(client_fid_fini);
......
......@@ -72,9 +72,9 @@ lprocfs_fid_write_common(const char *buffer, unsigned long count,
(long long unsigned *)&tmp.lsr_start,
(long long unsigned *)&tmp.lsr_end);
if (rc != 2 || !range_is_sane(&tmp) || range_is_zero(&tmp))
RETURN(-EINVAL);
return -EINVAL;
*range = tmp;
RETURN(0);
return 0;
}
/* Client side procfs stuff */
......@@ -97,7 +97,7 @@ lprocfs_fid_space_seq_write(struct file *file, const char *buffer,
mutex_unlock(&seq->lcs_mutex);
RETURN(count);
return count;
}
static int
......@@ -112,7 +112,7 @@ lprocfs_fid_space_seq_show(struct seq_file *m, void *unused)
rc = seq_printf(m, "["LPX64" - "LPX64"]:%x:%s\n", PRANGE(&seq->lcs_space));
mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
return rc;
}
static ssize_t
......@@ -127,7 +127,7 @@ lprocfs_fid_width_seq_write(struct file *file, const char *buffer,
rc = lprocfs_write_helper(buffer, count, &val);
if (rc)
RETURN(rc);
return rc;
mutex_lock(&seq->lcs_mutex);
if (seq->lcs_type == LUSTRE_SEQ_DATA)
......@@ -146,7 +146,7 @@ lprocfs_fid_width_seq_write(struct file *file, const char *buffer,
mutex_unlock(&seq->lcs_mutex);
RETURN(count);
return count;
}
static int
......@@ -161,7 +161,7 @@ lprocfs_fid_width_seq_show(struct seq_file *m, void *unused)
rc = seq_printf(m, LPU64"\n", seq->lcs_width);
mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
return rc;
}
static int
......@@ -176,7 +176,7 @@ lprocfs_fid_fid_seq_show(struct seq_file *m, void *unused)
rc = seq_printf(m, DFID"\n", PFID(&seq->lcs_fid));
mutex_unlock(&seq->lcs_mutex);
RETURN(rc);
return rc;
}
static int
......@@ -194,7 +194,7 @@ lprocfs_fid_server_seq_show(struct seq_file *m, void *unused)
} else {
rc = seq_printf(m, "%s\n", seq->lcs_srv->lss_name);
}
RETURN(rc);
return rc;
}
LPROC_SEQ_FOPS(lprocfs_fid_space);
......
......@@ -72,7 +72,7 @@ struct fld_cache *fld_cache_init(const char *name,
OBD_ALLOC_PTR(cache);
if (cache == NULL)
RETURN(ERR_PTR(-ENOMEM));
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&cache->fci_entries_head);
INIT_LIST_HEAD(&cache->fci_lru);
......@@ -92,7 +92,7 @@ struct fld_cache *fld_cache_init(const char *name,
CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
cache->fci_name, cache_size, cache_threshold);
RETURN(cache);
return cache;
}
/**
......@@ -223,7 +223,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
LASSERT(cache != NULL);
if (cache->fci_cache_count < cache->fci_cache_size)
RETURN(0);
return 0;
curr = cache->fci_lru.prev;
......@@ -239,7 +239,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
CDEBUG(D_INFO, "%s: FLD cache - Shrunk by "
"%d entries\n", cache->fci_name, num);
RETURN(0);
return 0;
}
/**
......@@ -367,10 +367,10 @@ struct fld_cache_entry
OBD_ALLOC_PTR(f_new);
if (!f_new)
RETURN(ERR_PTR(-ENOMEM));
return ERR_PTR(-ENOMEM);
f_new->fce_range = *range;
RETURN(f_new);
return f_new;
}
/**
......@@ -424,7 +424,7 @@ int fld_cache_insert_nolock(struct fld_cache *cache,
/* Add new entry to cache and lru list. */
fld_cache_entry_add(cache, f_new, prev);
out:
RETURN(0);
return 0;
}
int fld_cache_insert(struct fld_cache *cache,
......@@ -435,7 +435,7 @@ int fld_cache_insert(struct fld_cache *cache,
flde = fld_cache_entry_create(range);
if (IS_ERR(flde))
RETURN(PTR_ERR(flde));
return PTR_ERR(flde);
write_lock(&cache->fci_lock);
rc = fld_cache_insert_nolock(cache, flde);
......@@ -443,7 +443,7 @@ int fld_cache_insert(struct fld_cache *cache,
if (rc)
OBD_FREE_PTR(flde);
RETURN(rc);
return rc;
}
void fld_cache_delete_nolock(struct fld_cache *cache,
......@@ -495,7 +495,7 @@ struct fld_cache_entry
}
}
RETURN(got);
return got;
}
/**
......@@ -509,7 +509,7 @@ struct fld_cache_entry
read_lock(&cache->fci_lock);
got = fld_cache_entry_lookup_nolock(cache, range);
read_unlock(&cache->fci_lock);
RETURN(got);
return got;
}
/**
......@@ -539,9 +539,9 @@ int fld_cache_lookup(struct fld_cache *cache,
cache->fci_stat.fst_cache++;
read_unlock(&cache->fci_lock);
RETURN(0);
return 0;
}
}
read_unlock(&cache->fci_lock);
RETURN(-ENOENT);
return -ENOENT;
}
......@@ -70,7 +70,7 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
client_obd_list_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
client_obd_list_unlock(&cli->cl_loi_list_lock);
RETURN(rc);
return rc;
};
static void fld_enter_request(struct client_obd *cli)
......@@ -137,7 +137,7 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == hash)
RETURN(target);
return target;
}
CERROR("%s: Can't find target by hash %d (seq "LPX64"). "
......@@ -161,7 +161,7 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
* LBUG() to catch this situation.
*/
LBUG();
RETURN(NULL);
return NULL;
}
struct lu_fld_hash fld_hash[] = {
......@@ -192,7 +192,7 @@ fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
target->ft_idx, seq);
}
RETURN(target);
return target;
}
/*
......@@ -214,7 +214,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
CERROR("%s: Attempt to add target %s (idx "LPU64") "
"on fly - skip it\n", fld->lcf_name, name,
tar->ft_idx);
RETURN(0);
return 0;
} else {
CDEBUG(D_INFO, "%s: Adding target %s (idx "
LPU64")\n", fld->lcf_name, name, tar->ft_idx);
......@@ -222,7 +222,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
OBD_ALLOC_PTR(target);
if (target == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
spin_lock(&fld->lcf_lock);
list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
......@@ -231,7 +231,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
OBD_FREE_PTR(target);
CERROR("Target %s exists in FLD and known as %s:#"LPU64"\n",
name, fld_target_name(tmp), tmp->ft_idx);
RETURN(-EEXIST);
return -EEXIST;
}
}
......@@ -247,7 +247,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
fld->lcf_count++;
spin_unlock(&fld->lcf_lock);
RETURN(0);
return 0;
}
EXPORT_SYMBOL(fld_client_add_target);
......@@ -268,11 +268,11 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
class_export_put(target->ft_exp);
OBD_FREE_PTR(target);
RETURN(0);
return 0;
}
}
spin_unlock(&fld->lcf_lock);
RETURN(-ENOENT);
return -ENOENT;
}
EXPORT_SYMBOL(fld_client_del_target);
......@@ -291,7 +291,7 @@ static int fld_client_proc_init(struct lu_client_fld *fld)
CERROR("%s: LProcFS failed in fld-init\n",
fld->lcf_name);
rc = PTR_ERR(fld->lcf_proc_dir);
RETURN(rc);
return rc;
}
rc = lprocfs_add_vars(fld->lcf_proc_dir,
......@@ -302,7 +302,7 @@ static int fld_client_proc_init(struct lu_client_fld *fld)
GOTO(out_cleanup, rc);
}
RETURN(0);
return 0;
out_cleanup:
fld_client_proc_fini(fld);
......@@ -350,7 +350,7 @@ int fld_client_init(struct lu_client_fld *fld,
if (!hash_is_sane(hash)) {
CERROR("%s: Wrong hash function %#x\n",
fld->lcf_name, hash);
RETURN(-EINVAL);
return -EINVAL;
}
fld->lcf_count = 0;
......@@ -424,7 +424,7 @@ int fld_client_rpc(struct obd_export *exp,
req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
FLD_QUERY);
if (req == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
*op = fld_op;
......@@ -471,7 +471,7 @@ int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
if (rc == 0) {
*mds = res.lsr_index;
RETURN(0);
return 0;
}
/* Can not find it in the cache */
......@@ -491,7 +491,7 @@ int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
fld_cache_insert(fld->lcf_cache, &res);
}
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(fld_client_lookup);
......
......@@ -71,7 +71,7 @@ fld_proc_targets_seq_show(struct seq_file *m, void *unused)
seq_printf(m, "%s\n", fld_target_name(target));
spin_unlock(&fld->lcf_lock);
RETURN(0);
return 0;
}
static int
......@@ -85,7 +85,7 @@ fld_proc_hash_seq_show(struct seq_file *m, void *unused)
seq_printf(m, "%s\n", fld->lcf_hash->fh_name);
spin_unlock(&fld->lcf_lock);
RETURN(0);
return 0;
}
static ssize_t
......@@ -117,7 +117,7 @@ fld_proc_hash_seq_write(struct file *file, const char *buffer,
fld->lcf_name, hash->fh_name);
}
RETURN(count);
return count;
}
static ssize_t
......@@ -132,7 +132,7 @@ fld_proc_cache_flush_write(struct file *file, const char __user *buffer,
CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name);
RETURN(count);
return count;
}
static int fld_proc_cache_flush_open(struct inode *inode, struct file *file)
......
......@@ -216,7 +216,7 @@ static inline int lustre_cfg_len(__u32 bufcount, __u32 *buflens)
for (i = 0; i < bufcount; i++)
len += cfs_size_round(buflens[i]);
RETURN(cfs_size_round(len));
return cfs_size_round(len);
}
......@@ -232,7 +232,7 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd,
OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount,
bufs->lcfg_buflen));
if (!lcfg)
RETURN(ERR_PTR(-ENOMEM));
return ERR_PTR(-ENOMEM);
lcfg->lcfg_version = LUSTRE_CFG_VERSION;
lcfg->lcfg_command = cmd;
......@@ -243,7 +243,7 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd,
lcfg->lcfg_buflens[i] = bufs->lcfg_buflen[i];
LOGL((char *)bufs->lcfg_buf[i], bufs->lcfg_buflen[i], ptr);
}
RETURN(lcfg);
return lcfg;
}
static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
......@@ -261,27 +261,27 @@ static inline int lustre_cfg_sanity_check(void *buf, int len)
struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
if (!lcfg)
RETURN(-EINVAL);
return -EINVAL;
/* check that the first bits of the struct are valid */
if (len < LCFG_HDR_SIZE(0))
RETURN(-EINVAL);
return -EINVAL;
if (lcfg->lcfg_version != LUSTRE_CFG_VERSION)
RETURN(-EINVAL);
return -EINVAL;
if (lcfg->lcfg_bufcount >= LUSTRE_CFG_MAX_BUFCOUNT)
RETURN(-EINVAL);
return -EINVAL;
/* check that the buflens are valid */
if (len < LCFG_HDR_SIZE(lcfg->lcfg_bufcount))
RETURN(-EINVAL);
return -EINVAL;
/* make sure all the pointers point inside the data */
if (len < lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens))
RETURN(-EINVAL);
return -EINVAL;
RETURN(0);
return 0;
}
#include <lustre/lustre_user.h>
......
......@@ -677,14 +677,14 @@ static inline __u64 fid_flatten(const struct lu_fid *fid)
if (fid_is_igif(fid)) {
ino = lu_igif_ino(fid);
RETURN(ino);
return ino;
}
seq = fid_seq(fid);
ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
RETURN(ino ? ino : fid_oid(fid));
return ino ? ino : fid_oid(fid);
}
static inline __u32 fid_hash(const struct lu_fid *f, int bits)
......@@ -703,7 +703,7 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid)
if (fid_is_igif(fid)) {
ino = lu_igif_ino(fid);
RETURN(ino);
return ino;
}
seq = fid_seq(fid) - FID_SEQ_START;
......@@ -717,7 +717,7 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid)
(seq >> (64 - (40-8)) & 0xffffff00) +
(fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
RETURN(ino ? ino : fid_oid(fid));
return ino ? ino : fid_oid(fid);
}
static inline int lu_fid_diff(struct lu_fid *fid1, struct lu_fid *fid2)
......
......@@ -471,12 +471,12 @@ static inline int llog_destroy(const struct lu_env *env,
rc = llog_handle2ops(handle, &lop);
if (rc)
RETURN(rc);
return rc;
if (lop->lop_destroy == NULL)
RETURN(-EOPNOTSUPP);
return -EOPNOTSUPP;
rc = lop->lop_destroy(env, handle);
RETURN(rc);
return rc;
}
static inline int llog_next_block(const struct lu_env *env,
......@@ -489,13 +489,13 @@ static inline int llog_next_block(const struct lu_env *env,
rc = llog_handle2ops(loghandle, &lop);
if (rc)
RETURN(rc);
return rc;
if (lop->lop_next_block == NULL)
RETURN(-EOPNOTSUPP);
return -EOPNOTSUPP;
rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx,
cur_offset, buf, len);
RETURN(rc);
return rc;
}
static inline int llog_prev_block(const struct lu_env *env,
......@@ -507,12 +507,12 @@ static inline int llog_prev_block(const struct lu_env *env,
rc = llog_handle2ops(loghandle, &lop);
if (rc)
RETURN(rc);
return rc;
if (lop->lop_prev_block == NULL)
RETURN(-EOPNOTSUPP);
return -EOPNOTSUPP;
rc = lop->lop_prev_block(env, loghandle, prev_idx, buf, len);
RETURN(rc);
return rc;
}
static inline int llog_connect(struct llog_ctxt *ctxt,
......@@ -524,12 +524,12 @@ static inline int llog_connect(struct llog_ctxt *ctxt,
rc = llog_obd2ops(ctxt, &lop);
if (rc)
RETURN(rc);
return rc;
if (lop->lop_connect == NULL)
RETURN(-EOPNOTSUPP);
return -EOPNOTSUPP;
rc = lop->lop_connect(ctxt, logid, gen, uuid);
RETURN(rc);
return rc;
}
/* llog.c */
......
......@@ -130,10 +130,10 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
cio->cui_glimpse = 0;
if (lock == NULL)
RETURN(0);
return 0;
if (IS_ERR(lock))
RETURN(PTR_ERR(lock));
return PTR_ERR(lock);
LASSERT(agl == 0);
result = cl_wait(env, lock);
......@@ -158,7 +158,7 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
}
}
RETURN(result);
return result;
}
static int cl_io_get(struct inode *inode, struct lu_env **envout,
......@@ -223,7 +223,7 @@ int cl_glimpse_size0(struct inode *inode, int agl)
goto again;
cl_env_put(env, &refcheck);
}
RETURN(result);
return result;
}
int cl_local_size(struct inode *inode)
......@@ -238,11 +238,11 @@ int cl_local_size(struct inode *inode)
int refcheck;
if (!cl_i2info(inode)->lli_has_smd)
RETURN(0);
return 0;
result = cl_io_get(inode, &env, &io, &refcheck);
if (result <= 0)
RETURN(result);
return result;
clob = io->ci_obj;
result = cl_io_init(env, io, CIT_MISC, clob);
......@@ -265,5 +265,5 @@ int cl_local_size(struct inode *inode)
}
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
RETURN(result);
return result;
}
......@@ -181,7 +181,7 @@ int ccc_device_init(const struct lu_env *env, struct lu_device *d,
lu_device_get(next);
lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
}
RETURN(rc);
return rc;
}
struct lu_device *ccc_device_fini(const struct lu_env *env,
......@@ -203,7 +203,7 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env,
OBD_ALLOC_PTR(vdv);
if (vdv == NULL)
RETURN(ERR_PTR(-ENOMEM));
return ERR_PTR(-ENOMEM);
lud = &vdv->cdv_cl.cd_lu_dev;
cl_device_init(&vdv->cdv_cl, t);
......@@ -226,7 +226,7 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env,
ccc_device_free(env, lud);
lud = ERR_PTR(rc);
}
RETURN(lud);
return lud;
}
struct lu_device *ccc_device_free(const struct lu_env *env,
......@@ -426,7 +426,7 @@ int ccc_object_glimpse(const struct lu_env *env,
*/
if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
lvb->lvb_blocks = dirty_cnt(inode);
RETURN(0);
return 0;
}
......@@ -490,7 +490,7 @@ int ccc_page_is_under_lock(const struct lu_env *env,
}
} else
result = 0;
RETURN(result);
return result;
}
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
......@@ -555,7 +555,7 @@ int ccc_transient_page_prep(const struct lu_env *env,
struct cl_io *unused)
{
/* transient page should always be sent. */
RETURN(0);
return 0;
}
/*****************************************************************************
......@@ -635,7 +635,7 @@ int ccc_lock_fits_into(const struct lu_env *env,
result = lock->cll_state >= CLS_ENQUEUED;
else
result = 1;
RETURN(result);
return result;
}
/**
......@@ -715,7 +715,7 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
descr->cld_enq_flags = enqflags;
cl_io_lock_add(env, io, &cio->cui_link);
RETURN(0);
return 0;
}
void ccc_io_update_iov(const struct lu_env *env,
......@@ -978,7 +978,7 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
env = cl_env_get(&refcheck);
if (IS_ERR(env))
RETURN(PTR_ERR(env));
return PTR_ERR(env);
io = ccc_env_thread_io(env);
io->ci_obj = cl_i2info(inode)->lli_clob;
......@@ -1007,7 +1007,7 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
if (unlikely(io->ci_need_restart))
goto again;
cl_env_put(env, &refcheck);
RETURN(result);
return result;
}
/*****************************************************************************
......@@ -1272,9 +1272,9 @@ __u16 ll_dirent_type_get(struct lu_dirent *ent)
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
if (BITS_PER_LONG == 32 || api32)
RETURN(fid_flatten32(fid));
return fid_flatten32(fid);
else
RETURN(fid_flatten(fid));
return fid_flatten(fid);
}
/**
......@@ -1286,11 +1286,11 @@ __u32 cl_fid_build_gen(const struct lu_fid *fid)
if (fid_is_igif(fid)) {
gen = lu_igif_gen(fid);
RETURN(gen);
return gen;
}
gen = (fid_flatten(fid) >> 32);
RETURN(gen);
return gen;
}
/* lsm is unreliable after hsm implementation as layout can be changed at
......
......@@ -61,7 +61,7 @@ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
&valsize, &desc, NULL);
if (rc)
RETURN(rc);
return rc;
stripes = min(desc.ld_tgt_count, (__u32)LOV_MAX_STRIPE_COUNT);
lsm.lsm_stripe_count = stripes;
......@@ -76,7 +76,7 @@ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
easize, cookiesize);
rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize);
RETURN(rc);
return rc;
}
/**
......@@ -114,7 +114,7 @@ int cl_ocd_update(struct obd_device *host,
watched->obd_name);
result = -EINVAL;
}
RETURN(result);
return result;
}
#define GROUPLOCK_SCOPE "grouplock"
......
......@@ -134,44 +134,44 @@ for (node = interval_last(root); node != NULL; \
static struct interval_node *interval_first(struct interval_node *node)
{
if (!node)
RETURN(NULL);
return NULL;
while (node->in_left)
node = node->in_left;
RETURN(node);
return node;
}
static struct interval_node *interval_last(struct interval_node *node)
{
if (!node)
RETURN(NULL);
return NULL;
while (node->in_right)
node = node->in_right;
RETURN(node);
return node;
}
static struct interval_node *interval_next(struct interval_node *node)
{
if (!node)
RETURN(NULL);
return NULL;
if (node->in_right)
RETURN(interval_first(node->in_right));
return interval_first(node->in_right);
while (node->in_parent && node_is_right_child(node))
node = node->in_parent;
RETURN(node->in_parent);
return node->in_parent;
}
static struct interval_node *interval_prev(struct interval_node *node)
{
if (!node)
RETURN(NULL);
return NULL;
if (node->in_left)
RETURN(interval_last(node->in_left));
return interval_last(node->in_left);
while (node->in_parent && node_is_left_child(node))
node = node->in_parent;
RETURN(node->in_parent);
return node->in_parent;
}
enum interval_iter interval_iterate(struct interval_node *root,
......@@ -187,7 +187,7 @@ enum interval_iter interval_iterate(struct interval_node *root,
break;
}
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(interval_iterate);
......@@ -204,7 +204,7 @@ enum interval_iter interval_iterate_reverse(struct interval_node *root,
break;
}
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(interval_iterate_reverse);
......@@ -226,7 +226,7 @@ struct interval_node *interval_find(struct interval_node *root,
walk = walk->in_right;
}
RETURN(walk);
return walk;
}
EXPORT_SYMBOL(interval_find);
......@@ -374,7 +374,7 @@ struct interval_node *interval_insert(struct interval_node *node,
while (*p) {
parent = *p;
if (node_equal(parent, node))
RETURN(parent);
return parent;
/* max_high field must be updated after each iteration */
if (parent->in_max_high < interval_high(node))
......@@ -395,7 +395,7 @@ struct interval_node *interval_insert(struct interval_node *node,
interval_insert_color(node, root);
node->in_intree = 1;
RETURN(NULL);
return NULL;
}
EXPORT_SYMBOL(interval_insert);
......
......@@ -85,7 +85,7 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
continue;
if (lck->l_policy_data.l_extent.end >= old_kms)
RETURN(old_kms);
return old_kms;
/* This extent _has_ to be smaller than old_kms (checked above)
* so kms can only ever be smaller or the same as old_kms. */
......@@ -94,7 +94,7 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
}
LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);
RETURN(kms);
return kms;
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
......@@ -106,11 +106,11 @@ struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
if (node == NULL)
RETURN(NULL);
return NULL;
INIT_LIST_HEAD(&node->li_group);
ldlm_interval_attach(node, lock);
RETURN(node);
return node;
}
void ldlm_interval_free(struct ldlm_interval *node)
......
......@@ -305,12 +305,12 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
continue;
if (!first_enq)
RETURN(LDLM_ITER_CONTINUE);
return LDLM_ITER_CONTINUE;
if (*flags & LDLM_FL_BLOCK_NOWAIT) {
ldlm_flock_destroy(req, mode, *flags);
*err = -EAGAIN;
RETURN(LDLM_ITER_STOP);
return LDLM_ITER_STOP;
}
if (*flags & LDLM_FL_TEST_LOCK) {
......@@ -323,7 +323,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
req->l_policy_data.l_flock.end =
lock->l_policy_data.l_flock.end;
*flags |= LDLM_FL_LOCK_CHANGED;
RETURN(LDLM_ITER_STOP);
return LDLM_ITER_STOP;
}
/* add lock to blocking list before deadlock
......@@ -332,18 +332,18 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
if (rc) {
ldlm_flock_destroy(req, mode, *flags);
*err = rc;
RETURN(LDLM_ITER_STOP);
return LDLM_ITER_STOP;
}
if (ldlm_flock_deadlock(req, lock)) {
ldlm_flock_blocking_unlink(req);
ldlm_flock_destroy(req, mode, *flags);
*err = -EDEADLK;
RETURN(LDLM_ITER_STOP);
return LDLM_ITER_STOP;
}
ldlm_resource_add_lock(res, &res->lr_waiting, req);
*flags |= LDLM_FL_BLOCK_GRANTED;
RETURN(LDLM_ITER_STOP);
return LDLM_ITER_STOP;
}
}
......@@ -351,7 +351,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
ldlm_flock_destroy(req, mode, *flags);
req->l_req_mode = LCK_NL;
*flags |= LDLM_FL_LOCK_CHANGED;
RETURN(LDLM_ITER_STOP);
return LDLM_ITER_STOP;
}
/* In case we had slept on this lock request take it off of the
......@@ -463,7 +463,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
ldlm_flock_destroy(req, lock->l_granted_mode,
*flags);
*err = -ENOLCK;
RETURN(LDLM_ITER_STOP);
return LDLM_ITER_STOP;
}
goto reprocess;
}
......@@ -530,7 +530,7 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
ldlm_flock_destroy(req, mode, *flags);
ldlm_resource_dump(D_INFO, res);
RETURN(LDLM_ITER_CONTINUE);
return LDLM_ITER_CONTINUE;
}
struct ldlm_flock_wait_data {
......@@ -591,7 +591,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
/* Need to wake up the waiter if we were evicted */
wake_up(&lock->l_waitq);
RETURN(0);
return 0;
}
LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
......@@ -603,7 +603,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
goto granted;
/* CP AST RPC: lock get granted, wake it up */
wake_up(&lock->l_waitq);
RETURN(0);
return 0;
}
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
......@@ -629,7 +629,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
if (rc) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
rc);
RETURN(rc);
return rc;
}
granted:
......@@ -637,18 +637,18 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
if (lock->l_flags & LDLM_FL_DESTROYED) {
LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
RETURN(0);
return 0;
}
if (lock->l_flags & LDLM_FL_FAILED) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
RETURN(-EIO);
return -EIO;
}
if (rc) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
rc);
RETURN(rc);
return rc;
}
LDLM_DEBUG(lock, "client-side enqueue granted");
......@@ -690,7 +690,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
}
unlock_res_and_lock(lock);
RETURN(0);
return 0;
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
......@@ -704,7 +704,7 @@ int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
lock_res_and_lock(lock);
ldlm_flock_blocking_unlink(lock);
unlock_res_and_lock(lock);
RETURN(0);
return 0;
}
void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
......@@ -825,9 +825,9 @@ int ldlm_init_flock_export(struct obd_export *exp)
&ldlm_export_flock_ops,
CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
if (!exp->exp_flock_hash)
RETURN(-ENOMEM);
return -ENOMEM;
RETURN(0);
return 0;
}
EXPORT_SYMBOL(ldlm_init_flock_export);
......
......@@ -63,13 +63,13 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
if (!create && !priority) {
CDEBUG(D_HA, "Nothing to do\n");
RETURN(-EINVAL);
return -EINVAL;
}
ptlrpc_conn = ptlrpc_uuid_to_connection(uuid);
if (!ptlrpc_conn) {
CDEBUG(D_HA, "can't find connection %s\n", uuid->uuid);
RETURN (-ENOENT);
return -ENOENT;
}
if (create) {
......@@ -114,13 +114,13 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
}
spin_unlock(&imp->imp_lock);
RETURN(0);
return 0;
out_free:
if (imp_conn)
OBD_FREE(imp_conn, sizeof(*imp_conn));
out_put:
ptlrpc_connection_put(ptlrpc_conn);
RETURN(rc);
return rc;
}
int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid)
......@@ -185,7 +185,7 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
spin_unlock(&imp->imp_lock);
if (rc == -ENOENT)
CERROR("connection %s not found\n", uuid->uuid);
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(client_import_del_conn);
......@@ -209,7 +209,7 @@ int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
}
}
spin_unlock(&imp->imp_lock);
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(client_import_find_conn);
......@@ -301,27 +301,27 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
} else {
CERROR("unknown client OBD type \"%s\", can't setup\n",
name);
RETURN(-EINVAL);
return -EINVAL;
}
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("requires a TARGET UUID\n");
RETURN(-EINVAL);
return -EINVAL;
}
if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) {
CERROR("client UUID must be less than 38 characters\n");
RETURN(-EINVAL);
return -EINVAL;
}
if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) {
CERROR("setup requires a SERVER UUID\n");
RETURN(-EINVAL);
return -EINVAL;
}
if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) {
CERROR("target UUID must be less than 38 characters\n");
RETURN(-EINVAL);
return -EINVAL;
}
init_rwsem(&cli->cl_sem);
......@@ -448,14 +448,14 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
RETURN(rc);
return rc;
err_import:
class_destroy_import(imp);
err_ldlm:
ldlm_put_ref();
err:
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(client_obd_setup);
......@@ -468,7 +468,7 @@ int client_obd_cleanup(struct obd_device *obddev)
LASSERT(obddev->u.cli.cl_import == NULL);
ldlm_put_ref();
RETURN(0);
return 0;
}
EXPORT_SYMBOL(client_obd_cleanup);
......@@ -548,7 +548,7 @@ int client_disconnect_export(struct obd_export *exp)
if (!obd) {
CERROR("invalid export for disconnect: exp %p cookie "LPX64"\n",
exp, exp ? exp->exp_handle.h_cookie : -1);
RETURN(-EINVAL);
return -EINVAL;
}
cli = &obd->u.cli;
......@@ -604,7 +604,7 @@ int client_disconnect_export(struct obd_export *exp)
up_write(&cli->cl_sem);
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(client_disconnect_export);
......@@ -622,7 +622,7 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
!exp_connect_lru_resize(req->rq_export))) {
lustre_msg_set_slv(req->rq_repmsg, 0);
lustre_msg_set_limit(req->rq_repmsg, 0);
RETURN(0);
return 0;
}
/* OBD is alive here as export is alive, which we checked above. */
......@@ -633,7 +633,7 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
read_unlock(&obd->obd_pool_lock);
RETURN(0);
return 0;
}
EXPORT_SYMBOL(target_pack_pool_reply);
......
......@@ -251,7 +251,7 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
if (lock->l_flags & LDLM_FL_NS_SRV) {
LASSERT(list_empty(&lock->l_lru));
RETURN(0);
return 0;
}
spin_lock(&ns->ns_lock);
......@@ -439,7 +439,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, __GFP_IO);
if (lock == NULL)
RETURN(NULL);
return NULL;
spin_lock_init(&lock->l_lock);
lock->l_resource = resource;
......@@ -475,7 +475,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
#endif
INIT_LIST_HEAD(&lock->l_exp_list);
RETURN(lock);
return lock;
}
/**
......@@ -497,7 +497,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
sizeof(lock->l_resource->lr_name)) == 0) {
/* Nothing to do */
unlock_res_and_lock(lock);
RETURN(0);
return 0;
}
LASSERT(new_resid->name[0] != 0);
......@@ -510,7 +510,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
if (newres == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
lu_ref_add(&newres->lr_reference, "lock", lock);
/*
......@@ -538,7 +538,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
lu_ref_del(&oldres->lr_reference, "lock", lock);
ldlm_resource_putref(oldres);
RETURN(0);
return 0;
}
EXPORT_SYMBOL(ldlm_lock_change_resource);
......@@ -572,13 +572,13 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
lock = class_handle2object(handle->cookie);
if (lock == NULL)
RETURN(NULL);
return NULL;
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it */
if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
lu_ref_add(&lock->l_reference, "handle", current);
RETURN(lock);
return lock;
}
lock_res_and_lock(lock);
......@@ -590,20 +590,20 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
LDLM_LOCK_PUT(lock);
RETURN(NULL);
return NULL;
}
if (flags && (lock->l_flags & flags)) {
unlock_res_and_lock(lock);
LDLM_LOCK_PUT(lock);
RETURN(NULL);
return NULL;
}
if (flags)
lock->l_flags |= flags;
unlock_res_and_lock(lock);
RETURN(lock);
return lock;
}
EXPORT_SYMBOL(__ldlm_handle2lock);
/** @} ldlm_handles */
......@@ -1280,7 +1280,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
res = ldlm_resource_get(ns, NULL, res_id, type, 0);
if (res == NULL) {
LASSERT(old_lock == NULL);
RETURN(0);
return 0;
}
LDLM_RESOURCE_ADDREF(res);
......@@ -1433,7 +1433,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lustre_swab_ost_lvb);
if (unlikely(lvb == NULL)) {
LDLM_ERROR(lock, "no LVB");
RETURN(-EPROTO);
return -EPROTO;
}
memcpy(data, lvb, size);
......@@ -1450,7 +1450,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lustre_swab_ost_lvb_v1);
if (unlikely(lvb == NULL)) {
LDLM_ERROR(lock, "no LVB");
RETURN(-EPROTO);
return -EPROTO;
}
memcpy(data, lvb, size);
......@@ -1460,7 +1460,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
} else {
LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
size);
RETURN(-EINVAL);
return -EINVAL;
}
break;
case LVB_T_LQUOTA:
......@@ -1475,14 +1475,14 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lustre_swab_lquota_lvb);
if (unlikely(lvb == NULL)) {
LDLM_ERROR(lock, "no LVB");
RETURN(-EPROTO);
return -EPROTO;
}
memcpy(data, lvb, size);
} else {
LDLM_ERROR(lock, "Replied unexpected lquota LVB size %d",
size);
RETURN(-EINVAL);
return -EINVAL;
}
break;
case LVB_T_LAYOUT:
......@@ -1495,7 +1495,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
if (unlikely(lvb == NULL)) {
LDLM_ERROR(lock, "no LVB");
RETURN(-EPROTO);
return -EPROTO;
}
memcpy(data, lvb, size);
......@@ -1503,10 +1503,10 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
default:
LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
dump_stack();
RETURN(-EINVAL);
return -EINVAL;
}
RETURN(0);
return 0;
}
/**
......@@ -1526,12 +1526,12 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
res = ldlm_resource_get(ns, NULL, res_id, type, 1);
if (res == NULL)
RETURN(NULL);
return NULL;
lock = ldlm_lock_new(res);
if (lock == NULL)
RETURN(NULL);
return NULL;
lock->l_req_mode = mode;
lock->l_ast_data = data;
......@@ -1562,7 +1562,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
GOTO(out, 0);
RETURN(lock);
return lock;
out:
ldlm_lock_destroy(lock);
......@@ -1606,11 +1606,11 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
LDLM_LOCK_RELEASE(lock);
}
*flags |= LDLM_FL_LOCK_CHANGED;
RETURN(0);
return 0;
} else if (rc != ELDLM_OK ||
(rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
ldlm_lock_destroy(lock);
RETURN(rc);
return rc;
}
}
......@@ -1692,7 +1692,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
struct ldlm_lock *lock;
if (list_empty(arg->list))
RETURN(-ENOENT);
return -ENOENT;
lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
......@@ -1713,7 +1713,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
lock->l_blocking_lock = NULL;
LDLM_LOCK_RELEASE(lock);
RETURN(rc);
return rc;
}
/**
......@@ -1728,7 +1728,7 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
ldlm_completion_callback completion_callback;
if (list_empty(arg->list))
RETURN(-ENOENT);
return -ENOENT;
lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
......@@ -1757,7 +1757,7 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
rc = completion_callback(lock, 0, (void *)arg);
LDLM_LOCK_RELEASE(lock);
RETURN(rc);
return rc;
}
/**
......@@ -1772,7 +1772,7 @@ ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
struct ldlm_lock *lock;
if (list_empty(arg->list))
RETURN(-ENOENT);
return -ENOENT;
lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
list_del_init(&lock->l_rk_ast);
......@@ -1785,7 +1785,7 @@ ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
rc = lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
LDLM_LOCK_RELEASE(lock);
RETURN(rc);
return rc;
}
/**
......@@ -1799,7 +1799,7 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
int rc = 0;
if (list_empty(arg->list))
RETURN(-ENOENT);
return -ENOENT;
gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
gl_list);
......@@ -1819,7 +1819,7 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0)
OBD_FREE_PTR(gl_work);
RETURN(rc);
return rc;
}
/**
......@@ -1836,11 +1836,11 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
int rc;
if (list_empty(rpc_list))
RETURN(0);
return 0;
OBD_ALLOC_PTR(arg);
if (arg == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
atomic_set(&arg->restart, 0);
arg->list = rpc_list;
......@@ -2027,7 +2027,7 @@ int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
rc = 0;
LDLM_LOCK_PUT(lock);
}
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(ldlm_lock_set_data);
......@@ -2136,7 +2136,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
/* Just return if mode is unchanged. */
if (new_mode == lock->l_granted_mode) {
*flags |= LDLM_FL_BLOCK_GRANTED;
RETURN(lock->l_resource);
return lock->l_resource;
}
/* I can't check the type of lock here because the bitlock of lock
......@@ -2144,7 +2144,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
if (node == NULL)
/* Actually, this causes EDEADLOCK to be returned */
RETURN(NULL);
return NULL;
LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
"new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
......@@ -2203,7 +2203,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
if (node)
OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
RETURN(res);
return res;
}
EXPORT_SYMBOL(ldlm_lock_convert);
......
......@@ -127,12 +127,12 @@ struct ldlm_bl_work_item {
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
RETURN(0);
return 0;
}
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
RETURN(0);
return 0;
}
......@@ -395,7 +395,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
if (!(cancel_flags & LCF_ASYNC))
wait_for_completion(&blwi->blwi_comp);
RETURN(0);
return 0;
}
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
......@@ -440,17 +440,17 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
ldlm_cancel_flags_t cancel_flags)
{
if (cancels && count == 0)
RETURN(0);
return 0;
if (cancel_flags & LCF_ASYNC) {
struct ldlm_bl_work_item *blwi;
OBD_ALLOC(blwi, sizeof(*blwi));
if (blwi == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
RETURN(__ldlm_bl_to_thread(blwi, cancel_flags));
return __ldlm_bl_to_thread(blwi, cancel_flags);
} else {
/* if it is synchronous call do minimum mem alloc, as it could
* be triggered from kernel shrinker
......@@ -459,7 +459,7 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
memset(&blwi, 0, sizeof(blwi));
init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
RETURN(__ldlm_bl_to_thread(&blwi, cancel_flags));
return __ldlm_bl_to_thread(&blwi, cancel_flags);
}
}
......@@ -493,14 +493,14 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req)
key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
if (key == NULL) {
DEBUG_REQ(D_IOCTL, req, "no set_info key");
RETURN(-EFAULT);
return -EFAULT;
}
keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
RCL_CLIENT);
val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
if (val == NULL) {
DEBUG_REQ(D_IOCTL, req, "no set_info val");
RETURN(-EFAULT);
return -EFAULT;
}
vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
RCL_CLIENT);
......@@ -542,7 +542,7 @@ static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
if (oqctl == NULL) {
CERROR("Can't unpack obd_quotactl\n");
RETURN(-EPROTO);
return -EPROTO;
}
oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);
......@@ -566,7 +566,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
/* do nothing for sec context finalize */
if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
RETURN(0);
return 0;
req_capsule_init(&req->rq_pill, req, RCL_SERVER);
......@@ -574,7 +574,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_callback_reply(req, -ENOTCONN);
ldlm_callback_errmsg(req, "Operate on unconnected server",
rc, NULL);
RETURN(0);
return 0;
}
LASSERT(req->rq_export != NULL);
......@@ -583,71 +583,71 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case LDLM_BL_CALLBACK:
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
RETURN(0);
return 0;
break;
case LDLM_CP_CALLBACK:
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
RETURN(0);
return 0;
break;
case LDLM_GL_CALLBACK:
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
RETURN(0);
return 0;
break;
case LDLM_SET_INFO:
rc = ldlm_handle_setinfo(req);
ldlm_callback_reply(req, rc);
RETURN(0);
return 0;
case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
RETURN(0);
return 0;
rc = llog_origin_handle_cancel(req);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
RETURN(0);
return 0;
ldlm_callback_reply(req, rc);
RETURN(0);
return 0;
case LLOG_ORIGIN_HANDLE_CREATE:
req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
RETURN(0);
return 0;
rc = llog_origin_handle_open(req);
ldlm_callback_reply(req, rc);
RETURN(0);
return 0;
case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
req_capsule_set(&req->rq_pill,
&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
RETURN(0);
return 0;
rc = llog_origin_handle_next_block(req);
ldlm_callback_reply(req, rc);
RETURN(0);
return 0;
case LLOG_ORIGIN_HANDLE_READ_HEADER:
req_capsule_set(&req->rq_pill,
&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
RETURN(0);
return 0;
rc = llog_origin_handle_read_header(req);
ldlm_callback_reply(req, rc);
RETURN(0);
return 0;
case LLOG_ORIGIN_HANDLE_CLOSE:
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
RETURN(0);
return 0;
rc = llog_origin_handle_close(req);
ldlm_callback_reply(req, rc);
RETURN(0);
return 0;
case OBD_QC_CALLBACK:
req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
RETURN(0);
return 0;
rc = ldlm_handle_qc_callback(req);
ldlm_callback_reply(req, rc);
RETURN(0);
return 0;
default:
CERROR("unknown opcode %u\n",
lustre_msg_get_opc(req->rq_reqmsg));
ldlm_callback_reply(req, -EPROTO);
RETURN(0);
return 0;
}
ns = req->rq_export->exp_obd->obd_namespace;
......@@ -660,7 +660,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_callback_reply(req, -EPROTO);
ldlm_callback_errmsg(req, "Operate without parameter", rc,
NULL);
RETURN(0);
return 0;
}
/* Force a known safe race, send a cancel to the server for a lock
......@@ -679,7 +679,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_callback_reply(req, -EINVAL);
ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
&dlm_req->lock_handle[0]);
RETURN(0);
return 0;
}
if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
......@@ -706,7 +706,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_callback_reply(req, -EINVAL);
ldlm_callback_errmsg(req, "Operate on stale lock", rc,
&dlm_req->lock_handle[0]);
RETURN(0);
return 0;
}
/* BL_AST locks are not needed in LRU.
* Let ldlm_cancel_lru() be fast. */
......@@ -752,7 +752,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
LBUG(); /* checked above */
}
RETURN(0);
return 0;
}
......@@ -894,7 +894,7 @@ static int ldlm_bl_thread_main(void *arg)
atomic_dec(&blp->blp_busy_threads);
atomic_dec(&blp->blp_num_threads);
complete(&blp->blp_comp);
RETURN(0);
return 0;
}
......@@ -913,7 +913,7 @@ int ldlm_get_ref(void)
}
mutex_unlock(&ldlm_ref_mutex);
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(ldlm_get_ref);
......@@ -1014,9 +1014,9 @@ int ldlm_init_export(struct obd_export *exp)
CFS_HASH_NBLK_CHANGE);
if (!exp->exp_lock_hash)
RETURN(-ENOMEM);
return -ENOMEM;
RETURN(0);
return 0;
}
EXPORT_SYMBOL(ldlm_init_export);
......@@ -1037,11 +1037,11 @@ static int ldlm_setup(void)
int i;
if (ldlm_state != NULL)
RETURN(-EALREADY);
return -EALREADY;
OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
if (ldlm_state == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
#ifdef LPROCFS
rc = ldlm_proc_setup();
......@@ -1121,11 +1121,11 @@ static int ldlm_setup(void)
CERROR("Failed to initialize LDLM pools: %d\n", rc);
GOTO(out, rc);
}
RETURN(0);
return 0;
out:
ldlm_cleanup();
RETURN(rc);
return rc;
}
static int ldlm_cleanup(void)
......@@ -1135,7 +1135,7 @@ static int ldlm_cleanup(void)
CERROR("ldlm still has namespaces; clean these up first.\n");
ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
RETURN(-EBUSY);
return -EBUSY;
}
ldlm_pools_fini();
......@@ -1168,7 +1168,7 @@ static int ldlm_cleanup(void)
OBD_FREE(ldlm_state, sizeof(*ldlm_state));
ldlm_state = NULL;
RETURN(0);
return 0;
}
int ldlm_init(void)
......
......@@ -338,13 +338,13 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
RETURN(0);
return 0;
spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period) {
spin_unlock(&pl->pl_lock);
RETURN(0);
return 0;
}
/*
* Recalc SLV after last period. This should be done
......@@ -366,7 +366,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
recalc_interval_sec);
spin_unlock(&pl->pl_lock);
RETURN(0);
return 0;
}
/**
......@@ -393,7 +393,7 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
* and can't cancel anything. Let's catch this race.
*/
if (atomic_read(&pl->pl_granted) == 0)
RETURN(0);
return 0;
spin_lock(&pl->pl_lock);
......@@ -475,7 +475,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
RETURN(0);
return 0;
spin_lock(&pl->pl_lock);
/*
......@@ -484,7 +484,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period) {
spin_unlock(&pl->pl_lock);
RETURN(0);
return 0;
}
/*
......@@ -501,7 +501,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
* Do not cancel locks in case lru resize is disabled for this ns.
*/
if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
RETURN(0);
return 0;
/*
* In the time of canceling locks on client we do not need to maintain
......@@ -509,8 +509,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
* It may be called when SLV has changed much, this is why we do not
* take into account pl->pl_recalc_time here.
*/
RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC,
LDLM_CANCEL_LRUR));
return ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
}
/**
......@@ -530,7 +529,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
* Do not cancel locks in case lru resize is disabled for this ns.
*/
if (!ns_connect_lru_resize(ns))
RETURN(0);
return 0;
/*
* Make sure that pool knows last SLV and Limit from obd.
......@@ -734,7 +733,7 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
if (!var_name)
RETURN(-ENOMEM);
return -ENOMEM;
parent_ns_proc = ns->ns_proc_dir_entry;
if (parent_ns_proc == NULL) {
......@@ -858,11 +857,11 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
pl->pl_client_lock_volume = 0;
rc = ldlm_pool_proc_init(pl);
if (rc)
RETURN(rc);
return rc;
CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(ldlm_pool_init);
......@@ -1343,11 +1342,11 @@ static int ldlm_pools_thread_start(void)
task_t *task;
if (ldlm_pools_thread != NULL)
RETURN(-EALREADY);
return -EALREADY;
OBD_ALLOC_PTR(ldlm_pools_thread);
if (ldlm_pools_thread == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
init_completion(&ldlm_pools_comp);
init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
......@@ -1358,11 +1357,11 @@ static int ldlm_pools_thread_start(void)
CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
ldlm_pools_thread = NULL;
RETURN(PTR_ERR(task));
return PTR_ERR(task);
}
l_wait_event(ldlm_pools_thread->t_ctl_waitq,
thread_is_running(ldlm_pools_thread), &lwi);
RETURN(0);
return 0;
}
static void ldlm_pools_thread_stop(void)
......@@ -1397,7 +1396,7 @@ int ldlm_pools_init(void)
set_shrinker(DEFAULT_SEEKS,
ldlm_pools_cli_shrink);
}
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);
......
......@@ -78,7 +78,7 @@ static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer,
{
ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
RETURN(count);
return count;
}
LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
......@@ -126,7 +126,7 @@ int ldlm_proc_setup(void)
rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
RETURN(0);
return 0;
err_ns:
lprocfs_remove(&ldlm_ns_proc_dir);
......@@ -136,7 +136,7 @@ int ldlm_proc_setup(void)
ldlm_svc_proc_dir = NULL;
ldlm_type_proc_dir = NULL;
ldlm_ns_proc_dir = NULL;
RETURN(rc);
return rc;
}
void ldlm_proc_cleanup(void)
......@@ -573,7 +573,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
rc = ldlm_get_ref();
if (rc) {
CERROR("ldlm_get_ref failed: %d\n", rc);
RETURN(NULL);
return NULL;
}
for (idx = 0;;idx++) {
......@@ -647,7 +647,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
}
ldlm_namespace_register(ns, client);
RETURN(ns);
return ns;
out_proc:
ldlm_namespace_proc_unregister(ns);
ldlm_namespace_cleanup(ns, 0);
......@@ -657,7 +657,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
OBD_FREE_PTR(ns);
out_ref:
ldlm_put_ref();
RETURN(NULL);
return NULL;
}
EXPORT_SYMBOL(ldlm_namespace_new);
......@@ -837,13 +837,13 @@ static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
"with %d resources in use, (rc=%d)\n",
ldlm_ns_name(ns),
atomic_read(&ns->ns_bref), rc);
RETURN(ELDLM_NAMESPACE_EXISTS);
return ELDLM_NAMESPACE_EXISTS;
}
CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
ldlm_ns_name(ns));
}
RETURN(ELDLM_OK);
return ELDLM_OK;
}
/**
......
......@@ -458,14 +458,6 @@ void libcfs_debug_set_level(unsigned int debug_level)
EXPORT_SYMBOL(libcfs_debug_set_level);
long libcfs_log_return(struct libcfs_debug_msg_data *msgdata, long rc)
{
libcfs_debug_msg(msgdata, "Process leaving (rc=%lu : %ld : %lx)\n",
rc, rc, rc);
return rc;
}
EXPORT_SYMBOL(libcfs_log_return);
void libcfs_log_goto(struct libcfs_debug_msg_data *msgdata, const char *label,
long_ptr_t rc)
{
......
......@@ -1053,7 +1053,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
if (hs == NULL)
RETURN(NULL);
return NULL;
strncpy(hs->hs_name, name, len);
hs->hs_name[len - 1] = '\0';
......@@ -1085,7 +1085,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
return hs;
LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
RETURN(NULL);
return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
......@@ -1483,7 +1483,7 @@ cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
cfs_hash_unlock(hs, 0);
cfs_hash_for_each_exit(hs);
RETURN(count);
return count;
}
typedef struct {
......@@ -1645,18 +1645,18 @@ cfs_hash_for_each_nolock(cfs_hash_t *hs,
if (cfs_hash_with_no_lock(hs) ||
cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs))
RETURN(-EOPNOTSUPP);
return -EOPNOTSUPP;
if (CFS_HOP(hs, get) == NULL ||
(CFS_HOP(hs, put) == NULL &&
CFS_HOP(hs, put_locked) == NULL))
RETURN(-EOPNOTSUPP);
return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
cfs_hash_for_each_relax(hs, func, data);
cfs_hash_for_each_exit(hs);
RETURN(0);
return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_nolock);
......@@ -1691,7 +1691,7 @@ cfs_hash_for_each_empty(cfs_hash_t *hs,
hs->hs_name, i++);
}
cfs_hash_for_each_exit(hs);
RETURN(0);
return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_empty);
......
......@@ -248,7 +248,7 @@ int libcfs_kkuc_group_rem(int uid, int group)
struct kkuc_reg *reg, *next;
if (kkuc_groups[group].next == NULL)
RETURN(0);
return 0;
if (uid == 0) {
/* Broadcast a shutdown message */
......@@ -274,7 +274,7 @@ int libcfs_kkuc_group_rem(int uid, int group)
}
up_write(&kg_sem);
RETURN(0);
return 0;
}
EXPORT_SYMBOL(libcfs_kkuc_group_rem);
......@@ -303,7 +303,7 @@ int libcfs_kkuc_group_put(int group, void *payload)
if (one_success)
rc = 0;
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(libcfs_kkuc_group_put);
......@@ -321,12 +321,12 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
if (group > KUC_GRP_MAX) {
CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
RETURN(-EINVAL);
return -EINVAL;
}
/* no link for this group */
if (kkuc_groups[group].next == NULL)
RETURN(0);
return 0;
down_read(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
......@@ -336,7 +336,7 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
}
up_read(&kg_sem);
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
......
......@@ -215,12 +215,12 @@ int cfs_get_environ(const char *key, char *value, int *val_len)
buffer = kmalloc(buf_len, GFP_USER);
if (!buffer)
RETURN(-ENOMEM);
return -ENOMEM;
mm = get_task_mm(current);
if (!mm) {
kfree(buffer);
RETURN(-EINVAL);
return -EINVAL;
}
/* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
......
......@@ -51,31 +51,31 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
if (err)
RETURN(err);
return err;
if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
CERROR("PORTALS: version mismatch kernel vs application\n");
RETURN(-EINVAL);
return -EINVAL;
}
if (hdr->ioc_len + buf >= end) {
CERROR("PORTALS: user buffer exceeds kernel buffer\n");
RETURN(-EINVAL);
return -EINVAL;
}
if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) {
CERROR("PORTALS: user buffer too small for ioctl\n");
RETURN(-EINVAL);
return -EINVAL;
}
err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
if (err)
RETURN(err);
return err;
if (libcfs_ioctl_is_invalid(data)) {
CERROR("PORTALS: ioctl not correctly formatted\n");
RETURN(-EINVAL);
return -EINVAL;
}
if (data->ioc_inllen1)
......@@ -85,7 +85,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
data->ioc_inlbuf2 = &data->ioc_bulk[0] +
cfs_size_round(data->ioc_inllen1);
RETURN(0);
return 0;
}
int libcfs_ioctl_popdata(void *arg, void *data, int size)
......
......@@ -165,7 +165,7 @@ static int libcfs_psdev_open(unsigned long flags, void *args)
}
*(struct libcfs_device_userstate **)args = ldu;
RETURN(0);
return 0;
}
/* called when closing /dev/device */
......@@ -180,7 +180,7 @@ static int libcfs_psdev_release(unsigned long flags, void *args)
}
module_put(THIS_MODULE);
RETURN(0);
return 0;
}
static struct rw_semaphore ioctl_list_sem;
......@@ -224,7 +224,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
switch (cmd) {
case IOC_LIBCFS_CLEAR_DEBUG:
libcfs_debug_clear_buffer();
RETURN(0);
return 0;
/*
* case IOC_LIBCFS_PANIC:
* Handled in arch/cfs_module.c
......@@ -232,9 +232,9 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
case IOC_LIBCFS_MARK_DEBUG:
if (data->ioc_inlbuf1 == NULL ||
data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
RETURN(-EINVAL);
return -EINVAL;
libcfs_debug_mark_buffer(data->ioc_inlbuf1);
RETURN(0);
return 0;
#if LWT_SUPPORT
case IOC_LIBCFS_LWT_CONTROL:
err = lwt_control ((data->ioc_flags & 1) != 0,
......@@ -298,7 +298,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
ping(data);
symbol_put(kping_client);
}
RETURN(0);
return 0;
}
default: {
......@@ -319,7 +319,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
}
}
RETURN(err);
return err;
}
static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
......@@ -330,7 +330,7 @@ static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *a
LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS);
if (buf == NULL)
RETURN(-ENOMEM);
return -ENOMEM;
/* 'cmd' and permissions get checked in our arch-specific caller */
if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) {
......@@ -343,7 +343,7 @@ static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *a
out:
LIBCFS_FREE(buf, 1024);
RETURN(err);
return err;
}
......
......@@ -793,15 +793,15 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
rc = cfs_gettok(&src, ' ', &res);
if (rc == 0) {
cfs_free_nidlist(nidlist);
RETURN(0);
return 0;
}
rc = parse_nidrange(&res, nidlist);
if (rc == 0) {
cfs_free_nidlist(nidlist);
RETURN(0);
return 0;
}
}
RETURN(1);
return 1;
}
/*
......@@ -840,13 +840,13 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
continue;
if (nr->nr_all)
RETURN(1);
return 1;
list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
&ar->ar_numaddr_ranges))
RETURN(1);
return 1;
}
RETURN(0);
return 0;
}
......
......@@ -175,7 +175,7 @@ struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
new = alloc_entry(cache, key, args);
if (!new) {
CERROR("fail to alloc entry\n");
RETURN(ERR_PTR(-ENOMEM));
return ERR_PTR(-ENOMEM);
}
goto find_again;
} else {
......@@ -265,7 +265,7 @@ struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
/* Now we know it's good */
out:
spin_unlock(&cache->uc_lock);
RETURN(entry);
return entry;
}
EXPORT_SYMBOL(upcall_cache_get_entry);
......@@ -308,7 +308,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
cache->uc_name, key);
/* haven't found, it's possible */
spin_unlock(&cache->uc_lock);
RETURN(-EINVAL);
return -EINVAL;
}
if (err) {
......@@ -350,7 +350,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
wake_up_all(&entry->ue_waitq);
put_entry(cache, entry);
RETURN(rc);
return rc;
}
EXPORT_SYMBOL(upcall_cache_downcall);
......@@ -425,7 +425,7 @@ struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
LIBCFS_ALLOC(cache, sizeof(*cache));
if (!cache)
RETURN(ERR_PTR(-ENOMEM));
return ERR_PTR(-ENOMEM);
spin_lock_init(&cache->uc_lock);
rwlock_init(&cache->uc_upcall_rwlock);
......@@ -438,7 +438,7 @@ struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
cache->uc_acquire_expire = 30;
cache->uc_ops = ops;
RETURN(cache);
return cache;
}
EXPORT_SYMBOL(upcall_cache_init);
......
......@@ -86,10 +86,10 @@ int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
if (len != name->len)
RETURN(1);
return 1;
if (memcmp(str, name->name, len))
RETURN(1);
return 1;
CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n",
name->len, name->name, dentry, dentry->d_flags,
......@@ -97,12 +97,12 @@ int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
/* mountpoint is always valid */
if (d_mountpoint((struct dentry *)dentry))
RETURN(0);
return 0;
if (d_lustre_invalid(dentry))
RETURN(1);
return 1;
RETURN(0);
return 0;
}
static inline int return_if_equal(struct ldlm_lock *lock, void *data)
......@@ -128,16 +128,16 @@ static int find_cbdata(struct inode *inode)
rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
return_if_equal, NULL);
if (rc != 0)
RETURN(rc);
return rc;
lsm = ccc_inode_lsm_get(inode);
if (lsm == NULL)
RETURN(rc);
return rc;
rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
ccc_inode_lsm_put(inode, lsm);
RETURN(rc);
return rc;
}
/**
......@@ -172,8 +172,8 @@ static int ll_ddelete(const struct dentry *de)
#endif
if (d_lustre_invalid((struct dentry *)de))
RETURN(1);
RETURN(0);
return 1;
return 0;
}
static int ll_set_dd(struct dentry *de)
......@@ -196,11 +196,11 @@ static int ll_set_dd(struct dentry *de)
OBD_FREE_PTR(lld);
spin_unlock(&de->d_lock);
} else {
RETURN(-ENOMEM);
return -ENOMEM;
}
}
RETURN(0);
return 0;
}
int ll_dops_init(struct dentry *de, int block, int init_sa)
......@@ -304,14 +304,14 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
int rc = 0;
if (!request)
RETURN(0);
return 0;
if (it_disposition(it, DISP_LOOKUP_NEG))
RETURN(-ENOENT);
return -ENOENT;
rc = ll_prep_inode(&de->d_inode, request, NULL, it);
RETURN(rc);
return rc;
}
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
......@@ -368,10 +368,10 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
away this negative dentry and actually do the request to
kernel to create whatever needs to be created (if possible)*/
if (it && (it->it_op & IT_CREAT))
RETURN(0);
return 0;
if (d_lustre_invalid(de))
RETURN(0);
return 0;
ibits = MDS_INODELOCK_UPDATE;
rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
......@@ -398,7 +398,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
LASSERT(it);
if (it->it_op == IT_LOOKUP && !d_lustre_invalid(de))
RETURN(1);
return 1;
if (it->it_op == IT_OPEN) {
struct inode *inode = de->d_inode;
......@@ -445,7 +445,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
if it would be, we'll reopen the open request to
MDS later during file open path */
mutex_unlock(&lli->lli_och_mutex);
RETURN(1);
return 1;
} else {
mutex_unlock(&lli->lli_och_mutex);
}
......@@ -464,7 +464,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
de->d_name.name, de->d_name.len,
0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
return PTR_ERR(op_data);
if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
it->it_create_mode &= ~current_umask();
......@@ -551,7 +551,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
mark:
if (it != NULL && it->it_op == IT_GETATTR && rc > 0)
ll_statahead_mark(parent, de);
RETURN(rc);
return rc;
/*
* This part is here to combat evil-evil race in real_lookup on 2.6
......@@ -583,7 +583,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
LUSTRE_OPC_CREATE :
LUSTRE_OPC_ANY), NULL);
if (IS_ERR(op_data))
RETURN(PTR_ERR(op_data));
return PTR_ERR(op_data);
rc = md_intent_lock(exp, op_data, NULL, 0, it, 0, &req,
ll_md_blocking_ast, 0);
......@@ -630,7 +630,7 @@ int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
if (!(flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE)) &&
ll_need_statahead(parent, dentry) > 0) {
if (flags & LOOKUP_RCU)
RETURN(-ECHILD);
return -ECHILD;
if (dentry->d_inode == NULL)
unplug = 1;
......@@ -638,7 +638,7 @@ int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
ll_statahead_mark(parent, dentry);
}
RETURN(1);
return 1;
}
......
......@@ -279,7 +279,7 @@ static int capa_thread_main(void *unused)
thread_set_flags(&ll_capa_thread, SVC_STOPPED);
wake_up(&ll_capa_thread.t_ctl_waitq);
RETURN(0);
return 0;
}
void ll_capa_timer_callback(unsigned long unused)
......@@ -297,12 +297,12 @@ int ll_capa_thread_start(void)
if (IS_ERR(task)) {
CERROR("cannot start expired capa thread: rc %ld\n",
PTR_ERR(task));
RETURN(PTR_ERR(task));
return PTR_ERR(task);
}
wait_event(ll_capa_thread.t_ctl_waitq,
thread_is_running(&ll_capa_thread));
RETURN(0);
return 0;
}
void ll_capa_thread_stop(void)
......@@ -320,7 +320,7 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
int found = 0;
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_OSS_CAPA) == 0)
RETURN(NULL);
return NULL;
LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
opc == CAPA_OPC_OSS_TRUNC);
......@@ -364,7 +364,7 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
}
spin_unlock(&capa_lock);
RETURN(ocapa);
return ocapa;
}
EXPORT_SYMBOL(ll_osscapa_get);
......@@ -376,7 +376,7 @@ struct obd_capa *ll_mdscapa_get(struct inode *inode)
LASSERT(inode != NULL);
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
RETURN(NULL);
return NULL;
spin_lock(&capa_lock);
ocapa = capa_get(lli->lli_mds_capa);
......@@ -386,7 +386,7 @@ struct obd_capa *ll_mdscapa_get(struct inode *inode)
atomic_set(&ll_capa_debug, 0);
}
RETURN(ocapa);
return ocapa;
}
static struct obd_capa *do_add_mds_capa(struct inode *inode,
......@@ -554,7 +554,7 @@ int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
capa_put(ocapa);
iput(inode);
RETURN(rc);
return rc;
}
spin_lock(&ocapa->c_lock);
......
......@@ -225,7 +225,7 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
OBDO_ALLOC(oa);
if (!oa) {
CERROR("can't allocate memory for Size-on-MDS update.\n");
RETURN(-ENOMEM);
return -ENOMEM;
}
old_flags = op_data->op_flags;
......@@ -255,7 +255,7 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
ptlrpc_req_finished(request);
OBDO_FREE(oa);
RETURN(rc);
return rc;
}
/**
......@@ -356,7 +356,7 @@ static int ll_close_thread(void *arg)
CDEBUG(D_INFO, "ll_close exiting\n");
complete(&lcq->lcq_comp);
RETURN(0);
return 0;
}
int ll_close_thread_start(struct ll_close_queue **lcq_ret)
......
......@@ -384,17 +384,17 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, const char *buffer,
buffer = lprocfs_find_named_value(buffer, "max_cached_mb:", &count);
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
if (rc)
RETURN(rc);
return rc;
if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
totalram_pages >> (20 - PAGE_CACHE_SHIFT));
RETURN(-ERANGE);
return -ERANGE;
}
if (sbi->ll_dt_exp == NULL)
RETURN(-ENODEV);
return -ENODEV;
spin_lock(&sbi->ll_lock);
diff = pages_number - cache->ccc_lru_max;
......@@ -878,7 +878,7 @@ int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
if (IS_ERR(sbi->ll_proc_root)) {
err = PTR_ERR(sbi->ll_proc_root);
sbi->ll_proc_root = NULL;
RETURN(err);
return err;
}
rc = lprocfs_seq_create(sbi->ll_proc_root, "dump_page_cache", 0444,
......@@ -992,7 +992,7 @@ int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
lprocfs_free_stats(&sbi->ll_ra_stats);
lprocfs_free_stats(&sbi->ll_stats);
}
RETURN(err);
return err;
}
void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi)
......
......@@ -213,7 +213,7 @@ int cl_sb_init(struct super_block *sb)
cl_env_put(env, &refcheck);
} else
rc = PTR_ERR(env);
RETURN(rc);
return rc;
}
int cl_sb_fini(struct super_block *sb)
......@@ -246,7 +246,7 @@ int cl_sb_fini(struct super_block *sb)
* automatically when last device is destroyed).
*/
lu_types_stop();
RETURN(result);
return result;
}
/****************************************************************************
......