Commit 5e42bc9d authored by Li Xi, committed by Greg Kroah-Hartman

staging/lustre: remove assertion of spin_is_locked()

spin_is_locked() is always false on a uniprocessor platform when
CONFIG_DEBUG_SPINLOCK is not enabled, so asserting on its return value
fails spuriously even while the lock is actually held. This patch
replaces such assertions with assert_spin_locked().
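
For illustration only, here is a minimal userspace sketch (not Lustre or
kernel code; the stub macros are a hypothetical mock of the uniprocessor,
non-CONFIG_DEBUG_SPINLOCK behaviour described above, where lock operations
compile away and spin_is_locked() is hard-wired to 0) showing why an
LASSERT()-style check on spin_is_locked() misfires even though the caller
really holds the lock:

  #include <stdio.h>

  /*
   * Hypothetical userspace mock of the UP, !CONFIG_DEBUG_SPINLOCK case:
   * lock/unlock compile to nothing and spin_is_locked() always yields 0.
   */
  typedef struct { int unused; } spinlock_t;
  #define spin_lock(l)      do { (void)(l); } while (0)
  #define spin_unlock(l)    do { (void)(l); } while (0)
  #define spin_is_locked(l) ((void)(l), 0)

  static spinlock_t res_lock;

  int main(void)
  {
          spin_lock(&res_lock);
          /* Prints 0: LASSERT(spin_is_locked(&res_lock)) would therefore
           * fail here even though the "lock" is held. */
          printf("spin_is_locked() while holding the lock: %d\n",
                 spin_is_locked(&res_lock));
          spin_unlock(&res_lock);
          return 0;
  }

assert_spin_locked(), which this patch uses instead, is the kernel's
dedicated "the caller must hold this lock" check, so the intent of these
assertions no longer depends on spin_is_locked()'s uniprocessor semantics.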
Signed-off-by: Li Xi <lixi@ddn.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Reviewed-on: http://review.whamcloud.com/8144
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-4199
Reviewed-by: Alexey Lyashkov <alexey_lyashkov@xyratex.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Signed-off-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 2d95f10e
@@ -66,7 +66,7 @@
  * - spin_unlock(x)
  * - spin_unlock_bh(x)
  * - spin_trylock(x)
- * - spin_is_locked(x)
+ * - assert_spin_locked(x)
  *
  * - spin_lock_irq(x)
  * - spin_lock_irqsave(x, f)
...
@@ -1445,7 +1445,7 @@ static inline void unlock_res(struct ldlm_resource *res)
 /** Check if resource is already locked, assert if not. */
 static inline void check_res_locked(struct ldlm_resource *res)
 {
-	LASSERT(spin_is_locked(&res->lr_lock));
+	assert_spin_locked(&res->lr_lock);
 }
 
 struct ldlm_resource * lock_res_and_lock(struct ldlm_lock *lock);
...
@@ -719,7 +719,7 @@ struct ptlrpc_nrs_pol_ops {
	 * \a nrq
	 * \param[in,out] nrq The request
	 *
-	 * \pre spin_is_locked(&svcpt->scp_req_lock)
+	 * \pre assert_spin_locked(&svcpt->scp_req_lock)
	 *
	 * \see ptlrpc_nrs_req_stop_nolock()
	 */
...
@@ -58,7 +58,7 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
 	int i;
 	int rc = 0;
 
-	LASSERT(spin_is_locked(&lsm->lsm_lock));
+	assert_spin_locked(&lsm->lsm_lock);
 	LASSERT(lsm->lsm_lock_owner == current_pid());
 
 	CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s="LPU64" m="LPU64
@@ -145,7 +145,7 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
 	int stripe = 0;
 	__u64 kms;
 
-	LASSERT(spin_is_locked(&lsm->lsm_lock));
+	assert_spin_locked(&lsm->lsm_lock);
 	LASSERT(lsm->lsm_lock_owner == current_pid());
 
 	if (shrink) {
...
@@ -478,7 +478,7 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
 	struct cl_object_header *head;
 
 	head = cl_object_header(obj);
-	LINVRNT(spin_is_locked(&head->coh_lock_guard));
+	assert_spin_locked(&head->coh_lock_guard);
 	CS_LOCK_INC(obj, lookup);
 	list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
 		int matched;
...
@@ -220,7 +220,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
 	struct lu_object_header *top;
 	int result;
 
-	LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+	assert_spin_locked(cl_object_attr_guard(obj));
 
 	top = obj->co_lu.lo_header;
 	result = 0;
@@ -251,7 +251,7 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
 	struct lu_object_header *top;
 	int result;
 
-	LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
+	assert_spin_locked(cl_object_attr_guard(obj));
 
 	top = obj->co_lu.lo_header;
 	result = 0;
...
@@ -130,7 +130,7 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
 {
 	struct cl_page *page;
 
-	LASSERT(spin_is_locked(&hdr->coh_page_guard));
+	assert_spin_locked(&hdr->coh_page_guard);
 
 	page = radix_tree_lookup(&hdr->coh_tree, index);
 	if (page != NULL)
...
@@ -1311,7 +1311,7 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
 static void osc_consume_write_grant(struct client_obd *cli,
 				    struct brw_page *pga)
 {
-	LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+	assert_spin_locked(&cli->cl_loi_list_lock.lock);
 	LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
 	atomic_inc(&obd_dirty_pages);
 	cli->cl_dirty += PAGE_CACHE_SIZE;
@@ -1326,7 +1326,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
 static void osc_release_write_grant(struct client_obd *cli,
 				    struct brw_page *pga)
 {
-	LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
+	assert_spin_locked(&cli->cl_loi_list_lock.lock);
 	if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
 		return;
 	}
...
@@ -176,7 +176,16 @@ static inline void osc_object_unlock(struct osc_object *obj)
 static inline int osc_object_is_locked(struct osc_object *obj)
 {
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 	return spin_is_locked(&obj->oo_lock);
+#else
+	/*
+	 * It is not perfect to return true all the time.
+	 * But since this function is only used for assertion
+	 * and checking, it seems OK.
+	 */
+	return 1;
+#endif
 }
 
 /*
...
@@ -2271,7 +2271,7 @@ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
  */
 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
 {
-	LASSERT(spin_is_locked(&request->rq_import->imp_lock));
+	assert_spin_locked(&request->rq_import->imp_lock);
 	(void)__ptlrpc_req_finished(request, 1);
 }
 EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
@@ -2452,9 +2452,7 @@ void ptlrpc_free_committed(struct obd_import *imp)
 	bool skip_committed_list = true;
 
 	LASSERT(imp != NULL);
-
-	LASSERT(spin_is_locked(&imp->imp_lock));
-
+	assert_spin_locked(&imp->imp_lock);
 
 	if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
 	    imp->imp_generation == imp->imp_last_generation_checked) {
@@ -2585,7 +2583,7 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
 {
 	struct list_head *tmp;
 
-	LASSERT(spin_is_locked(&imp->imp_lock));
+	assert_spin_locked(&imp->imp_lock);
 
 	if (req->rq_transno == 0) {
 		DEBUG_REQ(D_EMERG, req, "saving request with zero transno");
...
@@ -137,7 +137,7 @@ void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
 static
 void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
 {
-	LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
+	assert_spin_locked(&ctx->cc_sec->ps_lock);
 	LASSERT(atomic_read(&ctx->cc_refcount) > 0);
 	LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
 	LASSERT(!hlist_unhashed(&ctx->cc_cache));
@@ -719,7 +719,7 @@ void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
 	__u32 idx = gmsg->gum_mechidx;
 
 	LASSERT(idx < MECH_MAX);
-	LASSERT(spin_is_locked(&upcall_locks[idx]));
+	assert_spin_locked(&upcall_locks[idx]);
 
 	if (list_empty(&gmsg->gum_list))
 		return;
...
@@ -194,7 +194,7 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
 /* Must be called with imp_lock held! */
 static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
 {
-	LASSERT(spin_is_locked(&imp->imp_lock));
+	assert_spin_locked(&imp->imp_lock);
 
 	CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
 	imp->imp_invalid = 1;
...
@@ -449,7 +449,7 @@ void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy,
 {
 	LASSERT(policy != NULL);
 	LASSERT(info != NULL);
-	LASSERT(spin_is_locked(&policy->pol_nrs->nrs_lock));
+	assert_spin_locked(&policy->pol_nrs->nrs_lock);
 
 	memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX);
...
@@ -368,7 +368,7 @@ EXPORT_SYMBOL(ptlrpc_pinger_sending_on_import);
 void ptlrpc_pinger_commit_expected(struct obd_import *imp)
 {
 	ptlrpc_update_next_ping(imp, 1);
-	LASSERT(spin_is_locked(&imp->imp_lock));
+	assert_spin_locked(&imp->imp_lock);
 	/*
 	 * Avoid reading stale imp_connect_data. When not sure if pings are
 	 * expected or not on next connection, we assume they are not and force
...
@@ -450,7 +450,7 @@ static int enc_pools_add_pages(int npages)
 static inline void enc_pools_wakeup(void)
 {
-	LASSERT(spin_is_locked(&page_pools.epp_lock));
+	assert_spin_locked(&page_pools.epp_lock);
 	LASSERT(page_pools.epp_waitqlen >= 0);
 
 	if (unlikely(page_pools.epp_waitqlen)) {
...
@@ -384,8 +384,8 @@ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
 void
 ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
 {
-	LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
-	LASSERT(spin_is_locked(&rs->rs_lock));
+	assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
+	assert_spin_locked(&rs->rs_lock);
 	LASSERT(rs->rs_difficult);
 	rs->rs_scheduled_ever = 1; /* flag any notification attempt */
...