Commit 7cb58f19 authored by Andriy Skulysh, committed by Greg Kroah-Hartman

staging: lustre: ldlm: crash on umount in cleanup_resource

cfs_hash_for_each_relax() assumes that cfs_hash_put_locked()
doesn't release the bd lock, but that isn't true for
ldlm_res_hop_put_locked().

Add a refcount on the next hnode in cfs_hash_for_each_relax()
and remove ldlm_res_hop_put_locked().
Signed-off-by: Andriy Skulysh <andriy.skulysh@seagate.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6304
Xyratex-bug-id: MRP-2352
Reviewed-by: Vitaly Fertman <vitaly.fertman@seagate.com>
Reviewed-by: Alexander Boyko <alexander.boyko@seagate.com>
Tested-by: Alexander Lezhoev <alexander.lezhoev@seagate.com>
Reviewed-on: http://review.whamcloud.com/13908
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cd60eb59
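The race being fixed is easiest to see in isolation: the old loop pinned
only the node it was visiting, so once the put callback dropped the bucket
lock, the saved next pointer could be freed under the iterator. Below is a
minimal user-space sketch of the pattern the patched loop uses, taking a
reference on the next node before unlocking so the cursor survives the
callback. All names here (node, bucket_lock, node_get(), node_put_locked(),
for_each_relax()) are simplified stand-ins invented for illustration, not
the libcfs API.

/* sketch.c - minimal user-space model; build: cc -pthread sketch.c -o sketch */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int refs;			/* protected by bucket_lock */
	int key;
};

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

static void node_get(struct node *n)
{
	n->refs++;
}

/* Like an hs_put_locked op: may free the node; caller holds bucket_lock. */
static void node_put_locked(struct node *n)
{
	if (--n->refs == 0) {
		printf("freeing node %d\n", n->key);
		free(n);
	}
}

/*
 * Visit every node, calling cb() with the lock dropped.  Pinning the
 * next node before unlocking keeps the cursor valid even if the
 * current node loses its last reference while the lock is released.
 */
static void for_each_relax(struct node *head, void (*cb)(struct node *))
{
	struct node *next = NULL;
	struct node *n;

	pthread_mutex_lock(&bucket_lock);
	n = head;
	if (n)
		node_get(n);
	for (; n; n = next) {
		next = n->next;
		if (next)
			node_get(next);		/* pin the cursor */
		pthread_mutex_unlock(&bucket_lock);

		cb(n);				/* may sleep or reschedule */

		pthread_mutex_lock(&bucket_lock);
		node_put_locked(n);		/* our pin hands over to next */
	}
	pthread_mutex_unlock(&bucket_lock);
}

static void print_cb(struct node *n)
{
	printf("visiting node %d\n", n->key);
}

int main(void)
{
	struct node *head = NULL;
	struct node *n;
	int i;

	for (i = 3; i >= 1; i--) {
		n = malloc(sizeof(*n));
		n->key = i;
		n->refs = 1;		/* reference held by the list itself */
		n->next = head;
		head = n;
	}

	for_each_relax(head, print_cb);

	/* drop the list's own references, freeing every node */
	pthread_mutex_lock(&bucket_lock);
	while (head) {
		n = head->next;
		node_put_locked(head);
		head = n;
	}
	pthread_mutex_unlock(&bucket_lock);
	return 0;
}

The reference taken on next in one iteration becomes the reference on the
current node in the following one, mirroring how the patched
cfs_hash_for_each_relax() below hands its pin from hnode to next.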
@@ -1008,7 +1008,7 @@ cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
 	LASSERT(ops->hs_object);
 	LASSERT(ops->hs_keycmp);
 	LASSERT(ops->hs_get);
-	LASSERT(ops->hs_put_locked);
+	LASSERT(ops->hs_put || ops->hs_put_locked);
 
 	if (flags & CFS_HASH_REHASH)
 		flags |= CFS_HASH_COUNTER; /* must have counter */
@@ -1553,19 +1553,20 @@ static int
 cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 			void *data, int start)
 {
+	struct hlist_node *next = NULL;
 	struct hlist_node *hnode;
-	struct hlist_node *tmp;
 	struct cfs_hash_bd bd;
 	u32 version;
 	int count = 0;
 	int stop_on_change;
+	int has_put_locked;
 	int end = -1;
 	int rc = 0;
 	int i;
 
 	stop_on_change = cfs_hash_with_rehash_key(hs) ||
-			 !cfs_hash_with_no_itemref(hs) ||
-			 !hs->hs_ops->hs_put_locked;
+			 !cfs_hash_with_no_itemref(hs);
+	has_put_locked = hs->hs_ops->hs_put_locked != NULL;
 	cfs_hash_lock(hs, 0);
again:
 	LASSERT(!cfs_hash_is_rehashing(hs));
@@ -1582,38 +1583,52 @@ cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
 		cfs_hash_bd_lock(hs, &bd, 0);
 		version = cfs_hash_bd_version_get(&bd);
 		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
-			for (hnode = hhead->first; hnode;) {
+			hnode = hhead->first;
+			if (!hnode)
+				continue;
+			cfs_hash_get(hs, hnode);
+			for (; hnode; hnode = next) {
 				cfs_hash_bucket_validate(hs, &bd, hnode);
-				cfs_hash_get(hs, hnode);
+				next = hnode->next;
+				if (next)
+					cfs_hash_get(hs, next);
 				cfs_hash_bd_unlock(hs, &bd, 0);
 				cfs_hash_unlock(hs, 0);
 
 				rc = func(hs, &bd, hnode, data);
-				if (stop_on_change)
+				if (stop_on_change || !has_put_locked)
 					cfs_hash_put(hs, hnode);
 				cond_resched();
 				count++;
 
 				cfs_hash_lock(hs, 0);
 				cfs_hash_bd_lock(hs, &bd, 0);
-				if (!stop_on_change) {
-					tmp = hnode->next;
-					cfs_hash_put_locked(hs, hnode);
-					hnode = tmp;
-				} else { /* bucket changed? */
+				if (stop_on_change) {
+					/* bucket changed? */
 					if (version !=
 					    cfs_hash_bd_version_get(&bd))
-						break;
-					/* safe to continue because no change */
-					hnode = hnode->next;
+						rc = -EINTR;
+				} else if (has_put_locked) {
+					cfs_hash_put_locked(hs, hnode);
 				}
 				if (rc) /* callback wants to break iteration */
 					break;
 			}
-			if (rc) /* callback wants to break iteration */
+			if (next) {
+				if (has_put_locked) {
+					cfs_hash_put_locked(hs, next);
+					next = NULL;
+				}
 				break;
+			} else if (rc) {
+				break;
+			}
 		}
 		cfs_hash_bd_unlock(hs, &bd, 0);
+		if (next && !has_put_locked) {
+			cfs_hash_put(hs, next);
+			next = NULL;
+		}
 		if (rc) /* callback wants to break iteration */
 			break;
 	}
...
@@ -106,9 +106,6 @@ int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
 extern unsigned int ldlm_enqueue_min;
 extern unsigned int ldlm_cancel_unused_locks_before_replay;
 
-/* ldlm_resource.c */
-int ldlm_resource_putref_locked(struct ldlm_resource *res);
-
 /* ldlm_lock.c */
 struct ldlm_cb_set_arg {
...
@@ -536,16 +536,6 @@ static void ldlm_res_hop_get_locked(struct cfs_hash *hs,
 	ldlm_resource_getref(res);
 }
 
-static void ldlm_res_hop_put_locked(struct cfs_hash *hs,
-				    struct hlist_node *hnode)
-{
-	struct ldlm_resource *res;
-
-	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
-	/* cfs_hash_for_each_nolock is the only chance we call it */
-	ldlm_resource_putref_locked(res);
-}
-
 static void ldlm_res_hop_put(struct cfs_hash *hs, struct hlist_node *hnode)
 {
 	struct ldlm_resource *res;
@@ -561,7 +551,6 @@ static struct cfs_hash_ops ldlm_ns_hash_ops = {
 	.hs_keycpy	= NULL,
 	.hs_object	= ldlm_res_hop_object,
 	.hs_get		= ldlm_res_hop_get_locked,
-	.hs_put_locked	= ldlm_res_hop_put_locked,
 	.hs_put		= ldlm_res_hop_put
 };
 
@@ -572,7 +561,6 @@ static struct cfs_hash_ops ldlm_ns_fid_hash_ops = {
 	.hs_keycpy	= NULL,
 	.hs_object	= ldlm_res_hop_object,
 	.hs_get		= ldlm_res_hop_get_locked,
-	.hs_put_locked	= ldlm_res_hop_put_locked,
 	.hs_put		= ldlm_res_hop_put
 };
 
@@ -1249,37 +1237,6 @@ int ldlm_resource_putref(struct ldlm_resource *res)
 }
 EXPORT_SYMBOL(ldlm_resource_putref);
 
-/* Returns 1 if the resource was freed, 0 if it remains. */
-int ldlm_resource_putref_locked(struct ldlm_resource *res)
-{
-	struct ldlm_namespace *ns = ldlm_res_to_ns(res);
-
-	LASSERT_ATOMIC_GT_LT(&res->lr_refcount, 0, LI_POISON);
-	CDEBUG(D_INFO, "putref res: %p count: %d\n",
-	       res, atomic_read(&res->lr_refcount) - 1);
-
-	if (atomic_dec_and_test(&res->lr_refcount)) {
-		struct cfs_hash_bd bd;
-
-		cfs_hash_bd_get(ldlm_res_to_ns(res)->ns_rs_hash,
-				&res->lr_name, &bd);
-		__ldlm_resource_putref_final(&bd, res);
-		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
-		/* NB: ns_rs_hash is created with CFS_HASH_NO_ITEMREF,
-		 * so we should never be here while calling cfs_hash_del,
-		 * cfs_hash_for_each_nolock is the only case we can get
-		 * here, which is safe to release cfs_hash_bd_lock.
-		 */
-		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_free)
-			ns->ns_lvbo->lvbo_free(res);
-		kmem_cache_free(ldlm_resource_slab, res);
-
-		cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
-		return 1;
-	}
-
-	return 0;
-}
-
 /**
  * Add a lock into a given resource into specified lock list.
  */
...