Commit e9131359 authored by Alexander Aring, committed by David Teigland

dlm: use rwlock for rsb hash table

The conversion to rhashtable introduced a hash table lock per lockspace,
in place of per bucket locks.  To make this more scalable, switch to
using a rwlock for hash table access.  The common case fast path uses
it as a read lock.
Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
parent b1f2381c
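
The locking pattern applied throughout the diff below is uniform: paths that only look up or walk the rsb hash table take ls_rsbtbl_lock with read_lock_bh(), while paths that insert or remove entries take it with write_lock_bh(). The following is a minimal sketch of that pattern, not the actual DLM code; example_rsbtbl_lock, example_lookup() and example_remove() are hypothetical names standing in for the real structures.

#include <linux/spinlock.h>	/* rwlock_t and the read/write_lock_bh() helpers */

/* hypothetical stand-in for ls->ls_rsbtbl_lock */
static DEFINE_RWLOCK(example_rsbtbl_lock);

static void example_lookup(void)
{
	/* common case fast path: concurrent lookups only need the read side */
	read_lock_bh(&example_rsbtbl_lock);
	/* ... hash table lookup, e.g. dlm_search_rsb_tree() ... */
	read_unlock_bh(&example_rsbtbl_lock);
}

static void example_remove(void)
{
	/* insert/remove paths still need exclusive access */
	write_lock_bh(&example_rsbtbl_lock);
	/* ... rhashtable_remove_fast() and list updates ... */
	write_unlock_bh(&example_rsbtbl_lock);
}

The _bh variants keep softirq context excluded, matching the spin_lock_bh()/spin_unlock_bh() calls being replaced.
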
@@ -413,7 +413,7 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
else
list = &ls->ls_keep;
- spin_lock_bh(&ls->ls_rsbtbl_lock);
+ read_lock_bh(&ls->ls_rsbtbl_lock);
return seq_list_start(list, *pos);
}
@@ -434,7 +434,7 @@ static void table_seq_stop(struct seq_file *seq, void *iter_ptr)
{
struct dlm_ls *ls = seq->private;
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
}
static const struct seq_operations format1_seq_ops = {
@@ -200,9 +200,9 @@ static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, const char *name,
struct dlm_rsb *r;
int rv;
- spin_lock_bh(&ls->ls_rsbtbl_lock);
+ read_lock_bh(&ls->ls_rsbtbl_lock);
rv = dlm_search_rsb_tree(&ls->ls_rsbtbl, name, len, &r);
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
if (!rv)
return r;
@@ -585,7 +585,7 @@ struct dlm_ls {
spinlock_t ls_lkbidr_spin;
struct rhashtable ls_rsbtbl;
- spinlock_t ls_rsbtbl_lock;
+ rwlock_t ls_rsbtbl_lock;
struct list_head ls_toss;
struct list_head ls_keep;
This diff is collapsed.
@@ -424,7 +424,7 @@ static int new_lockspace(const char *name, const char *cluster,
INIT_LIST_HEAD(&ls->ls_toss);
INIT_LIST_HEAD(&ls->ls_keep);
- spin_lock_init(&ls->ls_rsbtbl_lock);
+ rwlock_init(&ls->ls_rsbtbl_lock);
error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params);
if (error)
@@ -884,7 +884,7 @@ void dlm_clear_toss(struct dlm_ls *ls)
struct dlm_rsb *r, *safe;
unsigned int count = 0;
- spin_lock_bh(&ls->ls_rsbtbl_lock);
+ write_lock_bh(&ls->ls_rsbtbl_lock);
list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) {
list_del(&r->res_rsbs_list);
rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
@@ -897,7 +897,7 @@ void dlm_clear_toss(struct dlm_ls *ls)
free_toss_rsb(r);
count++;
}
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
+ write_unlock_bh(&ls->ls_rsbtbl_lock);
if (count)
log_rinfo(ls, "dlm_clear_toss %u done", count);
@@ -32,7 +32,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
goto out;
}
- spin_lock_bh(&ls->ls_rsbtbl_lock);
+ read_lock_bh(&ls->ls_rsbtbl_lock);
list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
if (r->res_nodeid)
continue;
@@ -40,7 +40,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
list_add(&r->res_masters_list, &ls->ls_masters_list);
dlm_hold_rsb(r);
}
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
out:
write_unlock_bh(&ls->ls_masters_lock);
return error;
@@ -62,14 +62,14 @@ static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
{
struct dlm_rsb *r;
- spin_lock_bh(&ls->ls_rsbtbl_lock);
+ read_lock_bh(&ls->ls_rsbtbl_lock);
list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
list_add(&r->res_root_list, root_list);
dlm_hold_rsb(r);
}
WARN_ON_ONCE(!list_empty(&ls->ls_toss));
- spin_unlock_bh(&ls->ls_rsbtbl_lock);
+ read_unlock_bh(&ls->ls_rsbtbl_lock);
}
static void dlm_release_root_list(struct list_head *root_list)