Commit 931ee56c authored by Benny Halevy, committed by J. Bruce Fields

nfsd4: use recall_lock for delegation hashing

This fixes a bug in the handling of the fi_delegations list.

nfs4_setlease does not hold the recall_lock when adding to it. The
client_mutex is held, which protects against concurrent list changes
from other nfsd threads, but nfsd_break_deleg_cb does not hold the
client_mutex while walking the list. New delegations could
theoretically creep onto the list while we're walking it there.
Signed-off-by: Benny Halevy <bhalevy@primarydata.com>
Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Cc: stable@vger.kernel.org
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent a832e7ae
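
The locking rule this patch adopts can be modeled outside the kernel. Below is a minimal userspace sketch (plain C with pthreads, not the nfsd code): every addition to the per-file delegation list goes through one helper that requires the list lock, mirroring hash_delegation_locked() and lockdep_assert_held(&recall_lock), while the lease-break walk takes the same lock. All structure and function names in the sketch are illustrative analogues, not the kernel's own.

/*
 * Minimal userspace model of the locking rule this patch enforces:
 * additions to the per-file delegation list go through a single helper
 * that requires the recall_lock, and the lease-break walk runs under
 * that same lock, so the two can no longer race.
 */
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct delegation {
	struct delegation *next;        /* analogue of dl_perfile linkage */
	int id;
};

struct nfs_file {
	struct delegation *delegations; /* analogue of fi_delegations */
};

static pthread_mutex_t recall_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Rough stand-in for lockdep_assert_held(): trylock on a held default
 * mutex returns EBUSY, so this at least catches callers that take no
 * lock at all (it cannot tell *which* thread holds it).
 */
#define assert_recall_lock_held() \
	assert(pthread_mutex_trylock(&recall_lock) == EBUSY)

/* Analogue of hash_delegation_locked(): caller must hold recall_lock. */
static void hash_delegation_locked(struct delegation *dp, struct nfs_file *fp)
{
	assert_recall_lock_held();
	dp->next = fp->delegations;
	fp->delegations = dp;
}

/* Analogue of the nfs4_setlease()/nfs4_set_delegation() hunks below:
 * take the lock around the hashing instead of adding to the list bare. */
static void set_delegation(struct delegation *dp, struct nfs_file *fp)
{
	pthread_mutex_lock(&recall_lock);
	hash_delegation_locked(dp, fp);
	pthread_mutex_unlock(&recall_lock);
}

/* Analogue of the lease-break walk: it already runs under recall_lock,
 * so once additions also take it, the list cannot change under us. */
static void break_all_delegations(struct nfs_file *fp)
{
	pthread_mutex_lock(&recall_lock);
	for (struct delegation *dp = fp->delegations; dp; dp = dp->next)
		printf("recalling delegation %d\n", dp->id);
	pthread_mutex_unlock(&recall_lock);
}

int main(void)
{
	struct nfs_file fp = { 0 };
	struct delegation d1 = { .id = 1 }, d2 = { .id = 2 };

	set_delegation(&d1, &fp);
	set_delegation(&d2, &fp);
	break_all_delegations(&fp);
	return 0;
}

The assertion-in-the-helper shape is the same design choice the patch makes with lockdep_assert_held(): the lock requirement is documented and enforced at the single place that touches the list, rather than re-stated at every call site.
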
@@ -433,12 +433,21 @@ static void unhash_stid(struct nfs4_stid *s)
 	s->sc_type = 0;
 }
 
+static void
+hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
+{
+	lockdep_assert_held(&recall_lock);
+
+	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
+}
+
 /* Called under the state lock. */
 static void
 unhash_delegation(struct nfs4_delegation *dp)
 {
-	list_del_init(&dp->dl_perclnt);
 	spin_lock(&recall_lock);
+	list_del_init(&dp->dl_perclnt);
 	list_del_init(&dp->dl_perfile);
 	list_del_init(&dp->dl_recall_lru);
 	spin_unlock(&recall_lock);
@@ -3065,11 +3074,12 @@ static int nfs4_setlease(struct nfs4_delegation *dp)
 	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
 	if (status)
 		goto out_free;
-	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 	fp->fi_lease = fl;
 	fp->fi_deleg_file = get_file(fl->fl_file);
 	atomic_set(&fp->fi_delegees, 1);
-	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	spin_lock(&recall_lock);
+	hash_delegation_locked(dp, fp);
+	spin_unlock(&recall_lock);
 	return 0;
 out_free:
 	locks_free_lock(fl);
@@ -3090,9 +3100,8 @@ static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
 		spin_unlock(&recall_lock);
 		return -EAGAIN;
 	}
-	list_add(&dp->dl_perfile, &fp->fi_delegations);
+	hash_delegation_locked(dp, fp);
 	spin_unlock(&recall_lock);
-	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
 	return 0;
 }
@@ -4903,6 +4912,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
 	struct nfs4_delegation *dp, *next;
 	u64 count = 0;
 
+	lockdep_assert_held(&recall_lock);
 	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
 		if (victims)
 			list_move(&dp->dl_recall_lru, victims);