Commit 47446d74 authored by Vasily Averin's avatar Vasily Averin Committed by Chuck Lever

nfsd4: add refcount for nfsd4_blocked_lock

nbl allocated in nfsd4_lock can be released in several ways:
directly in nfsd4_lock(), via nfs4_laundromat(), via another nfs
command RELEASE_LOCKOWNER or via nfsd4_callback.
This structure should be refcounted to be used and released correctly
in all these cases.

Refcount is initialized to 1 during allocation and is incremented
when nbl is added into nbl_list/nbl_lru lists.

Usually nbl is linked into both lists together, so only one refcount
is used for both lists.

However nfsd4_lock() should keep in mind that nbl can be present
in one of lists only. This can happen if nbl was handled already
by nfs4_laundromat/nfsd4_callback/etc.

Refcount is decremented if vfs_lock_file() returns FILE_LOCK_DEFERRED,
because nbl can be handled already by nfs4_laundromat/nfsd4_callback/etc.

Refcount is not changed in find_blocked_lock() because it reuses the
counter released after removing nbl from the lists.
Signed-off-by: default avatarVasily Averin <vvs@virtuozzo.com>
Reviewed-by: default avatarJeff Layton <jlayton@kernel.org>
Signed-off-by: default avatarChuck Lever <chuck.lever@oracle.com>
parent 40595cdc
...@@ -246,6 +246,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh, ...@@ -246,6 +246,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) { list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
if (fh_match(fh, &cur->nbl_fh)) { if (fh_match(fh, &cur->nbl_fh)) {
list_del_init(&cur->nbl_list); list_del_init(&cur->nbl_list);
WARN_ON(list_empty(&cur->nbl_lru));
list_del_init(&cur->nbl_lru); list_del_init(&cur->nbl_lru);
found = cur; found = cur;
break; break;
...@@ -271,6 +272,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, ...@@ -271,6 +272,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
INIT_LIST_HEAD(&nbl->nbl_lru); INIT_LIST_HEAD(&nbl->nbl_lru);
fh_copy_shallow(&nbl->nbl_fh, fh); fh_copy_shallow(&nbl->nbl_fh, fh);
locks_init_lock(&nbl->nbl_lock); locks_init_lock(&nbl->nbl_lock);
kref_init(&nbl->nbl_kref);
nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client, nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
&nfsd4_cb_notify_lock_ops, &nfsd4_cb_notify_lock_ops,
NFSPROC4_CLNT_CB_NOTIFY_LOCK); NFSPROC4_CLNT_CB_NOTIFY_LOCK);
...@@ -279,12 +281,21 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh, ...@@ -279,12 +281,21 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
return nbl; return nbl;
} }
static void
free_nbl(struct kref *kref)
{
struct nfsd4_blocked_lock *nbl;
nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
kfree(nbl);
}
static void static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl) free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{ {
locks_delete_block(&nbl->nbl_lock); locks_delete_block(&nbl->nbl_lock);
locks_release_private(&nbl->nbl_lock); locks_release_private(&nbl->nbl_lock);
kfree(nbl); kref_put(&nbl->nbl_kref, free_nbl);
} }
static void static void
...@@ -302,6 +313,7 @@ remove_blocked_locks(struct nfs4_lockowner *lo) ...@@ -302,6 +313,7 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
struct nfsd4_blocked_lock, struct nfsd4_blocked_lock,
nbl_list); nbl_list);
list_del_init(&nbl->nbl_list); list_del_init(&nbl->nbl_list);
WARN_ON(list_empty(&nbl->nbl_lru));
list_move(&nbl->nbl_lru, &reaplist); list_move(&nbl->nbl_lru, &reaplist);
} }
spin_unlock(&nn->blocked_locks_lock); spin_unlock(&nn->blocked_locks_lock);
...@@ -6987,6 +6999,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, ...@@ -6987,6 +6999,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
spin_lock(&nn->blocked_locks_lock); spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked); list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru); list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
kref_get(&nbl->nbl_kref);
spin_unlock(&nn->blocked_locks_lock); spin_unlock(&nn->blocked_locks_lock);
} }
...@@ -6999,6 +7012,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, ...@@ -6999,6 +7012,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nn->somebody_reclaimed = true; nn->somebody_reclaimed = true;
break; break;
case FILE_LOCK_DEFERRED: case FILE_LOCK_DEFERRED:
kref_put(&nbl->nbl_kref, free_nbl);
nbl = NULL; nbl = NULL;
fallthrough; fallthrough;
case -EAGAIN: /* conflock holds conflicting lock */ case -EAGAIN: /* conflock holds conflicting lock */
...@@ -7019,8 +7033,13 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, ...@@ -7019,8 +7033,13 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
/* dequeue it if we queued it before */ /* dequeue it if we queued it before */
if (fl_flags & FL_SLEEP) { if (fl_flags & FL_SLEEP) {
spin_lock(&nn->blocked_locks_lock); spin_lock(&nn->blocked_locks_lock);
if (!list_empty(&nbl->nbl_list) &&
!list_empty(&nbl->nbl_lru)) {
list_del_init(&nbl->nbl_list); list_del_init(&nbl->nbl_list);
list_del_init(&nbl->nbl_lru); list_del_init(&nbl->nbl_lru);
kref_put(&nbl->nbl_kref, free_nbl);
}
/* nbl can use one of lists to be linked to reaplist */
spin_unlock(&nn->blocked_locks_lock); spin_unlock(&nn->blocked_locks_lock);
} }
free_blocked_lock(nbl); free_blocked_lock(nbl);
......
...@@ -633,6 +633,7 @@ struct nfsd4_blocked_lock { ...@@ -633,6 +633,7 @@ struct nfsd4_blocked_lock {
struct file_lock nbl_lock; struct file_lock nbl_lock;
struct knfsd_fh nbl_fh; struct knfsd_fh nbl_fh;
struct nfsd4_callback nbl_cb; struct nfsd4_callback nbl_cb;
struct kref nbl_kref;
}; };
struct nfsd4_compound_state; struct nfsd4_compound_state;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment