Commit b9fa4cbd authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'nfsd-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux

Pull nfsd fixes from Chuck Lever:

 - Fix in-kernel RPC UDP transport

 - Fix NFSv4.0 RELEASE_LOCKOWNER

* tag 'nfsd-6.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux:
  nfsd: fix RELEASE_LOCKOWNER
  SUNRPC: use request size to initialize bio_vec in svc_udp_sendto()
parents 3cb9871f edcf9725
...@@ -7911,14 +7911,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) ...@@ -7911,14 +7911,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{ {
struct file_lock *fl; struct file_lock *fl;
int status = false; int status = false;
struct nfsd_file *nf = find_any_file(fp); struct nfsd_file *nf;
struct inode *inode; struct inode *inode;
struct file_lock_context *flctx; struct file_lock_context *flctx;
spin_lock(&fp->fi_lock);
nf = find_any_file_locked(fp);
if (!nf) { if (!nf) {
/* Any valid lock stateid should have some sort of access */ /* Any valid lock stateid should have some sort of access */
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
return status; goto out;
} }
inode = file_inode(nf->nf_file); inode = file_inode(nf->nf_file);
...@@ -7934,7 +7936,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) ...@@ -7934,7 +7936,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
} }
spin_unlock(&flctx->flc_lock); spin_unlock(&flctx->flc_lock);
} }
nfsd_file_put(nf); out:
spin_unlock(&fp->fi_lock);
return status; return status;
} }
...@@ -7944,10 +7947,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner) ...@@ -7944,10 +7947,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
* @cstate: NFSv4 COMPOUND state * @cstate: NFSv4 COMPOUND state
* @u: RELEASE_LOCKOWNER arguments * @u: RELEASE_LOCKOWNER arguments
* *
 * The lockowner's so_count is bumped when a lock record is added * Check if there are any locks still held and if not - free the lockowner
* or when copying a conflicting lock. The latter case is brief, * and any lock state that is owned.
* but can lead to fleeting false positives when looking for
* locks-in-use.
* *
* Return values: * Return values:
* %nfs_ok: lockowner released or not found * %nfs_ok: lockowner released or not found
...@@ -7983,10 +7984,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp, ...@@ -7983,10 +7984,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
spin_unlock(&clp->cl_lock); spin_unlock(&clp->cl_lock);
return nfs_ok; return nfs_ok;
} }
if (atomic_read(&lo->lo_owner.so_count) != 2) {
spin_unlock(&clp->cl_lock); list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
nfs4_put_stateowner(&lo->lo_owner); if (check_for_locks(stp->st_stid.sc_file, lo)) {
return nfserr_locks_held; spin_unlock(&clp->cl_lock);
nfs4_put_stateowner(&lo->lo_owner);
return nfserr_locks_held;
}
} }
unhash_lockowner_locked(lo); unhash_lockowner_locked(lo);
while (!list_empty(&lo->lo_owner.so_stateids)) { while (!list_empty(&lo->lo_owner.so_stateids)) {
......
...@@ -717,12 +717,12 @@ static int svc_udp_sendto(struct svc_rqst *rqstp) ...@@ -717,12 +717,12 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
ARRAY_SIZE(rqstp->rq_bvec), xdr); ARRAY_SIZE(rqstp->rq_bvec), xdr);
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec, iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
count, 0); count, rqstp->rq_res.len);
err = sock_sendmsg(svsk->sk_sock, &msg); err = sock_sendmsg(svsk->sk_sock, &msg);
if (err == -ECONNREFUSED) { if (err == -ECONNREFUSED) {
/* ICMP error on earlier request. */ /* ICMP error on earlier request. */
iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec, iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
count, 0); count, rqstp->rq_res.len);
err = sock_sendmsg(svsk->sk_sock, &msg); err = sock_sendmsg(svsk->sk_sock, &msg);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment