Commit 1c0cc5f1 authored by Linus Torvalds

Merge tag 'nfs-for-5.4-2' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client bugfixes from Anna Schumaker:
 "Stable bugfixes:
   - Fix O_DIRECT accounting of number of bytes read/written # v4.1+

  Other fixes:
   - Fix nfsi->nrequests count error on nfs_inode_remove_request()
   - Remove redundant mirror tracking in O_DIRECT
   - Fix leak of clp->cl_acceptor string
   - Fix race to sk_err after xs_error_report"

* tag 'nfs-for-5.4-2' of git://git.linux-nfs.org/projects/anna/linux-nfs:
  SUNRPC: fix race to sk_err after xs_error_report
  NFSv4: Fix leak of clp->cl_acceptor string
  NFS: Remove redundant mirror tracking in O_DIRECT
  NFS: Fix O_DIRECT accounting of number of bytes read/written
  nfs: Fix nfsi->nrequests count error on nfs_inode_remove_request
parents c6ad7c3c af84537d
@@ -64,13 +64,6 @@
 static struct kmem_cache *nfs_direct_cachep;
 
-/*
- * This represents a set of asynchronous requests that we're waiting on
- */
-struct nfs_direct_mirror {
-	ssize_t count;
-};
-
 struct nfs_direct_req {
 	struct kref		kref;		/* release manager */
@@ -84,9 +77,6 @@ struct nfs_direct_req {
 	atomic_t		io_count;	/* i/os we're waiting for */
 	spinlock_t		lock;		/* protect completion state */
 
-	struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
-	int			mirror_count;
-
 	loff_t			io_start;	/* Start offset for I/O */
 	ssize_t			count,		/* bytes actually processed */
 				max_count,	/* max expected count */
@@ -123,32 +113,42 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 }
 
 static void
-nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+			    const struct nfs_pgio_header *hdr,
+			    ssize_t dreq_len)
 {
-	int i;
-	ssize_t count;
+	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
+	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
+		return;
+	if (dreq->max_count >= dreq_len) {
+		dreq->max_count = dreq_len;
+		if (dreq->count > dreq_len)
+			dreq->count = dreq_len;
 
-	WARN_ON_ONCE(dreq->count >= dreq->max_count);
-
-	if (dreq->mirror_count == 1) {
-		dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
-		dreq->count += hdr->good_bytes;
-	} else {
-		/* mirrored writes */
-		count = dreq->mirrors[hdr->pgio_mirror_idx].count;
-		if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
-			count = hdr->io_start + hdr->good_bytes - dreq->io_start;
-			dreq->mirrors[hdr->pgio_mirror_idx].count = count;
-		}
-		/* update the dreq->count by finding the minimum agreed count from all
-		 * mirrors */
-		count = dreq->mirrors[0].count;
+		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+			dreq->error = hdr->error;
+		else /* Clear outstanding error if this is EOF */
+			dreq->error = 0;
+	}
+}
 
-		for (i = 1; i < dreq->mirror_count; i++)
-			count = min(count, dreq->mirrors[i].count);
+static void
+nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+		       const struct nfs_pgio_header *hdr)
+{
+	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
+	ssize_t dreq_len = 0;
 
-		dreq->count = count;
-	}
+	if (hdr_end > dreq->io_start)
+		dreq_len = hdr_end - dreq->io_start;
+
+	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
+
+	if (dreq_len > dreq->max_count)
+		dreq_len = dreq->max_count;
+
+	if (dreq->count < dreq_len)
+		dreq->count = dreq_len;
 }
 
 /*
@@ -293,18 +293,6 @@ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
 	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 }
 
-static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
-					      struct nfs_pageio_descriptor *pgio,
-					      struct nfs_page *req)
-{
-	int mirror_count = 1;
-
-	if (pgio->pg_ops->pg_get_mirror_count)
-		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
-
-	dreq->mirror_count = mirror_count;
-}
-
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
 	struct nfs_direct_req *dreq;
@@ -319,7 +307,6 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
 	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
 	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
-	dreq->mirror_count = 1;
 	spin_lock_init(&dreq->lock);
 
 	return dreq;
@@ -402,20 +389,12 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
 	struct nfs_direct_req *dreq = hdr->dreq;
 
 	spin_lock(&dreq->lock);
-	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-		dreq->error = hdr->error;
-
 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 		spin_unlock(&dreq->lock);
 		goto out_put;
 	}
 
-	if (hdr->good_bytes != 0)
-		nfs_direct_good_bytes(dreq, hdr);
-
-	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
-		dreq->error = 0;
-
+	nfs_direct_count_bytes(dreq, hdr);
 	spin_unlock(&dreq->lock);
 
 	while (!list_empty(&hdr->pages)) {
@@ -646,29 +625,22 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	LIST_HEAD(reqs);
 	struct nfs_commit_info cinfo;
 	LIST_HEAD(failed);
-	int i;
 
 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 
 	dreq->count = 0;
+	dreq->max_count = 0;
+	list_for_each_entry(req, &reqs, wb_list)
+		dreq->max_count += req->wb_bytes;
 	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
 	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
-	for (i = 0; i < dreq->mirror_count; i++)
-		dreq->mirrors[i].count = 0;
 	get_dreq(dreq);
 
 	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
 	desc.pg_dreq = dreq;
 
-	req = nfs_list_entry(reqs.next);
-	nfs_direct_setup_mirroring(dreq, &desc, req);
-	if (desc.pg_error < 0) {
-		list_splice_init(&reqs, &failed);
-		goto out_failed;
-	}
-
 	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
 		/* Bump the transmission count */
 		req->wb_nio++;
@@ -686,7 +658,6 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	}
 	nfs_pageio_complete(&desc);
 
-out_failed:
 	while (!list_empty(&failed)) {
 		req = nfs_list_entry(failed.next);
 		nfs_list_remove_request(req);
@@ -791,17 +762,13 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
 
 	spin_lock(&dreq->lock);
-	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-		dreq->error = hdr->error;
-
 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
 		spin_unlock(&dreq->lock);
 		goto out_put;
 	}
 
+	nfs_direct_count_bytes(dreq, hdr);
 	if (hdr->good_bytes != 0) {
-		nfs_direct_good_bytes(dreq, hdr);
 		if (nfs_write_need_commit(hdr)) {
 			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
 				request_commit = true;
@@ -923,7 +890,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 				break;
 			}
 
-			nfs_direct_setup_mirroring(dreq, &desc, req);
 			if (desc.pg_error < 0) {
 				nfs_free_request(req);
 				result = desc.pg_error;
@@ -6106,6 +6106,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 	status = nfs4_call_sync_custom(&task_setup_data);
 	if (setclientid.sc_cred) {
+		kfree(clp->cl_acceptor);
 		clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
 		put_rpccred(setclientid.sc_cred);
 	}
@@ -786,7 +786,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *head;
 
-	atomic_long_dec(&nfsi->nrequests);
 	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
 		head = req->wb_head;
@@ -799,8 +798,10 @@ static void nfs_inode_remove_request(struct nfs_page *req)
 		spin_unlock(&mapping->private_lock);
 	}
 
-	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
 		nfs_release_request(req);
+		atomic_long_dec(&nfsi->nrequests);
+	}
 }
 
 static void
@@ -61,6 +61,7 @@ struct sock_xprt {
 	struct mutex		recv_mutex;
 	struct sockaddr_storage	srcaddr;
 	unsigned short		srcport;
+	int			xprt_err;
 
 	/*
 	 * UDP socket buffer size parameters
@@ -1249,19 +1249,21 @@ static void xs_error_report(struct sock *sk)
 {
 	struct sock_xprt *transport;
 	struct rpc_xprt *xprt;
-	int err;
 
 	read_lock_bh(&sk->sk_callback_lock);
 	if (!(xprt = xprt_from_sock(sk)))
 		goto out;
 	transport = container_of(xprt, struct sock_xprt, xprt);
-	err = -sk->sk_err;
-	if (err == 0)
+	transport->xprt_err = -sk->sk_err;
+	if (transport->xprt_err == 0)
 		goto out;
 	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
-			xprt, -err);
-	trace_rpc_socket_error(xprt, sk->sk_socket, err);
+			xprt, -transport->xprt_err);
+	trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
+
+	/* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
+	smp_mb__before_atomic();
 	xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
 out:
 	read_unlock_bh(&sk->sk_callback_lock);
@@ -2476,7 +2478,6 @@ static void xs_wake_write(struct sock_xprt *transport)
 static void xs_wake_error(struct sock_xprt *transport)
 {
 	int sockerr;
-	int sockerr_len = sizeof(sockerr);
 
 	if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
 		return;
@@ -2485,9 +2486,7 @@
 		goto out;
 	if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
 		goto out;
-	if (kernel_getsockopt(transport->sock, SOL_SOCKET, SO_ERROR,
-			(char *)&sockerr, &sockerr_len) != 0)
-		goto out;
+	sockerr = xchg(&transport->xprt_err, 0);
 	if (sockerr < 0)
 		xprt_wake_pending_tasks(&transport->xprt, sockerr);
 out:
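
Note on the sk_err race, for readers skimming the diff: the old code read sk->sk_err once inside xs_error_report() and then fetched the error a second time later in xs_wake_error() via kernel_getsockopt(SO_ERROR), by which point the value could have changed or been cleared, so the wakeup could miss the real error. The change above instead latches the error into transport->xprt_err before the XPRT_SOCK_WAKE_ERROR bit is raised (with smp_mb__before_atomic() ordering the two stores) and lets the worker consume it exactly once with xchg(). The fragment below is a minimal userspace sketch of that publish/consume pattern using C11 atomics; it is illustrative only, and the names pending_err and wake_flag are invented for the example rather than taken from the kernel code.

/*
 * Minimal userspace sketch (NOT kernel code) of the publish/consume
 * pattern used by the fix above: the reporter latches the error value
 * and only then raises the wake flag; the waker clears the flag and
 * consumes the error with an atomic exchange so it is read exactly once.
 * pending_err and wake_flag are illustrative names, not kernel symbols.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int  pending_err;   /* plays the role of transport->xprt_err */
static atomic_bool wake_flag;     /* plays the role of XPRT_SOCK_WAKE_ERROR */

static void error_report(int sk_err)        /* cf. xs_error_report() */
{
	if (sk_err == 0)
		return;
	atomic_store(&pending_err, -sk_err);        /* latch the error first */
	atomic_thread_fence(memory_order_release);  /* order the two stores  */
	atomic_store(&wake_flag, true);             /* then raise the flag   */
}

static void wake_error(void)                /* cf. xs_wake_error() */
{
	if (!atomic_exchange(&wake_flag, false))
		return;
	int err = atomic_exchange(&pending_err, 0); /* consume exactly once */
	if (err < 0)
		printf("waking pending tasks with error %d\n", err);
}

int main(void)
{
	error_report(111);	/* e.g. a connection error reported on the socket */
	wake_error();		/* worker side: sees -111 exactly once */
	return 0;
}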