Commit 3e9487c8 authored by Trond Myklebust

Merge http://nfsclient.bkbits.net/linux-2.5

into fys.uio.no:/home/linux/bitkeeper/nfsclient-2.5
parents 7c2b7264 957ca757
@@ -557,7 +557,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 	/* Force a full look up iff the parent directory has changed */
 	if (nfs_check_verifier(dir, dentry)) {
 		if (nfs_lookup_verify_inode(inode, isopen))
-			goto out_bad;
+			goto out_zap_parent;
 		goto out_valid;
 	}
@@ -566,7 +566,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 		if (memcmp(NFS_FH(inode), &fhandle, sizeof(struct nfs_fh)) != 0)
 			goto out_bad;
 		if (nfs_lookup_verify_inode(inode, isopen))
-			goto out_bad;
+			goto out_zap_parent;
 		goto out_valid_renew;
 	}
@@ -587,6 +587,8 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 	unlock_kernel();
 	dput(parent);
 	return 1;
+out_zap_parent:
+	nfs_zap_caches(dir);
 out_bad:
 	NFS_CACHEINV(dir);
 	if (inode && S_ISDIR(inode->i_mode)) {
......
@@ -15,7 +15,6 @@ struct rpc_rtt {
 	unsigned long timeo;	/* default timeout value */
 	unsigned long srtt[5];	/* smoothed round trip time << 3 */
 	unsigned long sdrtt[5];	/* smoothed medium deviation of RTT */
-	atomic_t ntimeouts;	/* Global count of the number of timeouts */
 };
@@ -23,19 +22,4 @@ extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
 extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
 extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);
-
-static inline void rpc_inc_timeo(struct rpc_rtt *rt)
-{
-	atomic_inc(&rt->ntimeouts);
-}
-
-static inline void rpc_clear_timeo(struct rpc_rtt *rt)
-{
-	atomic_set(&rt->ntimeouts, 0);
-}
-
-static inline int rpc_ntimeo(struct rpc_rtt *rt)
-{
-	return atomic_read(&rt->ntimeouts);
-}
 
 #endif /* _LINUX_SUNRPC_TIMER_H */
@@ -98,6 +98,10 @@ struct rpc_rqst {
 	struct list_head	rq_list;
 
+	struct xdr_buf		rq_private_buf;	/* The receive buffer
+						 * used in the softirq.
+						 */
+
 	/*
 	 * For authentication (e.g. auth_des)
 	 */
@@ -111,7 +115,7 @@ struct rpc_rqst {
 	unsigned long		rq_xtime;	/* when transmitted */
 	int			rq_ntimeo;
-	int			rq_nresend;
+	int			rq_ntrans;
 };
 
 #define rq_svec		rq_snd_buf.head
 #define rq_slen		rq_snd_buf.len
......
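The new rq_private_buf field gives the networking softirq its own copy of the receive buffer, so a reply can be copied in even while the task itself is still touching rq_rcv_buf (for example while re-encoding for a retransmit). The intended pattern, paraphrased from the xprt_transmit() and call_decode() hunks further down (the helper name here is made up for illustration; the field, list and lock names are the real ones):

/* Sketch only -- the real logic lives in xprt_transmit()/call_decode() below. */
static void example_queue_receive(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock_bh(&xprt->sock_lock);
	/* Give the softirq a private snapshot of the receive buffer... */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));
	/* ...and only then make the request visible to the data_ready path. */
	list_add_tail(&req->rq_list, &xprt->recv);
	spin_unlock_bh(&xprt->sock_lock);
}

call_decode() then WARN_ONs if the private copy no longer matches rq_rcv_buf before it decodes the reply.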
@@ -659,7 +659,7 @@ call_transmit(struct rpc_task *task)
 	if (task->tk_status < 0)
 		return;
 	task->tk_status = xprt_prepare_transmit(task);
-	if (task->tk_status < 0)
+	if (task->tk_status != 0)
 		return;
 	/* Encode here so that rpcsec_gss can use correct sequence number. */
 	if (!task->tk_rqstp->rq_bytes_sent)
@@ -685,7 +685,7 @@ call_status(struct rpc_task *task)
 	struct rpc_rqst	*req = task->tk_rqstp;
 	int		status;
 
-	if (req->rq_received != 0)
+	if (req->rq_received > 0 && !req->rq_bytes_sent)
 		task->tk_status = req->rq_received;
 
 	dprintk("RPC: %4d call_status (status %d)\n",
@@ -744,14 +744,14 @@ call_timeout(struct rpc_task *task)
 	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
 	if (clnt->cl_softrtry) {
-		if (clnt->cl_chatty && !task->tk_exit)
+		if (clnt->cl_chatty)
 			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
 				clnt->cl_protname, clnt->cl_server);
 		rpc_exit(task, -EIO);
 		return;
 	}
 
-	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN) && rpc_ntimeo(&clnt->cl_rtt) > 7) {
+	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
 		task->tk_flags |= RPC_CALL_MAJORSEEN;
 		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
 			clnt->cl_protname, clnt->cl_server);
@@ -787,19 +787,26 @@ call_decode(struct rpc_task *task)
 	if (task->tk_status < 12) {
 		if (!clnt->cl_softrtry) {
-			task->tk_action = call_transmit;
+			task->tk_action = call_bind;
 			clnt->cl_stats->rpcretrans++;
-		} else {
-			printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
-				clnt->cl_protname, task->tk_status);
-			rpc_exit(task, -EIO);
+			goto out_retry;
 		}
+		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
+			clnt->cl_protname, task->tk_status);
+		rpc_exit(task, -EIO);
 		return;
 	}
 
+	/* Check that the softirq receive buffer is valid */
+	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
+				sizeof(req->rq_rcv_buf)) != 0);
+
 	/* Verify the RPC header */
-	if (!(p = call_verify(task)))
-		return;
+	if (!(p = call_verify(task))) {
+		if (task->tk_action == NULL)
+			return;
+		goto out_retry;
+	}
 
 	/*
 	 * The following is an NFS-specific hack to cater for setuid
@@ -812,7 +819,7 @@ call_decode(struct rpc_task *task)
 			task->tk_flags ^= RPC_CALL_REALUID;
 			task->tk_action = call_bind;
 			task->tk_suid_retry--;
-			return;
+			goto out_retry;
 		}
 	}
@@ -822,6 +829,10 @@ call_decode(struct rpc_task *task)
 	task->tk_status = decode(req, p, task->tk_msg.rpc_resp);
 	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
 					task->tk_status);
+	return;
+out_retry:
+	req->rq_received = 0;
+	task->tk_status = 0;
 }
 
 /*
......
@@ -25,7 +25,7 @@
 
 #define RPC_RTO_MAX (60*HZ)
 #define RPC_RTO_INIT (HZ/5)
-#define RPC_RTO_MIN (2)
+#define RPC_RTO_MIN (HZ/10)
 
 void
 rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
@@ -41,8 +41,6 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
 		rt->srtt[i] = init;
 		rt->sdrtt[i] = RPC_RTO_INIT;
 	}
-
-	atomic_set(&rt->ntimeouts, 0);
 }
 
 /*
@@ -52,7 +50,7 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
 void
 rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
 {
-	unsigned long *srtt, *sdrtt;
+	long *srtt, *sdrtt;
 
 	if (timer-- == 0)
 		return;
@@ -64,14 +62,14 @@ rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
 	if (m == 0)
 		m = 1L;
 
-	srtt = &rt->srtt[timer];
+	srtt = (long *)&rt->srtt[timer];
 	m -= *srtt >> 3;
 	*srtt += m;
 
 	if (m < 0)
 		m = -m;
 
-	sdrtt = &rt->sdrtt[timer];
+	sdrtt = (long *)&rt->sdrtt[timer];
 	m -= *sdrtt >> 2;
 	*sdrtt += m;
@@ -101,7 +99,7 @@ rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
 	if (timer-- == 0)
 		return rt->timeo;
 
-	res = (rt->srtt[timer] >> 3) + rt->sdrtt[timer];
+	res = ((rt->srtt[timer] + 7) >> 3) + rt->sdrtt[timer];
 	if (res > RPC_RTO_MAX)
 		res = RPC_RTO_MAX;
......
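The arithmetic being tweaked above is a Van Jacobson style estimator: srtt is kept scaled by 8 and sdrtt (the smoothed mean deviation) by 4, so rpc_calc_rto() returns roughly rtt + 4*mdev, now rounded up instead of truncated and with a floor of HZ/10 instead of 2 jiffies. A stand-alone restatement of that arithmetic (HZ and the sample values are arbitrary, and the RPC_RTO_MIN clamp is assumed from the define above since the tail of rpc_calc_rto() is not shown in this hunk):

/* Illustration of the SUNRPC RTT estimator after this patch; not kernel code. */
#include <stdio.h>

#define HZ		100		/* arbitrary for the example */
#define RPC_RTO_MAX	(60*HZ)
#define RPC_RTO_MIN	(HZ/10)

static long srtt;			/* smoothed RTT, scaled by 8 */
static long sdrtt;			/* smoothed mean deviation, scaled by 4 */

static void update_rtt(long m)		/* m: measured RTT in jiffies */
{
	if (m == 0)
		m = 1L;
	m -= srtt >> 3;			/* error relative to the smoothed RTT */
	srtt += m;			/* srtt/8 moves 1/8 of the way toward m */
	if (m < 0)
		m = -m;
	m -= sdrtt >> 2;
	sdrtt += m;			/* sdrtt/4 tracks the mean deviation */
}

static long calc_rto(void)
{
	long res = ((srtt + 7) >> 3) + sdrtt;	/* ~ rtt + 4*mdev, rounded up */
	if (res > RPC_RTO_MAX)
		res = RPC_RTO_MAX;
	if (res < RPC_RTO_MIN)			/* assumed lower clamp */
		res = RPC_RTO_MIN;
	return res;
}

int main(void)
{
	long samples[] = { 12, 15, 9, 40, 11 };
	for (int i = 0; i < 5; i++) {
		update_rtt(samples[i]);
		printf("sample %2ld -> rto %3ld jiffies\n", samples[i], calc_rto());
	}
	return 0;
}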
@@ -138,15 +138,22 @@ xprt_from_sock(struct sock *sk)
 static int
 __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
+	struct rpc_rqst *req = task->tk_rqstp;
+
 	if (!xprt->snd_task) {
-		if (xprt->nocong || __xprt_get_cong(xprt, task))
+		if (xprt->nocong || __xprt_get_cong(xprt, task)) {
 			xprt->snd_task = task;
+			if (req) {
+				req->rq_bytes_sent = 0;
+				req->rq_ntrans++;
+			}
+		}
 	}
 	if (xprt->snd_task != task) {
 		dprintk("RPC: %4d TCP write queue full\n", task->tk_pid);
 		task->tk_timeout = 0;
 		task->tk_status = -EAGAIN;
-		if (task->tk_rqstp && task->tk_rqstp->rq_nresend)
+		if (req && req->rq_ntrans)
 			rpc_sleep_on(&xprt->resend, task, NULL, NULL);
 		else
 			rpc_sleep_on(&xprt->sending, task, NULL, NULL);
@@ -181,8 +188,14 @@ __xprt_lock_write_next(struct rpc_xprt *xprt)
 		if (!task)
 			return;
 	}
-	if (xprt->nocong || __xprt_get_cong(xprt, task))
+	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+		struct rpc_rqst *req = task->tk_rqstp;
 		xprt->snd_task = task;
+		if (req) {
+			req->rq_bytes_sent = 0;
+			req->rq_ntrans++;
+		}
+	}
 }
 
 /*
@@ -422,6 +435,9 @@ xprt_connect(struct rpc_task *task)
 	if (xprt_connected(xprt))
 		goto out_write;
 
+	if (task->tk_rqstp)
+		task->tk_rqstp->rq_bytes_sent = 0;
+
 	/*
 	 * We're here because the xprt was marked disconnected.
 	 * Start by resetting any existing state.
@@ -566,14 +582,13 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
 	if (!xprt->nocong) {
 		xprt_adjust_cwnd(xprt, copied);
 		__xprt_put_cong(xprt, req);
-		if (!req->rq_nresend) {
+		if (req->rq_ntrans == 1) {
 			unsigned timer =
 				task->tk_msg.rpc_proc->p_timer;
 			if (timer)
 				rpc_update_rtt(&clnt->cl_rtt, timer,
 						(long)jiffies - req->rq_xtime);
 		}
-		rpc_clear_timeo(&clnt->cl_rtt);
 	}
 
 #ifdef RPC_PROFILE
@@ -714,11 +729,11 @@ udp_data_ready(struct sock *sk, int len)
 	dprintk("RPC: %4d received reply\n", task->tk_pid);
 
-	if ((copied = rovr->rq_rlen) > repsize)
+	if ((copied = rovr->rq_private_buf.len) > repsize)
 		copied = repsize;
 
 	/* Suck it into the iovec, verify checksum if not done by hw. */
-	if (csum_partial_copy_to_xdr(&rovr->rq_rcv_buf, skb))
+	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
 		goto out_unlock;
 
 	/* Something worked... */
@@ -841,7 +856,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 		return;
 	}
 
-	rcvbuf = &req->rq_rcv_buf;
+	rcvbuf = &req->rq_private_buf;
 	len = desc->count;
 	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
 		skb_reader_t my_desc;
@@ -859,7 +874,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 	xprt->tcp_copied += len;
 	xprt->tcp_offset += len;
 
-	if (xprt->tcp_copied == req->rq_rlen)
+	if (xprt->tcp_copied == req->rq_private_buf.len)
 		xprt->tcp_flags &= ~XPRT_COPY_DATA;
 	else if (xprt->tcp_offset == xprt->tcp_reclen) {
 		if (xprt->tcp_flags & XPRT_LAST_FRAG)
@@ -1039,21 +1054,6 @@ xprt_write_space(struct sock *sk)
 	read_unlock(&sk->sk_callback_lock);
 }
 
-/*
- * Exponential backoff for UDP retries
- */
-static inline int
-xprt_expbackoff(struct rpc_task *task, struct rpc_rqst *req)
-{
-	int backoff;
-
-	req->rq_ntimeo++;
-	backoff = min(rpc_ntimeo(&task->tk_client->cl_rtt), XPRT_MAX_BACKOFF);
-	if (req->rq_ntimeo < (1 << backoff))
-		return 1;
-	return 0;
-}
-
 /*
  * RPC receive timeout handler.
  */
@@ -1067,15 +1067,8 @@ xprt_timer(struct rpc_task *task)
 	if (req->rq_received)
 		goto out;
 
-	if (!xprt->nocong) {
-		if (xprt_expbackoff(task, req)) {
-			rpc_add_timer(task, xprt_timer);
-			goto out_unlock;
-		}
-		rpc_inc_timeo(&task->tk_client->cl_rtt);
-		xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
-	}
-	req->rq_nresend++;
+	xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
+	__xprt_put_cong(xprt, req);
 
 	dprintk("RPC: %4d xprt_timer (%s request)\n",
 		task->tk_pid, req ? "pending" : "backlogged");
@@ -1084,7 +1077,6 @@ xprt_timer(struct rpc_task *task)
 out:
 	task->tk_timeout = 0;
 	rpc_wake_up_task(task);
-out_unlock:
 	spin_unlock(&xprt->sock_lock);
 }
@@ -1104,10 +1096,11 @@ xprt_prepare_transmit(struct rpc_task *task)
 	if (xprt->shutdown)
 		return -EIO;
 
-	if (task->tk_rpcwait)
-		rpc_remove_wait_queue(task);
-
 	spin_lock_bh(&xprt->sock_lock);
+	if (req->rq_received && !req->rq_bytes_sent) {
+		err = req->rq_received;
+		goto out_unlock;
+	}
 	if (!__xprt_lock_write(xprt, task)) {
 		err = -EAGAIN;
 		goto out_unlock;
@@ -1117,11 +1110,6 @@ xprt_prepare_transmit(struct rpc_task *task)
 		err = -ENOTCONN;
 		goto out_unlock;
 	}
-
-	if (list_empty(&req->rq_list)) {
-		list_add_tail(&req->rq_list, &xprt->recv);
-		req->rq_received = 0;
-	}
 out_unlock:
 	spin_unlock_bh(&xprt->sock_lock);
 	return err;
@@ -1146,6 +1134,20 @@ xprt_transmit(struct rpc_task *task)
 		*marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
 	}
 
+	smp_rmb();
+	if (!req->rq_received) {
+		if (list_empty(&req->rq_list)) {
+			spin_lock_bh(&xprt->sock_lock);
+			/* Update the softirq receive buffer */
+			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+					sizeof(req->rq_private_buf));
+			/* Add request to the receive list */
+			list_add_tail(&req->rq_list, &xprt->recv);
+			spin_unlock_bh(&xprt->sock_lock);
+		}
+	} else if (!req->rq_bytes_sent)
+		return;
+
 	/* Continue transmitting the packet/record. We must be careful
 	 * to cope with writespace callbacks arriving _after_ we have
 	 * called xprt_sendmsg().
@@ -1160,8 +1162,12 @@ xprt_transmit(struct rpc_task *task)
 		if (xprt->stream) {
 			req->rq_bytes_sent += status;
-			if (req->rq_bytes_sent >= req->rq_slen)
+			/* If we've sent the entire packet, immediately
+			 * reset the count of bytes sent. */
+			if (req->rq_bytes_sent >= req->rq_slen) {
+				req->rq_bytes_sent = 0;
 				goto out_receive;
+			}
 		} else {
 			if (status >= req->rq_slen)
 				goto out_receive;
@@ -1182,9 +1188,6 @@ xprt_transmit(struct rpc_task *task)
 	 * hence there is no danger of the waking up task being put on
 	 * schedq, and being picked up by a parallel run of rpciod().
 	 */
-	if (req->rq_received)
-		goto out_release;
-
 	task->tk_status = status;
 
 	switch (status) {
@@ -1214,22 +1217,21 @@ xprt_transmit(struct rpc_task *task)
 		if (xprt->stream)
 			xprt_disconnect(xprt);
 	}
-out_release:
 	xprt_release_write(xprt, task);
-	req->rq_bytes_sent = 0;
 	return;
 out_receive:
 	dprintk("RPC: %4d xmit complete\n", task->tk_pid);
 	/* Set the task's receive timeout value */
+	spin_lock_bh(&xprt->sock_lock);
 	if (!xprt->nocong) {
 		task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt,
 				task->tk_msg.rpc_proc->p_timer);
-		req->rq_ntimeo = 0;
+		task->tk_timeout <<= clnt->cl_timeout.to_retries
+			- req->rq_timeout.to_retries;
 		if (task->tk_timeout > req->rq_timeout.to_maxval)
 			task->tk_timeout = req->rq_timeout.to_maxval;
 	} else
 		task->tk_timeout = req->rq_timeout.to_current;
-	spin_lock_bh(&xprt->sock_lock);
 	/* Don't race with disconnect */
 	if (!xprt_connected(xprt))
 		task->tk_status = -ENOTCONN;
@@ -1237,7 +1239,6 @@ xprt_transmit(struct rpc_task *task)
 		rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
 	__xprt_release_write(xprt, task);
 	spin_unlock_bh(&xprt->sock_lock);
-	req->rq_bytes_sent = 0;
 }
 
 /*
......
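Retry backoff also changes shape: with xprt_expbackoff() gone, the receive timeout set at the end of xprt_transmit() is the RTT-based RTO left-shifted by the number of major retries already consumed (cl_timeout.to_retries - rq_timeout.to_retries), then capped at to_maxval. Assuming rq_timeout.to_retries counts down as retransmissions are used up (which is how the expression reads, but is not shown in these hunks), the effective timeout doubles per retransmission, as in this made-up illustration:

/* Illustration only: growth of the receive timeout across retransmissions
 * under the new scheme.  All values are arbitrary; the shift and cap mirror
 * the out_receive code in xprt_transmit() above. */
#include <stdio.h>

#define HZ 100

int main(void)
{
	long rto = 250;			/* what rpc_calc_rto() might return */
	int cl_retries = 5;		/* clnt->cl_timeout.to_retries */
	long to_maxval = 60 * HZ;	/* req->rq_timeout.to_maxval */

	for (int rq_retries = cl_retries; rq_retries >= 0; rq_retries--) {
		long timeout = rto << (cl_retries - rq_retries);
		if (timeout > to_maxval)
			timeout = to_maxval;
		printf("retries left %d -> timeout %4ld jiffies\n",
				rq_retries, timeout);
	}
	return 0;
}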