Commit ebfd2036 authored by Trond Myklebust's avatar Trond Myklebust

Fix xprt.c so that it resends RPC requests immediately after a timeout.

Doing this ensures that we keep probing the connection to the server
rather than just waiting for the entire congestion window to time out.
The latter can be very expensive due to the exponential backoff rule.
parent 01a99f33
...@@ -175,10 +175,10 @@ __xprt_lock_write_next(struct rpc_xprt *xprt) ...@@ -175,10 +175,10 @@ __xprt_lock_write_next(struct rpc_xprt *xprt)
if (xprt->snd_task) if (xprt->snd_task)
return; return;
if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
return;
task = rpc_wake_up_next(&xprt->resend); task = rpc_wake_up_next(&xprt->resend);
if (!task) { if (!task) {
if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
return;
task = rpc_wake_up_next(&xprt->sending); task = rpc_wake_up_next(&xprt->sending);
if (!task) if (!task)
return; return;
...@@ -1071,7 +1071,6 @@ xprt_timer(struct rpc_task *task) ...@@ -1071,7 +1071,6 @@ xprt_timer(struct rpc_task *task)
} }
rpc_inc_timeo(&task->tk_client->cl_rtt); rpc_inc_timeo(&task->tk_client->cl_rtt);
xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT); xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
__xprt_put_cong(xprt, req);
} }
req->rq_nresend++; req->rq_nresend++;
...@@ -1211,10 +1210,7 @@ xprt_transmit(struct rpc_task *task) ...@@ -1211,10 +1210,7 @@ xprt_transmit(struct rpc_task *task)
req->rq_bytes_sent = 0; req->rq_bytes_sent = 0;
} }
out_release: out_release:
spin_lock_bh(&xprt->sock_lock); xprt_release_write(xprt, task);
__xprt_release_write(xprt, task);
__xprt_put_cong(xprt, req);
spin_unlock_bh(&xprt->sock_lock);
return; return;
out_receive: out_receive:
dprintk("RPC: %4d xmit complete\n", task->tk_pid); dprintk("RPC: %4d xmit complete\n", task->tk_pid);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment