Commit 944b0429 authored by Trond Myklebust's avatar Trond Myklebust

SUNRPC: Add a transmission queue for RPC requests

Add the queue that will enforce the ordering of RPC task transmission.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent ef3f5434
...@@ -88,6 +88,8 @@ struct rpc_rqst { ...@@ -88,6 +88,8 @@ struct rpc_rqst {
struct list_head rq_recv; /* Receive queue */ struct list_head rq_recv; /* Receive queue */
}; };
struct list_head rq_xmit; /* Send queue */
void *rq_buffer; /* Call XDR encode buffer */ void *rq_buffer; /* Call XDR encode buffer */
size_t rq_callsize; size_t rq_callsize;
void *rq_rbuffer; /* Reply XDR decode buffer */ void *rq_rbuffer; /* Reply XDR decode buffer */
...@@ -242,6 +244,9 @@ struct rpc_xprt { ...@@ -242,6 +244,9 @@ struct rpc_xprt {
spinlock_t queue_lock; /* send/receive queue lock */ spinlock_t queue_lock; /* send/receive queue lock */
u32 xid; /* Next XID value to use */ u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */ struct rpc_task * snd_task; /* Task blocked in send */
struct list_head xmit_queue; /* Send queue */
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct svc_serv *bc_serv; /* The RPC service which will */ struct svc_serv *bc_serv; /* The RPC service which will */
...@@ -339,6 +344,7 @@ void xprt_free_slot(struct rpc_xprt *xprt, ...@@ -339,6 +344,7 @@ void xprt_free_slot(struct rpc_xprt *xprt,
struct rpc_rqst *req); struct rpc_rqst *req);
void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
bool xprt_prepare_transmit(struct rpc_task *task); bool xprt_prepare_transmit(struct rpc_task *task);
void xprt_request_enqueue_transmit(struct rpc_task *task);
void xprt_request_enqueue_receive(struct rpc_task *task); void xprt_request_enqueue_receive(struct rpc_task *task);
void xprt_request_wait_receive(struct rpc_task *task); void xprt_request_wait_receive(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task);
......
...@@ -1156,11 +1156,11 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req) ...@@ -1156,11 +1156,11 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
*/ */
xbufp->len = xbufp->head[0].iov_len + xbufp->page_len + xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
xbufp->tail[0].iov_len; xbufp->tail[0].iov_len;
set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
task->tk_action = call_bc_transmit; task->tk_action = call_bc_transmit;
atomic_inc(&task->tk_count); atomic_inc(&task->tk_count);
WARN_ON_ONCE(atomic_read(&task->tk_count) != 2); WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
xprt_request_enqueue_transmit(task);
rpc_execute(task); rpc_execute(task);
dprintk("RPC: rpc_run_bc_task: task= %p\n", task); dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
...@@ -1759,8 +1759,6 @@ rpc_xdr_encode(struct rpc_task *task) ...@@ -1759,8 +1759,6 @@ rpc_xdr_encode(struct rpc_task *task)
task->tk_status = rpcauth_wrap_req(task, encode, req, p, task->tk_status = rpcauth_wrap_req(task, encode, req, p,
task->tk_msg.rpc_argp); task->tk_msg.rpc_argp);
if (task->tk_status == 0)
set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
} }
/* /*
...@@ -1964,6 +1962,7 @@ call_transmit(struct rpc_task *task) ...@@ -1964,6 +1962,7 @@ call_transmit(struct rpc_task *task)
/* Add task to reply queue before transmission to avoid races */ /* Add task to reply queue before transmission to avoid races */
if (rpc_reply_expected(task)) if (rpc_reply_expected(task))
xprt_request_enqueue_receive(task); xprt_request_enqueue_receive(task);
xprt_request_enqueue_transmit(task);
if (!xprt_prepare_transmit(task)) if (!xprt_prepare_transmit(task))
return; return;
...@@ -1998,7 +1997,6 @@ call_transmit_status(struct rpc_task *task) ...@@ -1998,7 +1997,6 @@ call_transmit_status(struct rpc_task *task)
xprt_end_transmit(task); xprt_end_transmit(task);
break; break;
case -EBADMSG: case -EBADMSG:
clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
task->tk_action = call_transmit; task->tk_action = call_transmit;
task->tk_status = 0; task->tk_status = 0;
xprt_end_transmit(task); xprt_end_transmit(task);
......
...@@ -1058,6 +1058,72 @@ void xprt_request_wait_receive(struct rpc_task *task) ...@@ -1058,6 +1058,72 @@ void xprt_request_wait_receive(struct rpc_task *task)
spin_unlock(&xprt->queue_lock); spin_unlock(&xprt->queue_lock);
} }
static bool
xprt_request_need_transmit(struct rpc_task *task)
{
return !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
xprt_request_retransmit_after_disconnect(task);
}
static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
return xprt_request_need_transmit(task) &&
!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}
/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add the task's request to the tail of the transport's transmission
 * queue and mark it as awaiting transmission. No-op if the request is
 * already queued or does not need to be transmitted.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_transmit(task, req))
		return;

	spin_lock(&xprt->queue_lock);
	list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
	set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);
}
/**
* xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
* @task: pointer to rpc_task
*
* Remove a task from the transmission queue
* Caller must hold xprt->queue_lock
*/
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
xprt_task_clear_bytes_sent(task);
if (test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
list_del(&task->tk_rqstp->rq_xmit);
}
/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Locked wrapper around xprt_request_dequeue_transmit_locked(): takes
 * the transport's queue_lock for the duration of the dequeue.
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}
/** /**
* xprt_prepare_transmit - reserve the transport before sending a request * xprt_prepare_transmit - reserve the transport before sending a request
* @task: RPC task about to send a request * @task: RPC task about to send a request
...@@ -1077,12 +1143,8 @@ bool xprt_prepare_transmit(struct rpc_task *task) ...@@ -1077,12 +1143,8 @@ bool xprt_prepare_transmit(struct rpc_task *task)
task->tk_status = req->rq_reply_bytes_recvd; task->tk_status = req->rq_reply_bytes_recvd;
goto out_unlock; goto out_unlock;
} }
if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) && if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
!xprt_request_retransmit_after_disconnect(task)) {
xprt->ops->set_retrans_timeout(task);
rpc_sleep_on(&xprt->pending, task, xprt_timer);
goto out_unlock; goto out_unlock;
}
} }
if (!xprt->ops->reserve_xprt(xprt, task)) { if (!xprt->ops->reserve_xprt(xprt, task)) {
task->tk_status = -EAGAIN; task->tk_status = -EAGAIN;
...@@ -1116,11 +1178,11 @@ void xprt_transmit(struct rpc_task *task) ...@@ -1116,11 +1178,11 @@ void xprt_transmit(struct rpc_task *task)
if (!req->rq_bytes_sent) { if (!req->rq_bytes_sent) {
if (xprt_request_data_received(task)) if (xprt_request_data_received(task))
return; goto out_dequeue;
/* Verify that our message lies in the RPCSEC_GSS window */ /* Verify that our message lies in the RPCSEC_GSS window */
if (rpcauth_xmit_need_reencode(task)) { if (rpcauth_xmit_need_reencode(task)) {
task->tk_status = -EBADMSG; task->tk_status = -EBADMSG;
return; goto out_dequeue;
} }
} }
...@@ -1135,7 +1197,6 @@ void xprt_transmit(struct rpc_task *task) ...@@ -1135,7 +1197,6 @@ void xprt_transmit(struct rpc_task *task)
xprt_inject_disconnect(xprt); xprt_inject_disconnect(xprt);
dprintk("RPC: %5u xmit complete\n", task->tk_pid); dprintk("RPC: %5u xmit complete\n", task->tk_pid);
clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
task->tk_flags |= RPC_TASK_SENT; task->tk_flags |= RPC_TASK_SENT;
spin_lock_bh(&xprt->transport_lock); spin_lock_bh(&xprt->transport_lock);
...@@ -1147,6 +1208,8 @@ void xprt_transmit(struct rpc_task *task) ...@@ -1147,6 +1208,8 @@ void xprt_transmit(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock); spin_unlock_bh(&xprt->transport_lock);
req->rq_connect_cookie = connect_cookie; req->rq_connect_cookie = connect_cookie;
out_dequeue:
xprt_request_dequeue_transmit(task);
} }
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
...@@ -1420,9 +1483,11 @@ xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req) ...@@ -1420,9 +1483,11 @@ xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
{ {
struct rpc_xprt *xprt = req->rq_xprt; struct rpc_xprt *xprt = req->rq_xprt;
if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) || if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
xprt_is_pinned_rqst(req)) { xprt_is_pinned_rqst(req)) {
spin_lock(&xprt->queue_lock); spin_lock(&xprt->queue_lock);
xprt_request_dequeue_transmit_locked(task);
xprt_request_dequeue_receive_locked(task); xprt_request_dequeue_receive_locked(task);
while (xprt_is_pinned_rqst(req)) { while (xprt_is_pinned_rqst(req)) {
set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate); set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
...@@ -1493,6 +1558,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net) ...@@ -1493,6 +1558,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv_queue); INIT_LIST_HEAD(&xprt->recv_queue);
INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL) #if defined(CONFIG_SUNRPC_BACKCHANNEL)
spin_lock_init(&xprt->bc_pa_lock); spin_lock_init(&xprt->bc_pa_lock);
INIT_LIST_HEAD(&xprt->bc_pa_list); INIT_LIST_HEAD(&xprt->bc_pa_list);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.