Commit 79c99152 authored by Trond Myklebust

SUNRPC: Convert the xprt->sending queue back to an ordinary wait queue

We no longer need priority semantics on the xprt->sending queue, because
the order in which tasks are sent is now dictated by their position in
the send queue.
Note that the backlog queue remains a priority queue, meaning that
slot resources are still managed in order of task priority.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent f42f7c28
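For context (this sketch is not part of the commit), the change hinges on how the two SUNRPC wait-queue flavours order wakeups: a priority wait queue wakes the highest-priority sleeper first, while an ordinary wait queue wakes sleepers in the order they went to sleep. Because send order is now fixed by a task's position in the send queue, a plain FIFO wait on xprt->sending suffices. A minimal sketch using the existing wait-queue API; the local `sending` variable and the helper names below are illustrative stand-ins, not code from the commit:

	/* Sketch only: the two queue flavours contrasted. */
	#include <linux/sunrpc/sched.h>

	static struct rpc_wait_queue sending;	/* stand-in for xprt->sending */

	static void sending_queue_init(void)
	{
		/* Old behaviour: wake order driven by RPC_PRIORITY_* levels. */
		/* rpc_init_priority_wait_queue(&sending, "xprt_sending"); */

		/* New behaviour: ordinary queue, wake order == sleep order. */
		rpc_init_wait_queue(&sending, "xprt_sending");
	}

	static void wait_for_send_slot(struct rpc_task *task)
	{
		/*
		 * No per-task priority to compute; the task simply sleeps
		 * and is woken in FIFO order when the transport is released.
		 */
		rpc_sleep_on(&sending, task, NULL);
	}

The backlog queue, by contrast, keeps rpc_init_priority_wait_queue(), so slot allocation still honours task priority as the commit message notes.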
net/sunrpc/xprt.c

@@ -192,7 +192,6 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
-	int priority;
 
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 		if (task == xprt->snd_task)
@@ -212,13 +211,7 @@ int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 			task->tk_pid, xprt);
 	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
 	task->tk_status = -EAGAIN;
-	if (req == NULL)
-		priority = RPC_PRIORITY_LOW;
-	else if (!req->rq_ntrans)
-		priority = RPC_PRIORITY_NORMAL;
-	else
-		priority = RPC_PRIORITY_HIGH;
-	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
+	rpc_sleep_on(&xprt->sending, task, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
@@ -260,7 +253,6 @@ xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
-	int priority;
 
 	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
 		if (task == xprt->snd_task)
@@ -283,13 +275,7 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
 	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
 	task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
 	task->tk_status = -EAGAIN;
-	if (req == NULL)
-		priority = RPC_PRIORITY_LOW;
-	else if (!req->rq_ntrans)
-		priority = RPC_PRIORITY_NORMAL;
-	else
-		priority = RPC_PRIORITY_HIGH;
-	rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
+	rpc_sleep_on(&xprt->sending, task, NULL);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -1796,7 +1782,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
 	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
 	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
-	rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
+	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
 	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
 	xprt_init_xid(xprt);