Commit ae67bd38 authored by Trond Myklebust, committed by Anna Schumaker

SUNRPC: Fix up task signalling

The RPC_TASK_KILLED flag should not be set from another context, because a
non-atomic update of task->tk_flags can clobber other flag bits in the
struct rpc_task.
Let's therefore replace RPC_TASK_KILLED with an atomic bit in
task->tk_runstate, and add a function that sets that bit and safely wakes
up the task.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 085b7755
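
For readers who have not seen this kind of bug before, the following stand-alone user-space program (not kernel code; every name in it is invented for the illustration) shows why the old scheme was fragile. Two threads OR different bits into the same word: with a plain |= the read-modify-write is not atomic, so one thread can overwrite the bit the other just set, which is how an asynchronous tk_flags |= RPC_TASK_KILLED could clobber concurrent flag updates. An atomic fetch-or, the user-space counterpart of the kernel's set_bit() on tk_runstate, cannot lose updates. Whether the plain version actually drops bits in a given run depends on the machine and scheduler; the point is that nothing prevents it.

/*
 * Stand-alone illustration of the race described above; not kernel code.
 * Build with: cc -O2 -pthread flag_race.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ROUNDS 1000000

static unsigned long plain_flags;	/* analogue of tk_flags: plain |= */
static atomic_ulong atomic_flags;	/* analogue of an atomic bit word */
static unsigned long plain_lost, atomic_lost;
static pthread_barrier_t barrier;

static void *worker(void *arg)
{
	unsigned long mybit = (unsigned long)arg;	/* 0x1 or 0x2 */

	for (int i = 0; i < ROUNDS; i++) {
		pthread_barrier_wait(&barrier);		/* start the round together */
		plain_flags |= mybit;			/* racy read-modify-write */
		atomic_fetch_or(&atomic_flags, mybit);	/* atomic read-modify-write */
		pthread_barrier_wait(&barrier);		/* both updates issued */
		if (mybit == 0x1) {			/* one thread scores the round */
			if (plain_flags != 0x3)
				plain_lost++;
			if (atomic_load(&atomic_flags) != 0x3)
				atomic_lost++;
			plain_flags = 0;
			atomic_store(&atomic_flags, 0);
		}
		pthread_barrier_wait(&barrier);		/* reset visible to both */
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_barrier_init(&barrier, NULL, 2);
	pthread_create(&t1, NULL, worker, (void *)0x1UL);
	pthread_create(&t2, NULL, worker, (void *)0x2UL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("rounds with a lost bit, plain |= : %lu\n", plain_lost);
	printf("rounds with a lost bit, fetch_or : %lu\n", atomic_lost);
	return 0;
}
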
@@ -715,7 +715,7 @@ static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
 	struct nlm_rqst	*req = data;
 	u32 status = ntohl(req->a_res.status);
 
-	if (RPC_ASSASSINATED(task))
+	if (RPC_SIGNALLED(task))
 		goto die;
 
 	if (task->tk_status < 0) {

@@ -783,7 +783,7 @@ static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
 	struct nlm_rqst	*req = data;
 	u32 status = ntohl(req->a_res.status);
 
-	if (RPC_ASSASSINATED(task))
+	if (RPC_SIGNALLED(task))
 		goto die;
 
 	if (task->tk_status < 0) {
@@ -1032,7 +1032,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
 	 * the submission code will error out, so we don't need to
 	 * handle that case here.
 	 */
-	if (task->tk_flags & RPC_TASK_KILLED)
+	if (RPC_SIGNALLED(task))
 		goto need_restart;
 
 	return true;

@@ -1081,7 +1081,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
 	dprintk("%s: freed slot, new seqid=%d\n", __func__,
 		clp->cl_cb_session->se_cb_seq_nr);
 
-	if (task->tk_flags & RPC_TASK_KILLED)
+	if (RPC_SIGNALLED(task))
 		goto need_restart;
 out:
 	return ret;
@@ -125,7 +125,6 @@ struct rpc_task_setup {
 #define RPC_CALL_MAJORSEEN	0x0020		/* major timeout seen */
 #define RPC_TASK_ROOTCREDS	0x0040		/* force root creds */
 #define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
-#define RPC_TASK_KILLED		0x0100		/* task was killed */
 #define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
 #define RPC_TASK_SOFTCONN	0x0400		/* Fail if can't connect */
 #define RPC_TASK_SENT		0x0800		/* message was sent */

@@ -135,7 +134,6 @@ struct rpc_task_setup {
 #define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
 #define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
-#define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
 #define RPC_IS_SOFT(t)		((t)->tk_flags & (RPC_TASK_SOFT|RPC_TASK_TIMEOUT))
 #define RPC_IS_SOFTCONN(t)	((t)->tk_flags & RPC_TASK_SOFTCONN)
 #define RPC_WAS_SENT(t)		((t)->tk_flags & RPC_TASK_SENT)

@@ -146,6 +144,7 @@ struct rpc_task_setup {
 #define RPC_TASK_NEED_XMIT	3
 #define RPC_TASK_NEED_RECV	4
 #define RPC_TASK_MSG_PIN_WAIT	5
+#define RPC_TASK_SIGNALLED	6
 
 #define RPC_IS_RUNNING(t)	test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
 #define rpc_set_running(t)	set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)

@@ -169,6 +168,8 @@ struct rpc_task_setup {
 #define RPC_IS_ACTIVATED(t)	test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
 
+#define RPC_SIGNALLED(t)	test_bit(RPC_TASK_SIGNALLED, &(t)->tk_runstate)
+
 /*
  * Task priorities.
  * Note: if you change these, you must also change
 
@@ -217,6 +218,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
 struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req);
 void		rpc_put_task(struct rpc_task *);
 void		rpc_put_task_async(struct rpc_task *);
+void		rpc_signal_task(struct rpc_task *);
 void		rpc_exit_task(struct rpc_task *);
 void		rpc_exit(struct rpc_task *, int);
 void		rpc_release_calldata(const struct rpc_call_ops *, void *);
@@ -82,7 +82,6 @@ TRACE_DEFINE_ENUM(RPC_TASK_SWAPPER);
 TRACE_DEFINE_ENUM(RPC_CALL_MAJORSEEN);
 TRACE_DEFINE_ENUM(RPC_TASK_ROOTCREDS);
 TRACE_DEFINE_ENUM(RPC_TASK_DYNAMIC);
-TRACE_DEFINE_ENUM(RPC_TASK_KILLED);
 TRACE_DEFINE_ENUM(RPC_TASK_SOFT);
 TRACE_DEFINE_ENUM(RPC_TASK_SOFTCONN);
 TRACE_DEFINE_ENUM(RPC_TASK_SENT);

@@ -97,7 +96,6 @@ TRACE_DEFINE_ENUM(RPC_TASK_NO_RETRANS_TIMEOUT);
 		{ RPC_CALL_MAJORSEEN, "MAJORSEEN" },		\
 		{ RPC_TASK_ROOTCREDS, "ROOTCREDS" },		\
 		{ RPC_TASK_DYNAMIC, "DYNAMIC" },		\
-		{ RPC_TASK_KILLED, "KILLED" },			\
 		{ RPC_TASK_SOFT, "SOFT" },			\
 		{ RPC_TASK_SOFTCONN, "SOFTCONN" },		\
 		{ RPC_TASK_SENT, "SENT" },			\

@@ -111,6 +109,7 @@ TRACE_DEFINE_ENUM(RPC_TASK_ACTIVE);
 TRACE_DEFINE_ENUM(RPC_TASK_NEED_XMIT);
 TRACE_DEFINE_ENUM(RPC_TASK_NEED_RECV);
 TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
+TRACE_DEFINE_ENUM(RPC_TASK_SIGNALLED);
 
 #define rpc_show_runstate(flags)				\
 	__print_flags(flags, "|",				\

@@ -119,7 +118,8 @@ TRACE_DEFINE_ENUM(RPC_TASK_MSG_PIN_WAIT);
 		{ (1UL << RPC_TASK_ACTIVE), "ACTIVE" },			\
 		{ (1UL << RPC_TASK_NEED_XMIT), "NEED_XMIT" },		\
 		{ (1UL << RPC_TASK_NEED_RECV), "NEED_RECV" },		\
-		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" })
+		{ (1UL << RPC_TASK_MSG_PIN_WAIT), "MSG_PIN_WAIT" },	\
+		{ (1UL << RPC_TASK_SIGNALLED), "SIGNALLED" })
 
 DECLARE_EVENT_CLASS(rpc_task_running,
@@ -827,14 +827,8 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
 	 * Spin lock all_tasks to prevent changes...
 	 */
 	spin_lock(&clnt->cl_lock);
-	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) {
-		if (!RPC_IS_ACTIVATED(rovr))
-			continue;
-		if (!(rovr->tk_flags & RPC_TASK_KILLED)) {
-			rovr->tk_flags |= RPC_TASK_KILLED;
-			rpc_exit(rovr, -EIO);
-		}
-	}
+	list_for_each_entry(rovr, &clnt->cl_tasks, tk_task)
+		rpc_signal_task(rovr);
 	spin_unlock(&clnt->cl_lock);
 }
 EXPORT_SYMBOL_GPL(rpc_killall_tasks);

@@ -1477,8 +1471,6 @@ EXPORT_SYMBOL_GPL(rpc_force_rebind);
 int
 rpc_restart_call_prepare(struct rpc_task *task)
 {
-	if (RPC_ASSASSINATED(task))
-		return 0;
 	task->tk_action = call_start;
 	task->tk_status = 0;
 	if (task->tk_ops->rpc_call_prepare != NULL)

@@ -1494,8 +1486,6 @@ EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);
 int
 rpc_restart_call(struct rpc_task *task)
 {
-	if (RPC_ASSASSINATED(task))
-		return 0;
 	task->tk_action = call_start;
 	task->tk_status = 0;
 	return 1;
@@ -759,8 +759,7 @@ static void
 rpc_reset_task_statistics(struct rpc_task *task)
 {
 	task->tk_timeouts = 0;
-	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+	task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
 	rpc_init_task_statistics(task);
 }

@@ -773,7 +772,6 @@ void rpc_exit_task(struct rpc_task *task)
 	if (task->tk_ops->rpc_call_done != NULL) {
 		task->tk_ops->rpc_call_done(task, task->tk_calldata);
 		if (task->tk_action != NULL) {
-			WARN_ON(RPC_ASSASSINATED(task));
 			/* Always release the RPC slot and buffer memory */
 			xprt_release(task);
 			rpc_reset_task_statistics(task);

@@ -781,6 +779,19 @@ void rpc_exit_task(struct rpc_task *task)
 	}
 }
 
+void rpc_signal_task(struct rpc_task *task)
+{
+	struct rpc_wait_queue *queue;
+
+	if (!RPC_IS_ACTIVATED(task))
+		return;
+	set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
+	smp_mb__after_atomic();
+	queue = READ_ONCE(task->tk_waitqueue);
+	if (queue)
+		rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
+}
+
 void rpc_exit(struct rpc_task *task, int status)
 {
 	task->tk_status = status;

@@ -836,6 +847,13 @@ static void __rpc_execute(struct rpc_task *task)
 		 */
 		if (!RPC_IS_QUEUED(task))
			continue;
+
+		/*
+		 * Signalled tasks should exit rather than sleep.
+		 */
+		if (RPC_SIGNALLED(task))
+			rpc_exit(task, -ERESTARTSYS);
+
 		/*
 		 * The queue->lock protects against races with
 		 * rpc_make_runnable().

@@ -861,7 +879,7 @@ static void __rpc_execute(struct rpc_task *task)
 		status = out_of_line_wait_on_bit(&task->tk_runstate,
 				RPC_TASK_QUEUED, rpc_wait_bit_killable,
 				TASK_KILLABLE);
-		if (status == -ERESTARTSYS) {
+		if (status < 0) {
 			/*
 			 * When a sync task receives a signal, it exits with
 			 * -ERESTARTSYS. In order to catch any callbacks that

@@ -869,7 +887,7 @@ static void __rpc_execute(struct rpc_task *task)
 			 * break the loop here, but go around once more.
 			 */
 			dprintk("RPC: %5u got signal\n", task->tk_pid);
-			task->tk_flags |= RPC_TASK_KILLED;
+			set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
 			rpc_exit(task, -ERESTARTSYS);
 		}
 		dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
@@ -1337,6 +1337,10 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
 			if (status < 0)
 				goto out_dequeue;
 		}
+		if (RPC_SIGNALLED(task)) {
+			status = -ERESTARTSYS;
+			goto out_dequeue;
+		}
 	}
 
 	/*
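
To summarise how the new pieces fit together, here is a minimal user-space sketch of the pattern that rpc_signal_task() and RPC_SIGNALLED() implement: the signalling side sets an atomic bit and then wakes the sleeping task, while the task's run loop tests the bit and exits instead of going back to sleep. It assumes only POSIX threads and C11 atomics; struct task, task_signal() and task_run() are invented names, and the condition variable stands in for the RPC wait queue, hiding the ordering details that the kernel handles explicitly with smp_mb__after_atomic() and READ_ONCE(task->tk_waitqueue).

/* User-space sketch of the "set atomic bit, then wake" pattern; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TASK_SIGNALLED	0	/* bit number, like RPC_TASK_SIGNALLED */

struct task {
	atomic_ulong	runstate;	/* atomic bits, like tk_runstate */
	pthread_mutex_t	lock;		/* stands in for the wait queue lock */
	pthread_cond_t	cond;		/* stands in for the wait queue */
};

static bool task_signalled(struct task *t)	/* like RPC_SIGNALLED() */
{
	return atomic_load(&t->runstate) & (1UL << TASK_SIGNALLED);
}

/* Like rpc_signal_task(): set the bit atomically, then wake the task. */
static void task_signal(struct task *t)
{
	atomic_fetch_or(&t->runstate, 1UL << TASK_SIGNALLED);
	pthread_mutex_lock(&t->lock);
	pthread_cond_broadcast(&t->cond);
	pthread_mutex_unlock(&t->lock);
}

/* Like the __rpc_execute() loop: exit rather than sleep once signalled. */
static void *task_run(void *arg)
{
	struct task *t = arg;

	pthread_mutex_lock(&t->lock);
	while (!task_signalled(t))
		pthread_cond_wait(&t->cond, &t->lock);
	pthread_mutex_unlock(&t->lock);

	printf("task: signalled, exiting (the kernel would use -ERESTARTSYS)\n");
	return NULL;
}

int main(void)
{
	struct task t = {
		.runstate = 0,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.cond = PTHREAD_COND_INITIALIZER,
	};
	pthread_t thr;

	pthread_create(&thr, NULL, task_run, &t);
	sleep(1);
	task_signal(&t);	/* what rpc_killall_tasks() now does per task */
	pthread_join(&thr, NULL);
	return 0;
}

With this shape, rpc_killall_tasks() reduces to walking the client's task list and calling the signalling helper on each entry, as the clnt.c hunk above shows.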