Commit ccdd29ff authored by Paul E. McKenney

rcu: Create reasonable API for do_exit() TASKS_RCU processing

Currently, the exit-time support for TASKS_RCU is open-coded in do_exit().
This commit creates exit_tasks_rcu_start() and exit_tasks_rcu_finish()
APIs for do_exit() use.  This has the benefit of confining the use of the
tasks_rcu_exit_srcu variable to one file, allowing it to become static.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 7e42776d
...@@ -162,8 +162,6 @@ static inline void rcu_init_nohz(void) { } ...@@ -162,8 +162,6 @@ static inline void rcu_init_nohz(void) { }
* macro rather than an inline function to avoid #include hell. * macro rather than an inline function to avoid #include hell.
*/ */
#ifdef CONFIG_TASKS_RCU #ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
#define rcu_note_voluntary_context_switch_lite(t) \ #define rcu_note_voluntary_context_switch_lite(t) \
do { \ do { \
if (READ_ONCE((t)->rcu_tasks_holdout)) \ if (READ_ONCE((t)->rcu_tasks_holdout)) \
...@@ -176,12 +174,15 @@ extern struct srcu_struct tasks_rcu_exit_srcu; ...@@ -176,12 +174,15 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
} while (0) } while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void); void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */ #else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
#define rcu_note_voluntary_context_switch_lite(t) do { } while (0) #define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs() #define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#define call_rcu_tasks call_rcu_sched #define call_rcu_tasks call_rcu_sched
#define synchronize_rcu_tasks synchronize_sched #define synchronize_rcu_tasks synchronize_sched
static inline void exit_tasks_rcu_start(void) { }
static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU */ #endif /* #else #ifdef CONFIG_TASKS_RCU */
/** /**
......
...@@ -589,9 +589,10 @@ struct task_struct { ...@@ -589,9 +589,10 @@ struct task_struct {
#ifdef CONFIG_TASKS_RCU #ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw; unsigned long rcu_tasks_nvcsw;
bool rcu_tasks_holdout; u8 rcu_tasks_holdout;
struct list_head rcu_tasks_holdout_list; u8 rcu_tasks_idx;
int rcu_tasks_idle_cpu; int rcu_tasks_idle_cpu;
struct list_head rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */ #endif /* #ifdef CONFIG_TASKS_RCU */
struct sched_info sched_info; struct sched_info sched_info;
......
...@@ -764,7 +764,6 @@ void __noreturn do_exit(long code) ...@@ -764,7 +764,6 @@ void __noreturn do_exit(long code)
{ {
struct task_struct *tsk = current; struct task_struct *tsk = current;
int group_dead; int group_dead;
TASKS_RCU(int tasks_rcu_i);
profile_task_exit(tsk); profile_task_exit(tsk);
kcov_task_exit(tsk); kcov_task_exit(tsk);
...@@ -881,9 +880,7 @@ void __noreturn do_exit(long code) ...@@ -881,9 +880,7 @@ void __noreturn do_exit(long code)
*/ */
flush_ptrace_hw_breakpoint(tsk); flush_ptrace_hw_breakpoint(tsk);
TASKS_RCU(preempt_disable()); exit_tasks_rcu_start();
TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
TASKS_RCU(preempt_enable());
exit_notify(tsk, group_dead); exit_notify(tsk, group_dead);
proc_exit_connector(tsk); proc_exit_connector(tsk);
mpol_put_task_policy(tsk); mpol_put_task_policy(tsk);
...@@ -918,7 +915,7 @@ void __noreturn do_exit(long code) ...@@ -918,7 +915,7 @@ void __noreturn do_exit(long code)
if (tsk->nr_dirtied) if (tsk->nr_dirtied)
__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
exit_rcu(); exit_rcu();
TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i)); exit_tasks_rcu_finish();
do_task_dead(); do_task_dead();
} }
......
...@@ -568,7 +568,7 @@ static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq); ...@@ -568,7 +568,7 @@ static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock); static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
/* Track exiting tasks in order to allow them to be waited for. */ /* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu); DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */ /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
#define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10) #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
...@@ -875,6 +875,22 @@ static void rcu_spawn_tasks_kthread(void) ...@@ -875,6 +875,22 @@ static void rcu_spawn_tasks_kthread(void)
mutex_unlock(&rcu_tasks_kthread_mutex); mutex_unlock(&rcu_tasks_kthread_mutex);
} }
/*
 * Do the srcu_read_lock() for the above synchronize_srcu().
 *
 * Called from do_exit() to mark the current task as entering its
 * exit path, so that the Tasks-RCU grace-period machinery can wait
 * for it via tasks_rcu_exit_srcu.  The matching srcu_read_unlock()
 * is done by exit_tasks_rcu_finish().
 */
void exit_tasks_rcu_start(void)
{
/* NOTE(review): preempt_disable()/preempt_enable() bracket the lock,
 * mirroring the open-coded TASKS_RCU(preempt_disable()) sequence this
 * replaces — presumably to keep the __srcu_read_lock() and the store of
 * its index into current atomic w.r.t. preemption; confirm against the
 * SRCU per-CPU counter requirements. */
preempt_disable();
/* Stash the SRCU read-side index in task_struct::rcu_tasks_idx (u8)
 * so exit_tasks_rcu_finish() can pass it back to __srcu_read_unlock(). */
current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
preempt_enable();
}
/*
 * Do the srcu_read_unlock() for the above synchronize_srcu().
 *
 * Called from do_exit() just before do_task_dead(): releases the
 * tasks_rcu_exit_srcu read-side critical section that was entered by
 * exit_tasks_rcu_start(), using the index saved in
 * task_struct::rcu_tasks_idx.
 */
void exit_tasks_rcu_finish(void)
{
/* NOTE(review): preemption is disabled around the unlock just as the
 * replaced open-coded sequence did around the lock — assumed needed so
 * the read of current->rcu_tasks_idx and the __srcu_read_unlock() run
 * without migration; verify against SRCU documentation. */
preempt_disable();
__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
preempt_enable();
}
#endif /* #ifdef CONFIG_TASKS_RCU */ #endif /* #ifdef CONFIG_TASKS_RCU */
#ifndef CONFIG_TINY_RCU #ifndef CONFIG_TINY_RCU
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment