Commit 2727872d authored by Ingo Molnar

Merge branch 'timers/nohz-reviewed' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/nohz

Pull full dynticks timekeeping and RCU improvements from Frederic Weisbecker.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 65d798f0 f98823ac
@@ -1916,8 +1916,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	nohz_full=	[KNL,BOOT]
 			In kernels built with CONFIG_NO_HZ_FULL=y, set
 			the specified list of CPUs whose tick will be stopped
-			whenever possible. You need to keep at least one online
-			CPU outside the range to maintain the timekeeping.
+			whenever possible. The boot CPU will be forced outside
+			the range to maintain the timekeeping.
+			The CPUs in this range must also be included in the
+			rcu_nocbs= set.
 
 	noiotrap	[SH]	Disables trapped I/O port accesses.
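For illustration only, with made-up CPU numbers that are not part of this commit: on an 8-CPU machine the documented constraints could be met by booting with

	nohz_full=1-7 rcu_nocbs=1-7

leaving the boot CPU, CPU 0, out of both ranges. With the change above, listing the boot CPU in nohz_full= no longer has to be avoided by hand: it is simply cleared from the range so it can keep handling the timekeeping.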
@@ -999,4 +999,11 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define kfree_rcu(ptr, rcu_head)					\
 	__kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
+#ifdef CONFIG_RCU_NOCB_CPU
+extern bool rcu_is_nocb_cpu(int cpu);
+#else
+static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
+#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
 #endif /* __LINUX_RCUPDATE_H */
@@ -158,8 +158,10 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !CONFIG_NO_HZ_COMMON */
 
 #ifdef CONFIG_NO_HZ_FULL
+extern void tick_nohz_init(void);
 extern int tick_nohz_full_cpu(int cpu);
 #else
+static inline void tick_nohz_init(void) { }
 static inline int tick_nohz_full_cpu(int cpu) { return 0; }
 #endif
@@ -547,6 +547,7 @@ asmlinkage void __init start_kernel(void)
 	idr_init_cache();
 	perf_event_init();
 	rcu_init();
+	tick_nohz_init();
 	radix_tree_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
@@ -1695,7 +1695,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 			  struct rcu_node *rnp, struct rcu_data *rdp)
 {
 	/* No-CBs CPUs do not have orphanable callbacks. */
-	if (is_nocb_cpu(rdp->cpu))
+	if (rcu_is_nocb_cpu(rdp->cpu))
 		return;
 
 	/*
@@ -2757,10 +2757,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * corresponding CPU's preceding callbacks have been invoked.
 	 */
 	for_each_possible_cpu(cpu) {
-		if (!cpu_online(cpu) && !is_nocb_cpu(cpu))
+		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
 			continue;
 		rdp = per_cpu_ptr(rsp->rda, cpu);
-		if (is_nocb_cpu(cpu)) {
+		if (rcu_is_nocb_cpu(cpu)) {
 			_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
 					   rsp->n_barrier_done);
 			atomic_inc(&rsp->barrier_cpu_count);
@@ -529,7 +529,6 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
 static void print_cpu_stall_info_end(void);
 static void zero_cpu_stall_ticks(struct rcu_data *rdp);
 static void increment_cpu_stall_ticks(void);
-static bool is_nocb_cpu(int cpu);
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 			    bool lazy);
 static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
@@ -2167,7 +2167,7 @@ static int __init parse_rcu_nocb_poll(char *arg)
 early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
 
 /* Is the specified CPU a no-CPUs CPU? */
-static bool is_nocb_cpu(int cpu)
+bool rcu_is_nocb_cpu(int cpu)
 {
 	if (have_rcu_nocb_mask)
 		return cpumask_test_cpu(cpu, rcu_nocb_mask);
@@ -2225,7 +2225,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 			    bool lazy)
 {
 
-	if (!is_nocb_cpu(rdp->cpu))
+	if (!rcu_is_nocb_cpu(rdp->cpu))
 		return 0;
 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy);
 	return 1;
@@ -2242,7 +2242,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
 	long qll = rsp->qlen_lazy;
 
 	/* If this is not a no-CBs CPU, tell the caller to do it the old way. */
-	if (!is_nocb_cpu(smp_processor_id()))
+	if (!rcu_is_nocb_cpu(smp_processor_id()))
 		return 0;
 	rsp->qlen = 0;
 	rsp->qlen_lazy = 0;
@@ -2282,7 +2282,7 @@ static bool nocb_cpu_expendable(int cpu)
 	 * If there are no no-CB CPUs or if this CPU is not a no-CB CPU,
 	 * then offlining this CPU is harmless. Let it happen.
 	 */
-	if (!have_rcu_nocb_mask || is_nocb_cpu(cpu))
+	if (!have_rcu_nocb_mask || rcu_is_nocb_cpu(cpu))
 		return 1;
 
 	/* If no memory, play it safe and keep the CPU around. */
@@ -2464,11 +2464,6 @@ static void __init rcu_init_nocb(void)
 
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 
-static bool is_nocb_cpu(int cpu)
-{
-	return false;
-}
-
 static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 			    bool lazy)
 {
@@ -128,6 +128,16 @@ config NO_HZ_FULL
 
 endchoice
 
+config NO_HZ_FULL_ALL
+	bool "Full dynticks system on all CPUs by default"
+	depends on NO_HZ_FULL
+	help
+	  If the user doesn't pass the nohz_full boot option to
+	  define the range of full dynticks CPUs, consider that all
+	  CPUs in the system are full dynticks by default.
+	  Note the boot CPU will still be kept outside the range to
+	  handle the timekeeping duty.
+
 config NO_HZ
 	bool "Old Idle dynticks config"
 	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
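As a rough sketch of how the options touched by this series might be combined, an illustrative .config fragment (not part of the commit):

	CONFIG_NO_HZ_FULL=y
	CONFIG_NO_HZ_FULL_ALL=y
	CONFIG_RCU_NOCB_CPU=y

With NO_HZ_FULL_ALL=y no nohz_full= boot parameter is needed and every CPU except the boot CPU is treated as full dynticks. Which CPUs are actually no-CBs is still set by the rcu_nocbs= boot parameter, and the check added to tick_nohz_init() below clears any full dynticks CPU that is not also in that set.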
@@ -158,11 +158,21 @@ int tick_nohz_full_cpu(int cpu)
 /* Parse the boot-time nohz CPU list from the kernel parameters. */
 static int __init tick_nohz_full_setup(char *str)
 {
+	int cpu;
+
 	alloc_bootmem_cpumask_var(&nohz_full_mask);
-	if (cpulist_parse(str, nohz_full_mask) < 0)
+	if (cpulist_parse(str, nohz_full_mask) < 0) {
 		pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
-	else
-		have_nohz_full_mask = true;
+		return 1;
+	}
+
+	cpu = smp_processor_id();
+	if (cpumask_test_cpu(cpu, nohz_full_mask)) {
+		pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
+		cpumask_clear_cpu(cpu, nohz_full_mask);
+	}
+	have_nohz_full_mask = true;
+
 	return 1;
 }
 __setup("nohz_full=", tick_nohz_full_setup);
@@ -193,51 +203,46 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
  */
 static char __initdata nohz_full_buf[NR_CPUS + 1];
 
-static int __init init_tick_nohz_full(void)
+static int tick_nohz_init_all(void)
 {
-	cpumask_var_t online_nohz;
-	int cpu;
+	int err = -1;
 
-	if (!have_nohz_full_mask)
-		return 0;
-
-	cpu_notifier(tick_nohz_cpu_down_callback, 0);
-
-	if (!zalloc_cpumask_var(&online_nohz, GFP_KERNEL)) {
-		pr_warning("NO_HZ: Not enough memory to check full nohz mask\n");
-		return -ENOMEM;
+#ifdef CONFIG_NO_HZ_FULL_ALL
+	if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) {
+		pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
+		return err;
 	}
+	err = 0;
+	cpumask_setall(nohz_full_mask);
+	cpumask_clear_cpu(smp_processor_id(), nohz_full_mask);
+	have_nohz_full_mask = true;
+#endif
+	return err;
+}
 
-	/*
-	 * CPUs can probably not be concurrently offlined on initcall time.
-	 * But we are paranoid, aren't we?
-	 */
-	get_online_cpus();
+void __init tick_nohz_init(void)
+{
+	int cpu;
 
-	/* Ensure we keep a CPU outside the dynticks range for timekeeping */
-	cpumask_and(online_nohz, cpu_online_mask, nohz_full_mask);
-	if (cpumask_equal(online_nohz, cpu_online_mask)) {
-		pr_warning("NO_HZ: Must keep at least one online CPU "
-			   "out of nohz_full range\n");
-		/*
-		 * We know the current CPU doesn't have its tick stopped.
-		 * Let's use it for the timekeeping duty.
-		 */
-		preempt_disable();
-		cpu = smp_processor_id();
-		pr_warning("NO_HZ: Clearing %d from nohz_full range\n", cpu);
-		cpumask_clear_cpu(cpu, nohz_full_mask);
-		preempt_enable();
+	if (!have_nohz_full_mask) {
+		if (tick_nohz_init_all() < 0)
+			return;
 	}
-	put_online_cpus();
-	free_cpumask_var(online_nohz);
+
+	cpu_notifier(tick_nohz_cpu_down_callback, 0);
+
+	/* Make sure full dynticks CPU are also RCU nocbs */
+	for_each_cpu(cpu, nohz_full_mask) {
+		if (!rcu_is_nocb_cpu(cpu)) {
+			pr_warning("NO_HZ: CPU %d is not RCU nocb: "
+				   "cleared from nohz_full range", cpu);
+			cpumask_clear_cpu(cpu, nohz_full_mask);
+		}
+	}
 
 	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
 	pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
-
-	return 0;
 }
-core_initcall(init_tick_nohz_full);
 #else
 #define have_nohz_full_mask (0)
 #endif