// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/kvm_para.h>
#include <linux/rcu_notifier.h>

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
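/* Number of stall warnings required before panic() once the above is set. */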
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif
#define RCU_STALL_MIGHT_DIV		8
#define RCU_STALL_MIGHT_MIN		(2 * HZ)

int rcu_exp_jiffies_till_stall_check(void)
{
	int cpu_stall_timeout = READ_ONCE(rcu_exp_cpu_stall_timeout);
	int exp_stall_delay_delta = 0;
	int till_stall_check;

	// Zero says to use rcu_cpu_stall_timeout, but in milliseconds.
	if (!cpu_stall_timeout)
		cpu_stall_timeout = jiffies_to_msecs(rcu_jiffies_till_stall_check());

	// Limit check must be consistent with the Kconfig limits for
	// CONFIG_RCU_EXP_CPU_STALL_TIMEOUT, so check the allowed range.
	// The minimum clamped value is "2UL", because at least one full
	// tick has to be guaranteed.
	till_stall_check = clamp(msecs_to_jiffies(cpu_stall_timeout), 2UL, 300UL * HZ);

	if (cpu_stall_timeout && jiffies_to_msecs(till_stall_check) != cpu_stall_timeout)
		WRITE_ONCE(rcu_exp_cpu_stall_timeout, jiffies_to_msecs(till_stall_check));

#ifdef CONFIG_PROVE_RCU
	/* Add an extra ~25% of till_stall_check. */
	exp_stall_delay_delta = ((till_stall_check * 25) / 100) + 1;
#endif

	return till_stall_check + exp_stall_delay_delta;
}
EXPORT_SYMBOL_GPL(rcu_exp_jiffies_till_stall_check);
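
/*
 * Worked example (assuming HZ=1000): with rcu_exp_cpu_stall_timeout set to
 * 20 ms, till_stall_check is clamped to 20 jiffies; under CONFIG_PROVE_RCU an
 * extra (20 * 25) / 100 + 1 = 6 jiffies is added, so this returns 26.
 */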

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
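
/*
 * For example, with the common default of rcu_cpu_stall_timeout == 21
 * (seconds), this returns 21 * HZ jiffies, plus a 5 * HZ slack when
 * CONFIG_PROVE_RCU is enabled.
 */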

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns %true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
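
/*
 * Hypothetical usage sketch (names below are illustrative only): when the
 * grace period looks stalled, prefer a direct synchronize_rcu() over
 * allocating memory just to defer the free via call_rcu():
 *
 *	if (rcu_gp_might_be_stalled() ||
 *	    !(batch = kmalloc(sizeof(*batch), GFP_NOWAIT))) {
 *		synchronize_rcu();
 *		kfree(old_item);	// Safe: grace period has elapsed.
 *	} else {
 *		batch->item = old_item;
 *		call_rcu(&batch->rh, free_batch_cb);
 *	}
 */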

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
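/*
 * The value 2 marks sysrq-initiated suppression, so rcu_sysrq_end() clears
 * only what rcu_sysrq_start() set (rcu_panic() below uses 1).
 */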
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 *
 * To perform the reset request from the caller, disable stall detection until
 * 3 fqs loops have passed. This is required to ensure a fresh jiffies is
 * loaded.  It should be safe to do from the fqs loop as enough timer
 * interrupts and context switches should have passed.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 3);
	WRITE_ONCE(rcu_state.jiffies_stall, ULONG_MAX);
}

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.nr_fqs_jiffies_stall, 0);
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static int check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return -EBUSY; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return 0;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
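
	/*
	 * Print each collected task: just "P<pid>" if it is now running,
	 * otherwise "P<pid>/<nesting>:" plus the flag characters reported by
	 * check_slow_task() (blocked, need_qs, exp_hint, on blkd_tasks list).
	 */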
	while (i) {
		t = ts[--i];
		if (task_call_func(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

static bool rcu_is_rcuc_kthread_starving(struct rcu_data *rdp, unsigned long *jp)
{
	int cpu;
	struct task_struct *rcuc;
	unsigned long j;

	rcuc = rdp->rcu_cpu_kthread_task;
	if (!rcuc)
		return false;

	cpu = task_cpu(rcuc);
	if (cpu_is_offline(cpu) || idle_cpu(cpu))
		return false;

	j = jiffies - READ_ONCE(rdp->rcuc_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

static void print_cpu_stat_info(int cpu)
{
	struct rcu_snap_record rsr, *rsrp;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	struct kernel_cpustat *kcsp = &kcpustat_cpu(cpu);

	if (!rcu_cpu_stall_cputime)
		return;

	rsrp = &rdp->snap_record;
	if (rsrp->gp_seq != rdp->gp_seq)
		return;

	rsr.cputime_irq     = kcpustat_field(kcsp, CPUTIME_IRQ, cpu);
	rsr.cputime_softirq = kcpustat_field(kcsp, CPUTIME_SOFTIRQ, cpu);
	rsr.cputime_system  = kcpustat_field(kcsp, CPUTIME_SYSTEM, cpu);

	pr_err("\t         hardirqs   softirqs   csw/system\n");
	pr_err("\t number: %8ld %10d %12lld\n",
		kstat_cpu_irqs_sum(cpu) - rsrp->nr_hardirqs,
		kstat_cpu_softirqs_sum(cpu) - rsrp->nr_softirqs,
		nr_context_switches_cpu(cpu) - rsrp->nr_csw);
	pr_err("\tcputime: %8lld %10lld %12lld   ==> %d(ms)\n",
		div_u64(rsr.cputime_irq - rsrp->cputime_irq, NSEC_PER_MSEC),
		div_u64(rsr.cputime_softirq - rsrp->cputime_softirq, NSEC_PER_MSEC),
		div_u64(rsr.cputime_system - rsrp->cputime_system, NSEC_PER_MSEC),
		jiffies_to_msecs(jiffies - rsrp->jiffies));
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;
	bool rcuc_starved;
	unsigned long j;
	char buf[32];

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(cpu));
	rcuc_starved = rcu_is_rcuc_kthread_starving(rdp, &j);
	if (rcuc_starved)
		sprintf(buf, " rcuc=%ld jiffies(starved)", j);
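
	/*
	 * One line per stalled CPU: CPU number, four state characters
	 * (online, in ->qsmaskinit, in ->qsmaskinitnext, irq_work progress),
	 * tick/GP counts, dynticks state, softirq counts, fqs count, and
	 * optional rcuc-starvation and false-positive annotations.  See
	 * Documentation/RCU/stallwarn.rst for the full decoding.
	 */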
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%04x/%ld/%#lx softirq=%u/%u fqs=%ld%s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
				"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(cpu) & 0xffff,
	       ct_dynticks_nesting_cpu(cpu), ct_dynticks_nmi_nesting_cpu(cpu),
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       rcuc_starved ? buf : "",
	       falsepositive ? " (false positive?)" : "");

	print_cpu_stat_info(cpu);
}

/* Complain about starvation of grace-period kthread.  */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(READ_ONCE(rcu_state.gp_flags)),
		       gp_state_getname(rcu_state.gp_state),
		       data_race(READ_ONCE(rcu_state.gp_state)),
		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
		if (gpk) {
			struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu_is_offline(cpu)) {
				pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
			} else if (!(data_race(READ_ONCE(rdp->mynode->qsmask)) & rdp->grpmask)) {
				pr_err("Stack dump where RCU GP kthread last ran:\n");
				dump_cpu_task(cpu);
			}
			wake_up_process(gpk);
		}
	}
}

/* Complain about missing wakeups from expired fqs wait timer */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(READ_ONCE(rcu_state.gp_flags)), // Diagnostic read
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       data_race(READ_ONCE(gpk->__state)));
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_err("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu ncpus=%d)\n",
	       smp_processor_id(), (long)(jiffies - gps),
	       (long)rcu_seq_current(&rcu_state.gp_seq), totqlen,
	       data_race(rcu_state.n_online_cpus)); // Diagnostic read
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(READ_ONCE(jiffies_till_next_fqs)),
			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_err("\t(t=%lu jiffies g=%ld q=%lu ncpus=%d)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen,
		data_race(rcu_state.n_online_cpus)); // Diagnostic read

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	bool self_detected;
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();

	/*
	 * Check if it was requested (via rcu_cpu_stall_reset()) that the FQS
	 * loop has to set jiffies to ensure a non-stale jiffies value. This
	 * is required to have a good jiffies value after coming out of long
	 * breaks of jiffies updates. Not doing so can cause false positives.
	 */
	if (READ_ONCE(rcu_state.nr_fqs_jiffies_stall) > 0)
		return;

	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + ULONG_MAX / 2;
	self_detected = READ_ONCE(rnp->qsmask) & rdp->grpmask;
	if (rcu_gp_in_progress() &&
	    (self_detected || ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		rcu_stall_notifier_call_chain(RCU_STALL_NOTIFY_NORM, (void *)(j - gps));
		if (self_detected) {
			/* We haven't checked in, so go dump stack. */
			print_cpu_stall(gps);
		} else {
			/* They had a few time units to dump stack, so complain. */
			print_other_cpu_stall(gs2, gps);
		}

		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);

		if (READ_ONCE(rcu_state.jiffies_stall) == jn) {
			jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
			WRITE_ONCE(rcu_state.jiffies_stall, jn);
		}
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including for callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (data_race(READ_ONCE(rnp->qsmask))) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
	js = j - data_race(READ_ONCE(rcu_state.gp_start));
	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		data_race(READ_ONCE(rcu_state.gp_state)),
		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
		data_race(READ_ONCE(rcu_state.gp_max)),
		data_race(READ_ONCE(rcu_state.gp_flags)));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(READ_ONCE(rnp->gp_seq)),
			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
			data_race(READ_ONCE(rnp->qsmask)),
			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
			data_race(READ_ONCE(rnp->n_boosts)));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(u8 key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
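
/*
 * For example (assuming a kernel with sysrq support), booting with the
 * sysrq_rcu module parameter set (typically rcutree.sysrq_rcu=1) and then
 * writing 'y' to /proc/sysrq-trigger dumps the RCU tree via
 * show_rcu_gp_kthreads().
 */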

#ifdef CONFIG_RCU_CPU_STALL_NOTIFIER

//////////////////////////////////////////////////////////////////////////////
//
// RCU CPU stall-warning notifiers

static ATOMIC_NOTIFIER_HEAD(rcu_cpu_stall_notifier_list);

/**
 * rcu_stall_chain_notifier_register - Add an RCU CPU stall notifier
 * @n: Entry to add.
 *
 * Adds an RCU CPU stall notifier to an atomic notifier chain.
 * The @action passed to a notifier will be @RCU_STALL_NOTIFY_NORM or
 * friends.  The @data will be the duration of the stalled grace period,
 * in jiffies, coerced to a void* pointer.
 *
 * Returns 0 on success, %-EEXIST on error.
 */
int rcu_stall_chain_notifier_register(struct notifier_block *n)
{
	int rcsn = rcu_cpu_stall_notifiers;

	WARN(1, "Adding %pS() to RCU stall notifier list (%s).\n", n->notifier_call,
	     rcsn ? "possibly suppressing RCU CPU stall warnings" : "failed, so all is well");
	if (rcsn)
		return atomic_notifier_chain_register(&rcu_cpu_stall_notifier_list, n);
	return -EEXIST;
}
EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_register);
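
/*
 * Hypothetical example notifier (names are illustrative only), which just
 * logs the stall duration passed via the void *data argument:
 *
 *	static int my_stall_handler(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		pr_info("RCU stall notify %lu, %lu jiffies\n",
 *			action, (unsigned long)data);
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_stall_nb = {
 *		.notifier_call = my_stall_handler,
 *	};
 *	...
 *	rcu_stall_chain_notifier_register(&my_stall_nb);
 */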

/**
 * rcu_stall_chain_notifier_unregister - Remove an RCU CPU stall notifier
 * @n: Entry to remove.
 *
 * Removes an RCU CPU stall notifier from an atomic notifier chain.
 *
 * Returns zero on success, %-ENOENT on failure.
 */
int rcu_stall_chain_notifier_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&rcu_cpu_stall_notifier_list, n);
}
EXPORT_SYMBOL_GPL(rcu_stall_chain_notifier_unregister);

/*
 * rcu_stall_notifier_call_chain - Call functions in an RCU CPU stall notifier chain
 * @val: Value passed unmodified to notifier function
 * @v: Pointer passed unmodified to notifier function
 *
 * Calls each function in the RCU CPU stall notifier chain in turn, which
 * is an atomic call chain.  See atomic_notifier_call_chain() for more
 * information.
 *
 * This is for use within RCU, hence the omission of the extra asterisk
 * to indicate a non-kerneldoc format header comment.
 */
int rcu_stall_notifier_call_chain(unsigned long val, void *v)
{
	return atomic_notifier_call_chain(&rcu_cpu_stall_notifier_list, val, v);
}

#endif // #ifdef CONFIG_RCU_CPU_STALL_NOTIFIER