// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/kvm_para.h>
//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif
#define RCU_STALL_MIGHT_DIV	8
#define RCU_STALL_MIGHT_MIN	(2 * HZ)
/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
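
/*
 * For example (illustrative): with the usual rcupdate.rcu_cpu_stall_timeout
 * default of 21 seconds, this returns 21 * HZ jiffies (plus the PROVE_RCU
 * slack above), while out-of-range settings such as 1 or 500 are clamped
 * to the Kconfig limits of 3 and 300 seconds, respectively.
 */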
/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns @true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
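
/*
 * Illustrative caller sketch (hypothetical structure and callback names,
 * not from this file): prefer asynchronous freeing while grace periods
 * are healthy, but fall back to synchronize_rcu() when one is stalled.
 *
 *	if (!rcu_gp_might_be_stalled()) {
 *		call_rcu(&old->rh, foo_free_cb);  // Buffer the free.
 *	} else {
 *		synchronize_rcu();  // GP may be stalled, so don't buffer.
 *		kfree(old);
 *	}
 */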
/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}
/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
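
/*
 * Usage note: this sysctl is normally enabled at runtime via
 * "echo 1 > /proc/sys/kernel/panic_on_rcu_stall", so that the first
 * stall warning panics the machine and yields a crash dump.
 */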
/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.  The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}
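
/*
 * Worked arithmetic (a sketch; ULONG_CMP_GE(a, b) is defined elsewhere as
 * ULONG_MAX / 2 >= (a) - (b)): with ->jiffies_stall set to
 * jiffies + ULONG_MAX / 2, the stall check's jiffies - ->jiffies_stall
 * wraps to a value just above ULONG_MAX / 2, so the deadline sits as far
 * in the future as wrap-safe comparison allows.
 */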
//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}
/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}
/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}
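
/*
 * Sketch of the posting side (illustrative; the real initialization and
 * queuing live elsewhere in the RCU core, though the field names are the
 * ones used above):
 *
 *	init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
 *	...
 *	if (!rdp->rcu_iw_pending) {
 *		rdp->rcu_iw_pending = true;
 *		rdp->rcu_iw_gp_seq = rnp->gp_seq;
 *		irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
 *	}
 *
 * If the target CPU keeps irqs disabled, the handler cannot run,
 * ->rcu_iw_pending stays set, and print_cpu_stall_info() reports it.
 */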
//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}
/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {
		t = ts[--i];
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}
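
/*
 * Worked example of the output above (PID and flags invented):
 *
 *	Tasks blocked on level-0 rcu_node (CPUs 0-15): P3421/1:b..l
 *
 * Task 3421 has read-side nesting depth 1, is 'b'locked on this rcu_node
 * structure, does not need a quiescent state ('.'), has no expedited
 * hint ('.'), and is still on a ->blkd_tasks 'l'ist.
 */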
#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/* Ditto: no preemptible RCU, so no blocked tasks to report. */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
				if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
#ifdef CONFIG_RCU_FAST_NO_HZ
static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}
/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}
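
/*
 * Worked example of the line printed above (values invented):
 *
 *	3-...0: (26 ticks this GP) idle=32a/1/0x4000000000000000 softirq=372/372 fqs=5
 *
 * CPU 3 is online ('O' would mean offline), is in both ->qsmaskinit and
 * ->qsmaskinitnext ('o' or 'N' would mean not), and its stall-time
 * irq_work has been pending for 0 grace periods ('?' means
 * CONFIG_IRQ_WORK=n, '!' that the most recent irq_work ran during an
 * earlier grace period).  Then come tick counts, dynticks state, softirq
 * counts at GP start and now, and force-quiescent-state passes this GP.
 */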
/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->state : ~0, gpk ? task_cpu(gpk) : -1);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			wake_up_process(gpk);
		}
	}
}
static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
		pr_err("INFO: Stall ended before state dump start\n");
	} else {
		j = jiffies;
		gpa = data_race(rcu_state.gp_activity);
		pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
		       rcu_state.name, j - gpa, j, gpa,
		       data_race(jiffies_till_next_fqs),
		       rcu_get_root()->qsmask);
	}

	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state(); /* Kick them all. */
}
static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}
static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
	}
}
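
/*
 * Illustrative pairing of the fetches above with the updaters (a sketch,
 * not code from this file; the writer column is record_gp_stall_check_time()
 * plus its caller's later ->gp_seq update):
 *
 *	reader (check_cpu_stall)	writer (GP init/cleanup)
 *	------------------------	------------------------
 *	gs1 = gp_seq;			gp_start = jiffies;
 *	smp_rmb();			smp_mb();
 *	js = jiffies_stall;		jiffies_stall = jiffies + j1;
 *	smp_rmb();			...caller updates gp_seq...
 *	gps = gp_start;
 *	smp_rmb();
 *	gs2 = gp_seq;
 *
 * If gs1 == gs2, then no grace period began or ended between the four
 * fetches, so js and gps describe the same grace period and the
 * jiffies-based comparisons cannot be fooled by a grace-period boundary.
 */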
//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx delta ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->state : 0x1ffffL,
		ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
				 READ_ONCE(rnp->gp_seq_needed)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld\n",
			rnp->grplo, rnp->grphi, (long)data_race(rnp->gp_seq),
			(long)data_race(rnp->gp_seq_needed));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(rdp->n_cbs_invoked);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);
/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}
/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
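
/*
 * Usage note (illustrative): on kernels booted with rcutree.sysrq_rcu=1,
 * "echo y > /proc/sysrq-trigger" (or the SysRq-y key chord) dumps the
 * RCU tree via show_rcu_gp_kthreads() above.
 */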