// SPDX-License-Identifier: GPL-2.0
/*
 * Watchdog support on powerpc systems.
 *
 * Copyright 2017, IBM Corporation.
 *
 * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c
 */

#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kprobes.h>
#include <linux/hardirq.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/kdebug.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/smp.h>

#include <asm/paca.h>

/*
 * The watchdog has a simple timer that runs on each CPU, once per timer
 * period. This is the heartbeat.
 *
 * Then there are checks to see if the heartbeat has not triggered on a CPU
 * for the panic timeout period. Currently the watchdog only supports an
 * SMP check, so the heartbeat only turns on when we have 2 or more CPUs.
 *
 * This is not an NMI watchdog, but Linux uses that name for a generic
 * watchdog in some cases, so NMI gets used in some places.
 */

static cpumask_t wd_cpus_enabled __read_mostly;

static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */
static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */

static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeats */

static DEFINE_PER_CPU(struct timer_list, wd_timer);
static DEFINE_PER_CPU(u64, wd_timer_tb);

/*
 * These are for the SMP checker. All CPUs clear their bit in the pending
 * mask during their heartbeat. Once the mask becomes empty, the time is
 * noted and the mask is refilled.
 *
 * If the time since the last refill exceeds the SMP panic timeout, at
 * least one CPU has failed to clear its bit and we can panic with the
 * list of stuck CPUs.
 *
 * This will work best with NMI IPIs for crash code so the stuck CPUs
 * can be pulled out to get their backtraces.
 */
static unsigned long __wd_smp_lock;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;
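
/*
 * The SMP checker state above is protected by a simple test-and-set bit
 * lock taken with interrupts hard-disabled, so it is also safe to take
 * from the soft-NMI path where ordinary spinlocks are best avoided.
 */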
static inline void wd_smp_lock(unsigned long *flags)
{
	/*
	 * Avoid locking layers if possible.
	 * This may be called from low level interrupt handlers at some
	 * point in the future.
	 */
	raw_local_irq_save(*flags);
	hard_irq_disable(); /* Make it soft-NMI safe */
	while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) {
		raw_local_irq_restore(*flags);
		/* Spin with interrupts enabled so we don't lock up here */
		spin_until_cond(!test_bit(0, &__wd_smp_lock));
		raw_local_irq_save(*flags);
		hard_irq_disable();
	}
}

static inline void wd_smp_unlock(unsigned long *flags)
{
	clear_bit_unlock(0, &__wd_smp_lock);
	raw_local_irq_restore(*flags);
}
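
/*
 * Runs in NMI IPI context on a CPU that another CPU has reported stuck,
 * so the stuck CPU can dump its own registers and stack.
 */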
static void wd_lockup_ipi(struct pt_regs *regs)
{
	pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", raw_smp_processor_id());
	print_modules();
	print_irqtrace_events(current);
	if (regs)
		show_regs(regs);
	else
		dump_stack();

	if (hardlockup_panic)
		nmi_panic(regs, "Hard LOCKUP");
}
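
/*
 * Move CPUs from the pending mask to the stuck mask so they no longer
 * hold up the SMP check. If that empties the pending mask, begin a new
 * check period immediately.
 */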
static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
{
	cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
	cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
	/*
	 * See wd_smp_clear_cpu_pending()
	 */
	smp_mb();
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		wd_smp_last_reset_tb = tb;
		cpumask_andnot(&wd_smp_cpus_pending,
				&wd_cpus_enabled,
				&wd_smp_cpus_stuck);
	}
}

static void set_cpu_stuck(int cpu, u64 tb)
{
	set_cpumask_stuck(cpumask_of(cpu), tb);
}
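
/*
 * Called by a CPU whose own heartbeat is still running when the pending
 * mask has not emptied within the SMP panic timeout: some other CPU(s)
 * never checked in. Report them, try to extract backtraces via NMI IPI,
 * and optionally panic.
 */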
static void watchdog_smp_panic(int cpu, u64 tb)
{
	unsigned long flags;
	int c;

	wd_smp_lock(&flags);
	/* Double check some things under lock */
	if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
		goto out;
	if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
		goto out;
	if (cpumask_weight(&wd_smp_cpus_pending) == 0)
		goto out;

	pr_emerg("Watchdog CPU:%d detected Hard LOCKUP other CPUS:%*pbl\n",
		 cpu, cpumask_pr_args(&wd_smp_cpus_pending));

	/*
	 * Try to trigger the stuck CPUs.
	 */
	for_each_cpu(c, &wd_smp_cpus_pending) {
		if (c == cpu)
			continue;
		smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
	}
	smp_flush_nmi_ipi(1000000);

	/* Take the stuck CPUs out of the watch group */
	set_cpumask_stuck(&wd_smp_cpus_pending, tb);

	wd_smp_unlock(&flags);

	printk_safe_flush();
	/*
	 * printk_safe_flush() seems to require another print
	 * before anything actually goes out to console.
	 */
	printk_safe_flush();

	if (sysctl_hardlockup_all_cpu_backtrace)
		trigger_allbutself_cpu_backtrace();

	if (hardlockup_panic)
		nmi_panic(NULL, "Hard LOCKUP");

	return;

out:
	wd_smp_unlock(&flags);
}

static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
{
	if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
		if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
			unsigned long flags;

			pr_emerg("Watchdog CPU:%d became unstuck\n", cpu);
			wd_smp_lock(&flags);
			cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
			wd_smp_unlock(&flags);
		} else {
			/*
			 * The last CPU to clear pending should have reset the
			 * watchdog so we generally should not find it empty
			 * here if our CPU was clear. However it could happen
			 * due to a rare race with another CPU taking the
			 * last CPU out of the mask concurrently.
			 *
			 * We can't add a warning for it. But just in case
			 * there is a problem with the watchdog that is causing
			 * the mask to not be reset, try to kick it along here.
			 */
			if (unlikely(cpumask_empty(&wd_smp_cpus_pending)))
				goto none_pending;
		}
		return;
	}
	cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);

	/*
	 * Order the store to clear pending with the loads that check
	 * whether all words in the pending mask are now empty. This
	 * orders with the same barrier on another CPU. It prevents two
	 * CPUs from clearing the last two pending bits but neither
	 * seeing the other's store when checking for an empty mask,
	 * which would end in a false positive.
	 */
	smp_mb();
	if (cpumask_empty(&wd_smp_cpus_pending)) {
		unsigned long flags;

none_pending:
		/*
		 * Double check under lock because more than one CPU could
		 * see a clear mask with the lockless check after clearing
		 * their pending bits.
		 */
		wd_smp_lock(&flags);
		if (cpumask_empty(&wd_smp_cpus_pending)) {
			wd_smp_last_reset_tb = tb;
			cpumask_andnot(&wd_smp_cpus_pending,
					&wd_cpus_enabled,
					&wd_smp_cpus_stuck);
		}
		wd_smp_unlock(&flags);
	}
}

static void watchdog_timer_interrupt(int cpu)
{
	u64 tb = get_tb();

	per_cpu(wd_timer_tb, cpu) = tb;

	wd_smp_clear_cpu_pending(cpu, tb);

	if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
		watchdog_smp_panic(cpu, tb);
}
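
/*
 * The per-CPU detector. Despite the name this is not a true NMI: it is
 * driven by the decrementer firing while the CPU has interrupts
 * soft-disabled, which is why the decrementer is re-armed with the
 * panic timeout below.
 */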
void soft_nmi_interrupt(struct pt_regs *regs)
{
	unsigned long flags;
	int cpu = raw_smp_processor_id();
	u64 tb;

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return;

	nmi_enter();

	__this_cpu_inc(irq_stat.soft_nmi_irqs);

	tb = get_tb();
	if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
		per_cpu(wd_timer_tb, cpu) = tb;

		wd_smp_lock(&flags);
		if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
			wd_smp_unlock(&flags);
			goto out;
		}
		set_cpu_stuck(cpu, tb);

		pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		wd_smp_unlock(&flags);

		if (sysctl_hardlockup_all_cpu_backtrace)
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");
	}
	/* Reload the decrementer if the timeout fits in its 32 bits */
	if (wd_panic_timeout_tb < 0x7fffffff)
		mtspr(SPRN_DEC, wd_panic_timeout_tb);

out:
	nmi_exit();
}

static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
{
	t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
	if (wd_timer_period_ms > 1000)
		t->expires = __round_jiffies_up(t->expires, cpu);
	add_timer_on(t, cpu);
}

static void wd_timer_fn(unsigned long data)
{
	struct timer_list *t = this_cpu_ptr(&wd_timer);
	int cpu = smp_processor_id();

	watchdog_timer_interrupt(cpu);

	wd_timer_reset(cpu, t);
}
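
/*
 * Called via touch_nmi_watchdog() by code that legitimately runs for a
 * long time with interrupts off, to keep this CPU's heartbeat fresh
 * without waiting for the timer. State is only updated at most once per
 * timer period, to keep the touch path cheap.
 */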
void arch_touch_nmi_watchdog(void)
{
	unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
	int cpu = smp_processor_id();
	u64 tb;

	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return;

	tb = get_tb();
	if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
		per_cpu(wd_timer_tb, cpu) = tb;
		wd_smp_clear_cpu_pending(cpu, tb);
	}
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

static void start_watchdog_timer_on(unsigned int cpu)
{
	struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);

	per_cpu(wd_timer_tb, cpu) = get_tb();

	setup_pinned_timer(t, wd_timer_fn, 0);
	wd_timer_reset(cpu, t);
}

static void stop_watchdog_timer_on(unsigned int cpu)
{
	struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);

	del_timer_sync(t);
}

static int start_wd_on_cpu(unsigned int cpu)
{
	unsigned long flags;

	if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
		WARN_ON(1);
		return 0;
	}

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return 0;

	if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
		return 0;

	wd_smp_lock(&flags);
	cpumask_set_cpu(cpu, &wd_cpus_enabled);
	if (cpumask_weight(&wd_cpus_enabled) == 1) {
		cpumask_set_cpu(cpu, &wd_smp_cpus_pending);
		wd_smp_last_reset_tb = get_tb();
	}
	wd_smp_unlock(&flags);

	start_watchdog_timer_on(cpu);

	return 0;
}

static int stop_wd_on_cpu(unsigned int cpu)
{
	unsigned long flags;

	if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
		return 0; /* Can happen in CPU unplug case */

	stop_watchdog_timer_on(cpu);

	wd_smp_lock(&flags);
	cpumask_clear_cpu(cpu, &wd_cpus_enabled);
	wd_smp_unlock(&flags);

	wd_smp_clear_cpu_pending(cpu, get_tb());

	return 0;
}
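
/*
 * With the default watchdog_thresh of 10 seconds, this works out to a
 * 10s per-CPU panic timeout, a 15s SMP panic timeout, and a 4s
 * heartbeat timer period.
 */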
static void watchdog_calc_timeouts(void)
{
	wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq;

	/* Have the SMP detector trigger a bit later */
	wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2;

	/* 2/5 is the factor that the perf based detector uses */
	wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5;
}

void watchdog_nmi_stop(void)
{
	int cpu;

	for_each_cpu(cpu, &wd_cpus_enabled)
		stop_wd_on_cpu(cpu);
}

void watchdog_nmi_start(void)
{
	int cpu;

	watchdog_calc_timeouts();
	for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
		start_wd_on_cpu(cpu);
}

/*
 * Invoked from core watchdog init.
 */
int __init watchdog_nmi_probe(void)
{
	int err;

	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"powerpc/watchdog:online",
					start_wd_on_cpu, stop_wd_on_cpu);
	if (err < 0) {
		pr_warn("Watchdog could not be initialized\n");
		return err;
	}
	return 0;
}
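
/*
 * NMI-IPI-based remote backtraces, plugged into the generic
 * nmi_trigger_cpumask_backtrace() machinery below.
 */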
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

static void raise_backtrace_ipi(cpumask_t *mask)
{
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id())
			handle_backtrace_ipi(NULL);
		else
			smp_send_nmi_ipi(cpu, handle_backtrace_ipi, 1000000);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}