// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/tick.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
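
/* Serializes updates to the watchdog configuration (sysctl, hotplug, init). */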
static DEFINE_MUTEX(watchdog_mutex);

#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif
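
/*
 * User-requested state of the individual detectors, as set on the kernel
 * command line and via sysctl. The effective state is composed into
 * watchdog_enabled by lockup_detector_update_enable().
 */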
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);

#ifdef CONFIG_HARDLOCKUP_DETECTOR

# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;
# endif /* CONFIG_SMP */

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_HARDLOCKUP_PANIC);

/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);

#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * the softlockup watchdog starts and stops. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}

/* Return 0 if an NMI watchdog is available, an error code otherwise. */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}

/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }

/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }

/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}

#ifdef CONFIG_SOFTLOCKUP_DETECTOR

/*
 * Delay the softlockup report when running known slow code.
 * It does _not_ affect the timestamp of the last successful reschedule.
 */
#define SOFTLOCKUP_DELAY_REPORT	ULONG_MAX

#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;
#endif

static struct cpumask watchdog_allowed_mask __read_mostly;

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			IS_ENABLED(CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC);

static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

/* Timestamp taken after the last successful reschedule. */
static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
/* Timestamp of the last softlockup report. */
static DEFINE_PER_CPU(unsigned long, watchdog_report_ts);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
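
/*
 * Kernel command line handlers. Each returns 1 to mark the parameter
 * as handled so that it is not passed on to init.
 */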
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

static int __init watchdog_thresh_setup(char *str)
{
	get_option(&str, &watchdog_thresh);
	return 1;
}
__setup("watchdog_thresh=", watchdog_thresh_setup);

static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two thresholds with a factor: the soft threshold is twice the hard
 * threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30 ns ~= 1.074 s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL; /* 2^30 ~= 10^9 */
}

static void set_sample_period(void)
{
	/*
	 * Convert watchdog_thresh from seconds to ns.
	 * The divide by 5 is to give the hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning.
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}

static void update_report_ts(void)
{
	__this_cpu_write(watchdog_report_ts, get_timestamp());
}

/* Commands for resetting the watchdog */
static void update_touch_ts(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
	update_report_ts();
}

/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's watchdog
	 * report period gets restarted here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}

notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well; the only side effect might be a cycle of delay
	 * for the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask) {
		per_cpu(watchdog_report_ts, cpu) = SOFTLOCKUP_DELAY_REPORT;
		wq_watchdog_touch(cpu);
	}
}

void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_report_ts, SOFTLOCKUP_DELAY_REPORT);
}
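
/*
 * Check whether the report interval has been exceeded. Returns the stall
 * duration in seconds (now - touch_ts) when a soft lockup is detected,
 * 0 otherwise.
 */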
static int is_softlockup(unsigned long touch_ts,
			 unsigned long period_ts,
			 unsigned long now)
{
	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, period_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}

/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
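
/*
 * Called from the watchdog hrtimer. A count that keeps incrementing shows
 * is_hardlockup() that this CPU is still processing timer interrupts.
 */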
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}

static DEFINE_PER_CPU(struct completion, softlockup_completion);
static DEFINE_PER_CPU(struct cpu_stop_work, softlockup_stop_work);

/*
 * The watchdog feed function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static int softlockup_fn(void *data)
{
	update_touch_ts();
	complete(this_cpu_ptr(&softlockup_completion));

	return 0;
}

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts, period_ts, now;
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	if (completion_done(this_cpu_ptr(&softlockup_completion))) {
		reinit_completion(this_cpu_ptr(&softlockup_completion));
		stop_one_cpu_nowait(smp_processor_id(),
				softlockup_fn, NULL,
				this_cpu_ptr(&softlockup_stop_work));
	}

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	/*
	 * Read the current timestamp first. It might become invalid anytime
	 * when a virtual machine is stopped by the host or when the watchdog
	 * is touched from NMI.
	 */
	now = get_timestamp();
	/*
	 * If a virtual machine is stopped by the host it can look to
	 * the watchdog like a soft lockup. This function touches the watchdog.
	 */
	kvm_check_and_clear_guest_paused();
	/*
	 * The stored timestamp is comparable with @now only when not touched.
	 * It might get touched anytime from NMI. Make sure that is_softlockup()
	 * uses the same (valid) value.
	 */
	period_ts = READ_ONCE(*this_cpu_ptr(&watchdog_report_ts));

	/* Reset the interval when touched by known problematic code. */
	if (period_ts == SOFTLOCKUP_DELAY_REPORT) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		update_report_ts();
		return HRTIMER_RESTART;
	}

	/* Check for a softlockup. */
	touch_ts = __this_cpu_read(watchdog_touch_ts);
	duration = is_softlockup(touch_ts, period_ts, now);
	if (unlikely(duration)) {
		/*
		 * Prevent multiple soft-lockup reports if one cpu is already
		 * engaged in dumping all cpu back traces.
		 */
		if (softlockup_all_cpu_backtrace) {
			if (test_and_set_bit_lock(0, &soft_lockup_nmi_warn))
				return HRTIMER_RESTART;
		}

		/* Start period for the next softlockup warning. */
		update_report_ts();

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_all_cpu_backtrace) {
			trigger_allbutself_cpu_backtrace();
			clear_bit_unlock(0, &soft_lockup_nmi_warn);
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
	}

	return HRTIMER_RESTART;
}
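
/* Bring up the softlockup hrtimer and the NMI watchdog on this CPU. */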
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);
	struct completion *done = this_cpu_ptr(&softlockup_completion);

	WARN_ON_ONCE(cpu != smp_processor_id());

	init_completion(done);
	complete(done);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED_HARD);

	/* Initialize timestamp */
	update_touch_ts();
	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);
}
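
/* Counterpart to watchdog_enable(); must run on the CPU being disabled. */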
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	WARN_ON_ONCE(cpu != smp_processor_id());

	/*
	 * Disable the perf event first. That prevents a large delay between
	 * disabling the timer and disabling the perf event from causing the
	 * perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
	wait_for_completion(this_cpu_ptr(&softlockup_completion));
}
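
/*
 * Stop the watchdog on every CPU in watchdog_allowed_mask and clear the
 * mask. Runs under watchdog_mutex via __lockup_detector_reconfigure().
 */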
static int softlockup_stop_fn(void *data)
{
	watchdog_disable(smp_processor_id());
	return 0;
}

static void softlockup_stop_all(void)
{
	int cpu;

	if (!softlockup_initialized)
		return;

	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_stop_fn, NULL, false);

	cpumask_clear(&watchdog_allowed_mask);
}
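
/*
 * Start the watchdog on every CPU in watchdog_cpumask and record the
 * resulting set in watchdog_allowed_mask.
 */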
static int softlockup_start_fn(void *data)
{
	watchdog_enable(smp_processor_id());
	return 0;
}

static void softlockup_start_all(void)
{
	int cpu;

	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	for_each_cpu(cpu, &watchdog_allowed_mask)
		smp_call_on_cpu(cpu, softlockup_start_fn, NULL, false);
}
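
/* CPU hotplug callbacks: keep the per-CPU watchdogs in sync with hotplug. */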
int lockup_detector_online_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_enable(cpu);
	return 0;
}

int lockup_detector_offline_cpu(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
		watchdog_disable(cpu);
	return 0;
}
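
/*
 * Apply a configuration change: stop both detectors, recompute the sample
 * period and enable bits, then restart with the new settings. This is the
 * stop -> update_variables -> start sequence described at watchdog_nmi_stop().
 */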
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();

	softlockup_stop_all();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_start_all();

	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}

void lockup_detector_reconfigure(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}

/*
 * Create the watchdog infrastructure and configure the detector(s).
 */
static __init void lockup_detector_setup(void)
{
	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	mutex_lock(&watchdog_mutex);
	__lockup_detector_reconfigure();
	softlockup_initialized = true;
	mutex_unlock(&watchdog_mutex);
}

#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}
void lockup_detector_reconfigure(void)
{
	__lockup_detector_reconfigure();
}
static inline void lockup_detector_setup(void)
{
	__lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */

static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL

/* Propagate any changes to the watchdog infrastructure */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	__lockup_detector_reconfigure();
}

/*
 * Common function for the watchdog, nmi_watchdog and soft_watchdog parameters.
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|-------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|-------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|-------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}

/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}

/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
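
/* Upper bound for watchdog_thresh (seconds), referenced via .extra2 below. */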
static const int sixty = 60;

static struct ctl_table watchdog_sysctls[] = {
	{
		.procname	= "watchdog",
		.data		= &watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "watchdog_thresh",
		.data		= &watchdog_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_watchdog_thresh,
		.extra1		= SYSCTL_ZERO,
		.extra2		= (void *)&sixty,
	},
	{
		.procname	= "nmi_watchdog",
		.data		= &nmi_watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= NMI_WATCHDOG_SYSCTL_PERM,
		.proc_handler	= proc_nmi_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "watchdog_cpumask",
		.data		= &watchdog_cpumask_bits,
		.maxlen		= NR_CPUS,
		.mode		= 0644,
		.proc_handler	= proc_watchdog_cpumask,
	},
#ifdef CONFIG_SOFTLOCKUP_DETECTOR
	{
		.procname	= "soft_watchdog",
		.data		= &soft_watchdog_user_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_soft_watchdog,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{
		.procname	= "softlockup_panic",
		.data		= &softlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "softlockup_all_cpu_backtrace",
		.data		= &sysctl_softlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
	{
		.procname	= "hardlockup_panic",
		.data		= &hardlockup_panic,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#ifdef CONFIG_SMP
	{
		.procname	= "hardlockup_all_cpu_backtrace",
		.data		= &sysctl_hardlockup_all_cpu_backtrace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
#endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
	{}
};

static void __init watchdog_sysctl_init(void)
{
	register_sysctl_init("kernel", watchdog_sysctls);
}
#else
#define watchdog_sysctl_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
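
/*
 * Boot-time entry point: seed the watchdog cpumask from the housekeeping
 * mask, probe for an NMI watchdog implementation and start the detectors.
 */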
void __init lockup_detector_init(void)
{
	if (tick_nohz_full_enabled())
		pr_info("Disabling watchdog on nohz_full cores by default\n");

	cpumask_copy(&watchdog_cpumask,
		     housekeeping_cpumask(HK_TYPE_TIMER));

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
	watchdog_sysctl_init();
}