// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */
#define pr_fmt(fmt) "watchdog: " fmt

#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>
#include <uapi/linux/sched/types.h>
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/kthread.h>

static DEFINE_MUTEX(watchdog_mutex);
#if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG)
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	1
#else
# define WATCHDOG_DEFAULT	(SOFT_WATCHDOG_ENABLED)
# define NMI_WATCHDOG_DEFAULT	0
#endif
unsigned long __read_mostly watchdog_enabled;
int __read_mostly watchdog_user_enabled = 1;
int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT;
int __read_mostly soft_watchdog_user_enabled = 1;
int __read_mostly watchdog_thresh = 10;
int __read_mostly nmi_watchdog_available;

struct cpumask watchdog_allowed_mask __read_mostly;

struct cpumask watchdog_cpumask __read_mostly;
unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
unsigned int __read_mostly hardlockup_panic =
			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
/*
 * We may not want to enable hard lockup detection by default in all cases,
 * for example when running the kernel as a guest on a hypervisor. In these
 * cases this function can be called to disable hard lockup detection. This
 * function should only be executed once by the boot processor before the
 * kernel command line parameters are parsed, because otherwise it is not
 * possible to override this in hardlockup_panic_setup().
 */
void __init hardlockup_detector_disable(void)
{
	nmi_watchdog_user_enabled = 0;
}
static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	else if (!strncmp(str, "nopanic", 7))
		hardlockup_panic = 0;
	else if (!strncmp(str, "0", 1))
		nmi_watchdog_user_enabled = 0;
	else if (!strncmp(str, "1", 1))
		nmi_watchdog_user_enabled = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
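/*
 * For illustration (values taken from the parser above): booting with
 * "nmi_watchdog=panic" makes a detected hard lockup panic the machine,
 * "nmi_watchdog=nopanic" only warns, and "nmi_watchdog=0" / "nmi_watchdog=1"
 * disable or enable the NMI watchdog outright.
 */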
# ifdef CONFIG_SMP
int __read_mostly sysctl_hardlockup_all_cpu_backtrace;

static int __init hardlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
# endif /* CONFIG_SMP */
#endif /* CONFIG_HARDLOCKUP_DETECTOR */
/*
 * These functions can be overridden if an architecture implements its
 * own hardlockup detector.
 *
 * watchdog_nmi_enable/disable can be implemented to start and stop when
 * softlockup watchdog threads start and stop. The arch must select the
 * SOFTLOCKUP_DETECTOR Kconfig.
 */
int __weak watchdog_nmi_enable(unsigned int cpu)
{
	hardlockup_detector_perf_enable();
	return 0;
}

void __weak watchdog_nmi_disable(unsigned int cpu)
{
	hardlockup_detector_perf_disable();
}
/* Return 0 if an NMI watchdog is available; error code otherwise. */
int __weak __init watchdog_nmi_probe(void)
{
	return hardlockup_detector_perf_init();
}
/**
 * watchdog_nmi_stop - Stop the watchdog for reconfiguration
 *
 * The reconfiguration steps are:
 * watchdog_nmi_stop();
 * update_variables();
 * watchdog_nmi_start();
 */
void __weak watchdog_nmi_stop(void) { }
/**
 * watchdog_nmi_start - Start the watchdog after reconfiguration
 *
 * Counterpart to watchdog_nmi_stop().
 *
 * The following variables have been updated in update_variables() and
 * contain the currently valid configuration:
 * - watchdog_enabled
 * - watchdog_thresh
 * - watchdog_cpumask
 */
void __weak watchdog_nmi_start(void) { }
/**
 * lockup_detector_update_enable - Update the sysctl enable bit
 *
 * Caller needs to make sure that the NMI/perf watchdogs are off, so this
 * can't race with watchdog_nmi_disable().
 */
static void lockup_detector_update_enable(void)
{
	watchdog_enabled = 0;
	if (!watchdog_user_enabled)
		return;
	if (nmi_watchdog_available && nmi_watchdog_user_enabled)
		watchdog_enabled |= NMI_WATCHDOG_ENABLED;
	if (soft_watchdog_user_enabled)
		watchdog_enabled |= SOFT_WATCHDOG_ENABLED;
}
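/*
 * For illustration: with watchdog_user_enabled = 1, an available NMI
 * watchdog and both per-detector user flags set, the function above leaves
 * watchdog_enabled == (NMI_WATCHDOG_ENABLED | SOFT_WATCHDOG_ENABLED);
 * clearing watchdog_user_enabled forces watchdog_enabled to 0 regardless
 * of the individual flags.
 */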
#ifdef CONFIG_SOFTLOCKUP_DETECTOR

#define SOFTLOCKUP_RESET	ULONG_MAX

/* Global variables, exported for sysctl */
unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static bool softlockup_threads_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;
static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);
	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	watchdog_user_enabled = 0;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	soft_watchdog_user_enabled = 0;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
#ifdef CONFIG_SMP
int __read_mostly sysctl_softlockup_all_cpu_backtrace;

static int __init softlockup_all_cpu_backtrace_setup(char *str)
{
	sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0);
	return 1;
}
__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
#endif
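/*
 * For illustration (matching the __setup() handlers above): booting with
 * "softlockup_panic=1" panics on a soft lockup, "nowatchdog" disables both
 * detectors, "nosoftlockup" disables only the softlockup detector, and
 * "softlockup_all_cpu_backtrace=1" dumps backtraces from all CPUs when a
 * soft lockup is reported.
 */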
static void __lockup_detector_cleanup(void);

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions, so we generally
 * want a higher threshold for soft lockups than for hard lockups. We couple
 * the two with a factor: the soft threshold is twice the hard threshold.
 */
static int get_softlockup_thresh(void)
{
	return watchdog_thresh * 2;
}
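/*
 * For illustration: with the default watchdog_thresh of 10 seconds, the
 * soft lockup threshold computed above is 20 seconds.
 */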
/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return running_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
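/*
 * Worked example: running_clock() returns nanoseconds, and shifting right
 * by 30 divides by 2^30 = 1073741824, so e.g. 5 * 10^9 ns comes back as
 * 5000000000 >> 30 == 4, i.e. "seconds" that are each about 1.074s long.
 * That error is fine for lockup detection and avoids a 64-bit divide.
 */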
static void set_sample_period(void)
{
	/*
	 * convert watchdog_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer several chances (two
	 * or three with the current relation between the soft
	 * and hard thresholds) to increment before the
	 * hardlockup detector generates a warning
	 */
	sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
	watchdog_update_hrtimer_threshold(sample_period);
}
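/*
 * Worked example with the defaults: watchdog_thresh = 10 gives a soft
 * threshold of 20 seconds, so sample_period = 20 * (10^9 / 5) ns = 4
 * seconds; the per-CPU hrtimer then fires every 4 seconds.
 */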
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	__this_cpu_write(watchdog_touch_ts, get_timestamp());
}
/**
 * touch_softlockup_watchdog_sched - touch watchdog on scheduler stalls
 *
 * Call when the scheduler may have stalled for legitimate reasons
 * preventing the watchdog task from executing - e.g. the scheduler
 * entering idle state. This should only be used for scheduler events.
 * Use touch_softlockup_watchdog() for everything else.
 */
notrace void touch_softlockup_watchdog_sched(void)
{
	/*
	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
	 * gets zeroed here, so use the raw_ operation.
	 */
	raw_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}
notrace void touch_softlockup_watchdog(void)
{
	touch_softlockup_watchdog_sched();
	wq_watchdog_touch(raw_smp_processor_id());
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * watchdog_mutex cannot be taken here, as this might be called
	 * from (soft)interrupt context, so the access to
	 * watchdog_allowed_cpumask might race with a concurrent update.
	 *
	 * The watchdog time stamp can race against a concurrent real
	 * update as well, the only side effect might be a cycle delay for
	 * the softlockup check.
	 */
	for_each_cpu(cpu, &watchdog_allowed_mask)
		per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
	wq_watchdog_touch(-1);
}
void touch_softlockup_watchdog_sync(void)
{
	__this_cpu_write(softlockup_touch_sync, true);
	__this_cpu_write(watchdog_touch_ts, SOFTLOCKUP_RESET);
}
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp();

	if ((watchdog_enabled & SOFT_WATCHDOG_ENABLED) && watchdog_thresh) {
		/* Warn about unreasonable delays. */
		if (time_after(now, touch_ts + get_softlockup_thresh()))
			return now - touch_ts;
	}
	return 0;
}
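/*
 * For illustration: with the default thresholds, a CPU touched at
 * timestamp 100 and checked at timestamp 125 exceeds 100 + 20, so
 * is_softlockup() returns 25, the stall duration in seconds.
 */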
/* watchdog detector functions */
bool is_hardlockup(void)
{
	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
		return true;

	__this_cpu_write(hrtimer_interrupts_saved, hrint);
	return false;
}
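/*
 * For illustration: the NMI-based detector calls is_hardlockup() once per
 * period. If hrtimer_interrupts is still e.g. 42 on two consecutive calls,
 * the hrtimer never fired in between - interrupts are stuck - and a hard
 * lockup is reported.
 */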
static void watchdog_interrupt_count(void)
{
	__this_cpu_inc(hrtimer_interrupts);
}
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;
	int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;

	if (!watchdog_enabled)
		return HRTIMER_NORESTART;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__this_cpu_read(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

	if (touch_ts == SOFTLOCKUP_RESET) {
		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__this_cpu_write(softlockup_touch_sync, false);
			sched_clock_tick();
		}

		/* Clear the guest paused flag on watchdog reset */
		kvm_check_and_clear_guest_paused();
		__touch_watchdog();
		return HRTIMER_RESTART;
	}
	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled. The task touches the watchdog to
	 * indicate it is getting cpu time. If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like a soft lockup, check to see if the host
		 * stopped the vm before we issue the warning
		 */
		if (kvm_check_and_clear_guest_paused())
			return HRTIMER_RESTART;

		/* only warn once */
		if (__this_cpu_read(soft_watchdog_warn) == true) {
			/*
			 * When multiple processes are causing softlockups the
			 * softlockup detector only warns on the first one
			 * because the code relies on a full quiet cycle to
			 * re-arm. The second process prevents the quiet cycle
			 * and never gets reported. Use task pointers to detect
			 * this.
			 */
			if (__this_cpu_read(softlockup_task_ptr_saved) !=
			    current) {
				__this_cpu_write(soft_watchdog_warn, false);
				__touch_watchdog();
			}
			return HRTIMER_RESTART;
		}
		if (softlockup_all_cpu_backtrace) {
			/* Prevent multiple soft-lockup reports if one cpu is
			 * already engaged in dumping cpu back traces
			 */
			if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
				/* Someone else will report us. Let's give up */
				__this_cpu_write(soft_watchdog_warn, true);
				return HRTIMER_RESTART;
			}
		}

		pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		__this_cpu_write(softlockup_task_ptr_saved, current);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();
		if (softlockup_all_cpu_backtrace) {
			/* Avoid generating two back traces for current
			 * given that one is already made above
			 */
			trigger_allbutself_cpu_backtrace();

			clear_bit(0, &soft_lockup_nmi_warn);
			/* Barrier to sync with other cpus */
			smp_mb__after_atomic();
		}

		add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__this_cpu_write(soft_watchdog_warn, true);
	} else
		__this_cpu_write(soft_watchdog_warn, false);

	return HRTIMER_RESTART;
}
static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
	struct sched_param param = { .sched_priority = prio };

	sched_setscheduler(current, policy, &param);
}
static void watchdog_enable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	/*
	 * Start the timer first to prevent the NMI watchdog triggering
	 * before the timer has a chance to fire.
	 */
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;
	hrtimer_start(hrtimer, ns_to_ktime(sample_period),
		      HRTIMER_MODE_REL_PINNED);

	/* Initialize timestamp */
	__touch_watchdog();

	/* Enable the perf event */
	if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
		watchdog_nmi_enable(cpu);

	watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
static void watchdog_disable(unsigned int cpu)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer);

	watchdog_set_prio(SCHED_NORMAL, 0);
	/*
	 * Disable the perf event first. That prevents a large delay
	 * between disabling the timer and disabling the perf event from
	 * causing the perf NMI to detect a false positive.
	 */
	watchdog_nmi_disable(cpu);
	hrtimer_cancel(hrtimer);
}

static void watchdog_cleanup(unsigned int cpu, bool online)
{
	watchdog_disable(cpu);
}
static int watchdog_should_run(unsigned int cpu)
{
	return __this_cpu_read(hrtimer_interrupts) !=
		__this_cpu_read(soft_lockup_hrtimer_cnt);
}
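/*
 * For illustration: watchdog_timer_fn() bumps hrtimer_interrupts and wakes
 * the per-CPU thread; the thread is considered runnable only while the two
 * counters differ, and watchdog() below re-syncs them, so the thread does
 * one pass per hrtimer kick.
 */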
/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp. If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
	__this_cpu_write(soft_lockup_hrtimer_cnt,
			 __this_cpu_read(hrtimer_interrupts));
	__touch_watchdog();
}
static struct smp_hotplug_thread watchdog_threads = {
	.store			= &softlockup_watchdog,
	.thread_should_run	= watchdog_should_run,
	.thread_fn		= watchdog,
	.thread_comm		= "watchdog/%u",
	.setup			= watchdog_enable,
	.cleanup		= watchdog_cleanup,
	.park			= watchdog_disable,
	.unpark			= watchdog_enable,
};
static void softlockup_update_smpboot_threads(void)
{
	lockdep_assert_held(&watchdog_mutex);

	if (!softlockup_threads_initialized)
		return;

	smpboot_update_cpumask_percpu_thread(&watchdog_threads,
					     &watchdog_allowed_mask);
}
/* Temporarily park all watchdog threads */
static void softlockup_park_all_threads(void)
{
	cpumask_clear(&watchdog_allowed_mask);
	softlockup_update_smpboot_threads();
}

/* Unpark enabled threads */
static void softlockup_unpark_threads(void)
{
	cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask);
	softlockup_update_smpboot_threads();
}
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	softlockup_park_all_threads();
	set_sample_period();
	lockup_detector_update_enable();
	if (watchdog_enabled && watchdog_thresh)
		softlockup_unpark_threads();
	watchdog_nmi_start();
	cpus_read_unlock();
	/*
	 * Must be called outside the cpus locked section to prevent
	 * recursive locking in the perf code.
	 */
	__lockup_detector_cleanup();
}
/*
 * Create the watchdog thread infrastructure and configure the detector(s).
 *
 * The threads are not unparked as watchdog_allowed_mask is empty. When
 * the threads are successfully initialized, take the proper locks and
 * unpark the threads in the watchdog_cpumask if the watchdog is enabled.
 */
static __init void lockup_detector_setup(void)
{
	int ret;

	/*
	 * If sysctl is off and watchdog got disabled on the command line,
	 * nothing to do here.
	 */
	lockup_detector_update_enable();

	if (!IS_ENABLED(CONFIG_SYSCTL) &&
	    !(watchdog_enabled && watchdog_thresh))
		return;

	ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads,
						     &watchdog_allowed_mask);
	if (ret) {
		pr_err("Failed to initialize soft lockup detector threads\n");
		return;
	}

	mutex_lock(&watchdog_mutex);
	softlockup_threads_initialized = true;
	lockup_detector_reconfigure();
	mutex_unlock(&watchdog_mutex);
}
#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline int watchdog_park_threads(void) { return 0; }
static inline void watchdog_unpark_threads(void) { }
static inline int watchdog_enable_all_cpus(void) { return 0; }
static inline void watchdog_disable_all_cpus(void) { }
static void lockup_detector_reconfigure(void)
{
	cpus_read_lock();
	watchdog_nmi_stop();
	lockup_detector_update_enable();
	watchdog_nmi_start();
	cpus_read_unlock();
}

static inline void lockup_detector_setup(void)
{
	lockup_detector_reconfigure();
}
#endif /* !CONFIG_SOFTLOCKUP_DETECTOR */
static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}
/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}
/**
 * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
 *
 * Special interface for parisc. It prevents lockup detector warnings from
 * the default pm_poweroff() function which busy loops forever.
 */
void lockup_detector_soft_poweroff(void)
{
	watchdog_enabled = 0;
}

#ifdef CONFIG_SYSCTL
/* Propagate any changes to the watchdog threads */
static void proc_watchdog_update(void)
{
	/* Remove impossible cpus to keep sysctl output clean. */
	cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask);
	lockup_detector_reconfigure();
}
/*
 * common function for watchdog, nmi_watchdog and soft_watchdog parameter
 *
 * caller             | table->data points to      | 'which'
 * -------------------|----------------------------|--------------------------
 * proc_watchdog      | watchdog_user_enabled      | NMI_WATCHDOG_ENABLED |
 *                    |                            | SOFT_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_nmi_watchdog  | nmi_watchdog_user_enabled  | NMI_WATCHDOG_ENABLED
 * -------------------|----------------------------|--------------------------
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
static int proc_watchdog_common(int which, struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old, *param = table->data;

	mutex_lock(&watchdog_mutex);

	if (!write) {
		/*
		 * On read synchronize the userspace interface. This is a
		 * racy snapshot.
		 */
		*param = (watchdog_enabled & which) != 0;
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	} else {
		old = READ_ONCE(*param);
		err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
		if (!err && old != READ_ONCE(*param))
			proc_watchdog_update();
	}
	mutex_unlock(&watchdog_mutex);
	return err;
}
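/*
 * For illustration, the runtime knobs handled below (root shell):
 *   echo 0 > /proc/sys/kernel/watchdog        # disable both detectors
 *   echo 0 > /proc/sys/kernel/nmi_watchdog    # disable only the hard one
 *   echo 1 > /proc/sys/kernel/soft_watchdog   # enable the soft one
 */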
/*
 * /proc/sys/kernel/watchdog
 */
int proc_watchdog(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/nmi_watchdog
 */
int proc_nmi_watchdog(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	if (!nmi_watchdog_available && write)
		return -ENOTSUPP;
	return proc_watchdog_common(NMI_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/soft_watchdog
 */
int proc_soft_watchdog(struct ctl_table *table, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
				    table, write, buffer, lenp, ppos);
}
/*
 * /proc/sys/kernel/watchdog_thresh
 */
int proc_watchdog_thresh(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err, old;

	mutex_lock(&watchdog_mutex);

	old = READ_ONCE(watchdog_thresh);
	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (!err && write && old != READ_ONCE(watchdog_thresh))
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
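/*
 * For illustration: "echo 20 > /proc/sys/kernel/watchdog_thresh" raises the
 * hard lockup threshold to 20s, which also moves the soft lockup threshold
 * to 40s and the sample period to 8s (see get_softlockup_thresh() and
 * set_sample_period() above).
 */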
/*
 * The cpumask is the mask of possible cpus that the watchdog can run
 * on, not the mask of cpus it is actually running on. This allows the
 * user to specify a mask that will include cpus that have not yet
 * been brought online, if desired.
 */
int proc_watchdog_cpumask(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	mutex_lock(&watchdog_mutex);

	err = proc_do_large_bitmap(table, write, buffer, lenp, ppos);
	if (!err && write)
		proc_watchdog_update();

	mutex_unlock(&watchdog_mutex);
	return err;
}
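/*
 * For illustration: "echo 0-3 > /proc/sys/kernel/watchdog_cpumask" restricts
 * the watchdog to CPUs 0-3; proc_do_large_bitmap() accepts the usual cpulist
 * range syntax on write.
 */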
#endif /* CONFIG_SYSCTL */
void __init lockup_detector_init(void)
{
#ifdef CONFIG_NO_HZ_FULL
	if (tick_nohz_full_enabled()) {
		pr_info("Disabling watchdog on nohz_full cores by default\n");
		cpumask_copy(&watchdog_cpumask, housekeeping_mask);
	} else
		cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#else
	cpumask_copy(&watchdog_cpumask, cpu_possible_mask);
#endif

	if (!watchdog_nmi_probe())
		nmi_watchdog_available = true;
	lockup_detector_setup();
}