/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include "smpboot.h"
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);
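/*
 * Illustrative sketch (not part of this file): a caller using the __ variants
 * of notifier (un)registration is expected to bracket them with the APIs
 * named above; my_nb is a hypothetical notifier_block:
 *
 *	cpu_notifier_register_begin();
 *	__register_cpu_notifier(&my_nb);
 *	... setup that must not race with CPU hotplug ...
 *	cpu_notifier_register_done();
 */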
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);
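/*
 * Illustrative usage sketch (not part of this file): a read-side section
 * that needs cpu_online_mask to stay stable; do_per_cpu_work() is a
 * hypothetical caller-supplied helper:
 *
 *	int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	put_online_cpus();
 */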
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
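/*
 * Illustrative sketch (not part of this file): these calls nest by design
 * (cpu_hotplug_disabled is a counter), so a caller can bracket a
 * hotplug-sensitive section like so:
 *
 *	cpu_hotplug_disable();
 *	... work that must not race with CPU online/offline ...
 *	cpu_hotplug_enable();
 */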
#endif	/* CONFIG_HOTPLUG_CPU */
/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;

	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);
	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);
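/*
 * Illustrative sketch (not part of this file): a minimal hotplug notifier,
 * with my_cpu_callback, my_cpu_notifier, and the setup/teardown helpers as
 * hypothetical names:
 *
 *	static int my_cpu_callback(struct notifier_block *nfb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			setup_per_cpu_state(cpu);
 *			break;
 *		case CPU_DEAD:
 *			teardown_per_cpu_state(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_notifier);
 */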
#ifdef CONFIG_HOTPLUG_CPU
static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reading so that we do not warn about a task
		 * which was running on this cpu in the past and has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park((long)param->hcpu);
	return 0;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;
	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do sync before park smpboot threads to take care the rcu boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		irq_unlock_sparse();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	arch_smt_update();
	return err;
}
int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
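/*
 * Illustrative sketch (not part of this file): offlining a CPU from kernel
 * code and checking the result:
 *
 *	int err = cpu_down(2);
 *
 *	if (err)
 *		pr_err("failed to offline CPU2: %d\n", err);
 */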
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();
	arch_smt_update();
	return ret;
}
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);
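/*
 * Illustrative usage (not part of this file): cpu_up()/cpu_down() also back
 * the sysfs "online" attribute, so the same paths run when userspace does:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/online
 *	echo 1 > /sys/devices/system/cpu/cpu2/online
 */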
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}
void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * having already disabled cpu hotplug, to avoid a hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
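/*
 * Illustrative sketch (not part of this file): a hypothetical architecture
 * secondary-startup path would call this with interrupts still disabled:
 *
 *	void start_secondary(void)
 *	{
 *		... low-level init, interrupts off ...
 *		notify_cpu_starting(smp_processor_id());
 *		set_cpu_online(smp_processor_id(), true);
 *		local_irq_enable();
 *		cpu_startup_entry(CPUHP_ONLINE);
 *	}
 */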
#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
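/*
 * Illustrative sketch (not part of this file): because row 0 is all zeroes,
 * cpumask_of(cpu) can return a pointer that "backs into" the array and still
 * reads as a constant mask with exactly one bit set:
 *
 *	const struct cpumask *m = cpumask_of(4);
 *
 *	WARN_ON(!cpumask_test_cpu(4, m));
 *	WARN_ON(cpumask_weight(m) != 1);
 */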
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);
#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}
void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
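/*
 * Illustrative sketch (not part of this file): architecture boot code
 * typically seeds these masks before SMP bringup, e.g.:
 *
 *	init_cpu_possible(cpumask_of(0));
 *	init_cpu_present(cpumask_of(0));
 *	set_cpu_online(0, true);
 */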
enum cpu_mitigations cpu_mitigations = CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
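/*
 * Illustrative usage (not part of this file): the flag is consumed from the
 * kernel command line, e.g.:
 *
 *	mitigations=auto	(default) enable applicable CPU mitigations
 *	mitigations=off		disable all optional CPU mitigations
 */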