2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
4 * This code is licensed under the GPL.
6 #include <linux/proc_fs.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched.h>
11 #include <linux/sched/smt.h>
12 #include <linux/unistd.h>
13 #include <linux/cpu.h>
14 #include <linux/oom.h>
15 #include <linux/rcupdate.h>
16 #include <linux/export.h>
17 #include <linux/bug.h>
18 #include <linux/kthread.h>
19 #include <linux/stop_machine.h>
20 #include <linux/mutex.h>
21 #include <linux/gfp.h>
22 #include <linux/suspend.h>
23 #include <linux/lockdep.h>
24 #include <linux/tick.h>
25 #include <linux/irq.h>
26 #include <linux/smpboot.h>
27 #include <linux/relay.h>
28 #include <linux/slab.h>
29 #include <linux/random.h>
31 #include <trace/events/power.h>
32 #define CREATE_TRACE_POINTS
33 #include <trace/events/cpuhp.h>
38 * cpuhp_cpu_state - Per cpu hotplug state storage
39 * @state: The current cpu state
40 * @target: The target state
41 * @thread: Pointer to the hotplug thread
42 * @should_run: Thread should execute
43 * @rollback: Perform a rollback
44 * @single: Single callback invocation
45 * @bringup: Single callback bringup or teardown selector
46 * @cb_state: The state for a single callback (install/uninstall)
47 * @result: Result of the operation
48 * @done: Signal completion to the issuer of the task
50 struct cpuhp_cpu_state {
51 enum cpuhp_state state;
52 enum cpuhp_state target;
54 struct task_struct *thread;
60 struct hlist_node *node;
61 enum cpuhp_state cb_state;
63 struct completion done;
67 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
69 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
70 static struct lock_class_key cpuhp_state_key;
71 static struct lockdep_map cpuhp_state_lock_map =
72 STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
76 * cpuhp_step - Hotplug state machine step
77 * @name: Name of the step
78 * @startup: Startup function of the step
79 * @teardown: Teardown function of the step
80 * @skip_onerr: Do not invoke the functions on error rollback
81 * Will go away once the notifiers are gone
82 * @cant_stop: Bringup/teardown can't be stopped at this step
87 int (*single)(unsigned int cpu);
88 int (*multi)(unsigned int cpu,
89 struct hlist_node *node);
92 int (*single)(unsigned int cpu);
93 int (*multi)(unsigned int cpu,
94 struct hlist_node *node);
96 struct hlist_head list;
102 static DEFINE_MUTEX(cpuhp_state_mutex);
103 static struct cpuhp_step cpuhp_bp_states[];
104 static struct cpuhp_step cpuhp_ap_states[];
106 static bool cpuhp_is_ap_state(enum cpuhp_state state)
109 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
110 * purposes as that state is handled explicitly in cpu_down.
112 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
115 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
117 struct cpuhp_step *sp;
119 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
124 * cpuhp_invoke_callback - Invoke the callbacks for a given state
125 * @cpu: The cpu for which the callback should be invoked
126 * @step: The step in the state machine
127 * @bringup: True if the bringup callback should be invoked
129 * Called from cpu hotplug and from the state register machinery.
131 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
132 bool bringup, struct hlist_node *node)
134 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
135 struct cpuhp_step *step = cpuhp_get_step(state);
136 int (*cbm)(unsigned int cpu, struct hlist_node *node);
137 int (*cb)(unsigned int cpu);
140 if (!step->multi_instance) {
141 cb = bringup ? step->startup.single : step->teardown.single;
144 trace_cpuhp_enter(cpu, st->target, state, cb);
146 trace_cpuhp_exit(cpu, st->state, state, ret);
149 cbm = bringup ? step->startup.multi : step->teardown.multi;
153 /* Single invocation for instance add/remove */
155 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
156 ret = cbm(cpu, node);
157 trace_cpuhp_exit(cpu, st->state, state, ret);
161 /* State transition. Invoke on all instances */
163 hlist_for_each(node, &step->list) {
164 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
165 ret = cbm(cpu, node);
166 trace_cpuhp_exit(cpu, st->state, state, ret);
173 /* Rollback the instances if one failed */
174 cbm = !bringup ? step->startup.multi : step->teardown.multi;
178 hlist_for_each(node, &step->list) {
187 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
188 static DEFINE_MUTEX(cpu_add_remove_lock);
189 bool cpuhp_tasks_frozen;
190 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
193 * The following two APIs (cpu_maps_update_begin/done) must be used when
194 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
195 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
196 * hotplug callback (un)registration performed using __register_cpu_notifier()
197 * or __unregister_cpu_notifier().
199 void cpu_maps_update_begin(void)
201 mutex_lock(&cpu_add_remove_lock);
203 EXPORT_SYMBOL(cpu_notifier_register_begin);
205 void cpu_maps_update_done(void)
207 mutex_unlock(&cpu_add_remove_lock);
209 EXPORT_SYMBOL(cpu_notifier_register_done);
211 static RAW_NOTIFIER_HEAD(cpu_chain);
213 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
214 * Should always be manipulated under cpu_add_remove_lock
216 static int cpu_hotplug_disabled;
218 #ifdef CONFIG_HOTPLUG_CPU
221 struct task_struct *active_writer;
222 /* wait queue to wake up the active_writer */
223 wait_queue_head_t wq;
224 /* verifies that no writer will get active while readers are active */
227 * Also blocks the new readers during
228 * an ongoing cpu hotplug operation.
232 #ifdef CONFIG_DEBUG_LOCK_ALLOC
233 struct lockdep_map dep_map;
236 .active_writer = NULL,
237 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
238 .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
239 #ifdef CONFIG_DEBUG_LOCK_ALLOC
240 .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
244 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
245 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
246 #define cpuhp_lock_acquire_tryread() \
247 lock_map_acquire_tryread(&cpu_hotplug.dep_map)
248 #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
249 #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
252 void get_online_cpus(void)
255 if (cpu_hotplug.active_writer == current)
257 cpuhp_lock_acquire_read();
258 mutex_lock(&cpu_hotplug.lock);
259 atomic_inc(&cpu_hotplug.refcount);
260 mutex_unlock(&cpu_hotplug.lock);
262 EXPORT_SYMBOL_GPL(get_online_cpus);
264 void put_online_cpus(void)
268 if (cpu_hotplug.active_writer == current)
271 refcount = atomic_dec_return(&cpu_hotplug.refcount);
272 if (WARN_ON(refcount < 0)) /* try to fix things up */
273 atomic_inc(&cpu_hotplug.refcount);
275 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
276 wake_up(&cpu_hotplug.wq);
278 cpuhp_lock_release();
281 EXPORT_SYMBOL_GPL(put_online_cpus);
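/*
 * Usage sketch (illustrative, not part of this file): a reader that needs
 * the set of online CPUs to stay stable while walking it brackets the walk
 * with the reference-counted pair above. do_something() is a hypothetical
 * per-CPU helper.
 *
 *	unsigned int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 *
 * Readers nest freely and only sleep while a hotplug writer holds the lock.
 */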
284 * This ensures that the hotplug operation can begin only when the
285 * refcount goes to zero.
287 * Note that during a cpu-hotplug operation, the new readers, if any,
288 * will be blocked by the cpu_hotplug.lock
290 * Since cpu_hotplug_begin() is always called after invoking
291 * cpu_maps_update_begin(), we can be sure that only one writer is active.
293 * Note that theoretically, there is a possibility of a livelock:
294 * - Refcount goes to zero, last reader wakes up the sleeping
296 * - Last reader unlocks the cpu_hotplug.lock.
297 * - A new reader arrives at this moment, bumps up the refcount.
298 * - The writer acquires the cpu_hotplug.lock finds the refcount
299 * non zero and goes to sleep again.
301 * However, this is very difficult to achieve in practice since
302 * get_online_cpus() is not an API which is called all that often.
305 void cpu_hotplug_begin(void)
309 cpu_hotplug.active_writer = current;
310 cpuhp_lock_acquire();
313 mutex_lock(&cpu_hotplug.lock);
314 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
315 if (likely(!atomic_read(&cpu_hotplug.refcount)))
317 mutex_unlock(&cpu_hotplug.lock);
320 finish_wait(&cpu_hotplug.wq, &wait);
323 void cpu_hotplug_done(void)
325 cpu_hotplug.active_writer = NULL;
326 mutex_unlock(&cpu_hotplug.lock);
327 cpuhp_lock_release();
331 * Wait for currently running CPU hotplug operations to complete (if any) and
332 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
333 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
334 * hotplug path before performing hotplug operations. So acquiring that lock
335 * guarantees mutual exclusion from any currently running hotplug operations.
337 void cpu_hotplug_disable(void)
339 cpu_maps_update_begin();
340 cpu_hotplug_disabled++;
341 cpu_maps_update_done();
343 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
345 static void __cpu_hotplug_enable(void)
347 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
349 cpu_hotplug_disabled--;
352 void cpu_hotplug_enable(void)
354 cpu_maps_update_begin();
355 __cpu_hotplug_enable();
356 cpu_maps_update_done();
358 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
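/*
 * Usage sketch (illustrative): code that must guarantee no CPU comes or goes
 * for an extended period pairs the two calls; while the counter is non-zero,
 * cpu_up()/cpu_down() fail with -EBUSY.
 *
 *	cpu_hotplug_disable();
 *	...			// hotplug attempts are rejected here
 *	cpu_hotplug_enable();
 */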
359 #endif /* CONFIG_HOTPLUG_CPU */
362 * Architectures that need SMT-specific errata handling during SMT hotplug
363 * should override this.
365 void __weak arch_smt_update(void) { }
367 #ifdef CONFIG_HOTPLUG_SMT
368 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
369 EXPORT_SYMBOL_GPL(cpu_smt_control);
371 static bool cpu_smt_available __read_mostly;
373 void __init cpu_smt_disable(bool force)
375 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
376 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
380 pr_info("SMT: Force disabled\n");
381 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
383 pr_info("SMT: disabled\n");
384 cpu_smt_control = CPU_SMT_DISABLED;
389 * The decision whether SMT is supported can only be done after the full
390 * CPU identification. Called from architecture code before non-boot CPUs are brought up.
393 void __init cpu_smt_check_topology_early(void)
395 if (!topology_smt_supported())
396 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
400 * If SMT was disabled by BIOS, detect it here, after the CPUs have been
401 * brought online. This ensures the smt/l1tf sysfs entries are consistent
402 * with reality. cpu_smt_available is set to true during the bringup of non
403 * boot CPUs when an SMT sibling is detected. Note, this may overwrite
404 * cpu_smt_control's previous setting.
406 void __init cpu_smt_check_topology(void)
408 if (!cpu_smt_available)
409 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
412 static int __init smt_cmdline_disable(char *str)
414 cpu_smt_disable(str && !strcmp(str, "force"));
417 early_param("nosmt", smt_cmdline_disable);
419 static inline bool cpu_smt_allowed(unsigned int cpu)
421 if (topology_is_primary_thread(cpu))
425 * If the CPU is not a 'primary' thread and the booted_once bit is
426 * set then the processor has SMT support. Store this information
427 * for the late check of SMT support in cpu_smt_check_topology().
429 if (per_cpu(cpuhp_state, cpu).booted_once)
430 cpu_smt_available = true;
432 if (cpu_smt_control == CPU_SMT_ENABLED)
436 * On x86 it's required to boot all logical CPUs at least once so
437 * that the init code can get a chance to set CR4.MCE on each
438 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
439 * core will shut down the machine.
441 return !per_cpu(cpuhp_state, cpu).booted_once;
444 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
447 /* Need to know about CPUs going up/down? */
448 int register_cpu_notifier(struct notifier_block *nb)
451 cpu_maps_update_begin();
452 ret = raw_notifier_chain_register(&cpu_chain, nb);
453 cpu_maps_update_done();
457 int __register_cpu_notifier(struct notifier_block *nb)
459 return raw_notifier_chain_register(&cpu_chain, nb);
462 static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
465 unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
466 void *hcpu = (void *)(long)cpu;
470 ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
473 return notifier_to_errno(ret);
476 static int cpu_notify(unsigned long val, unsigned int cpu)
478 return __cpu_notify(val, cpu, -1, NULL);
481 static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
483 BUG_ON(cpu_notify(val, cpu));
486 /* Notifier wrappers for transitioning to state machine */
487 static int notify_prepare(unsigned int cpu)
492 ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
495 pr_warn("%s: attempt to bring up CPU %u failed\n",
497 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
502 static int notify_online(unsigned int cpu)
504 cpu_notify(CPU_ONLINE, cpu);
508 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
510 static int bringup_wait_for_ap(unsigned int cpu)
512 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
514 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
515 wait_for_completion(&st->done);
516 if (WARN_ON_ONCE((!cpu_online(cpu))))
519 /* Unpark the hotplug thread of the target cpu */
520 kthread_unpark(st->thread);
523 * SMT soft disabling on X86 requires bringing the CPU out of the
524 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
525 * CPU marked itself as booted_once in notify_cpu_starting() so the
526 * cpu_smt_allowed() check will now return false if this is not the primary sibling.
529 if (!cpu_smt_allowed(cpu))
532 /* Should we go further up ? */
533 if (st->target > CPUHP_AP_ONLINE_IDLE) {
534 __cpuhp_kick_ap_work(st);
535 wait_for_completion(&st->done);
540 static int bringup_cpu(unsigned int cpu)
542 struct task_struct *idle = idle_thread_get(cpu);
546 * Some architectures have to walk the irq descriptors to
547 * set up the vector space for the cpu which comes online.
548 * Prevent irq alloc/free across the bringup.
552 /* Arch-specific enabling code. */
553 ret = __cpu_up(cpu, idle);
556 cpu_notify(CPU_UP_CANCELED, cpu);
559 return bringup_wait_for_ap(cpu);
563 * Hotplug state machine related functions
565 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
567 for (st->state++; st->state < st->target; st->state++) {
568 struct cpuhp_step *step = cpuhp_get_step(st->state);
570 if (!step->skip_onerr)
571 cpuhp_invoke_callback(cpu, st->state, true, NULL);
575 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
576 enum cpuhp_state target)
578 enum cpuhp_state prev_state = st->state;
581 for (; st->state > target; st->state--) {
582 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
584 st->target = prev_state;
585 undo_cpu_down(cpu, st);
592 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
594 for (st->state--; st->state > st->target; st->state--) {
595 struct cpuhp_step *step = cpuhp_get_step(st->state);
597 if (!step->skip_onerr)
598 cpuhp_invoke_callback(cpu, st->state, false, NULL);
602 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
604 if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
607 * When CPU hotplug is disabled, then taking the CPU down is not
608 * possible because takedown_cpu() and the architecture and
609 * subsystem specific mechanisms are not available. So the CPU
610 * which would be completely unplugged again needs to stay around
611 * in the current state.
613 return st->state <= CPUHP_BRINGUP_CPU;
616 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
617 enum cpuhp_state target)
619 enum cpuhp_state prev_state = st->state;
622 while (st->state < target) {
624 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
626 if (can_rollback_cpu(st)) {
627 st->target = prev_state;
628 undo_cpu_up(cpu, st);
637 * The cpu hotplug threads manage the bringup and teardown of the cpus
639 static void cpuhp_create(unsigned int cpu)
641 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
643 init_completion(&st->done);
646 static int cpuhp_should_run(unsigned int cpu)
648 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
650 return st->should_run;
653 /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
654 static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
656 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
658 return cpuhp_down_callbacks(cpu, st, target);
661 /* Execute the online startup callbacks. Used to be CPU_ONLINE */
662 static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
664 return cpuhp_up_callbacks(cpu, st, st->target);
668 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
669 * callbacks when a state gets [un]installed at runtime.
671 static void cpuhp_thread_fun(unsigned int cpu)
673 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
677 * Paired with the mb() in cpuhp_kick_ap_work and
678 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
684 st->should_run = false;
686 lock_map_acquire(&cpuhp_state_lock_map);
687 /* Single callback invocation for [un]install ? */
689 if (st->cb_state < CPUHP_AP_ONLINE) {
691 ret = cpuhp_invoke_callback(cpu, st->cb_state,
692 st->bringup, st->node);
695 ret = cpuhp_invoke_callback(cpu, st->cb_state,
696 st->bringup, st->node);
698 } else if (st->rollback) {
699 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
701 undo_cpu_down(cpu, st);
703 * This is a momentary workaround to keep the notifier users
704 * happy. Will go away once we have gotten rid of the notifiers.
706 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
707 st->rollback = false;
709 /* Cannot happen .... */
710 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
712 /* Regular hotplug work */
713 if (st->state < st->target)
714 ret = cpuhp_ap_online(cpu, st);
715 else if (st->state > st->target)
716 ret = cpuhp_ap_offline(cpu, st);
718 lock_map_release(&cpuhp_state_lock_map);
723 /* Invoke a single callback on a remote cpu */
725 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
726 struct hlist_node *node)
728 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
730 if (!cpu_online(cpu))
733 lock_map_acquire(&cpuhp_state_lock_map);
734 lock_map_release(&cpuhp_state_lock_map);
737 * If we are up and running, use the hotplug thread. For early calls
738 * we invoke the thread function directly.
741 return cpuhp_invoke_callback(cpu, state, bringup, node);
743 st->cb_state = state;
745 st->bringup = bringup;
749 * Make sure the above stores are visible before should_run becomes
750 * true. Paired with the mb() above in cpuhp_thread_fun()
753 st->should_run = true;
754 wake_up_process(st->thread);
755 wait_for_completion(&st->done);
759 /* Regular hotplug invocation of the AP hotplug thread */
760 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
765 * Make sure the above stores are visible before should_run becomes
766 * true. Paired with the mb() above in cpuhp_thread_fun()
769 st->should_run = true;
770 wake_up_process(st->thread);
773 static int cpuhp_kick_ap_work(unsigned int cpu)
775 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
776 enum cpuhp_state state = st->state;
778 trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
779 lock_map_acquire(&cpuhp_state_lock_map);
780 lock_map_release(&cpuhp_state_lock_map);
781 __cpuhp_kick_ap_work(st);
782 wait_for_completion(&st->done);
783 trace_cpuhp_exit(cpu, st->state, state, st->result);
787 static struct smp_hotplug_thread cpuhp_threads = {
788 .store = &cpuhp_state.thread,
789 .create = &cpuhp_create,
790 .thread_should_run = cpuhp_should_run,
791 .thread_fn = cpuhp_thread_fun,
792 .thread_comm = "cpuhp/%u",
796 void __init cpuhp_threads_init(void)
798 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
799 kthread_unpark(this_cpu_read(cpuhp_state.thread));
802 EXPORT_SYMBOL(register_cpu_notifier);
803 EXPORT_SYMBOL(__register_cpu_notifier);
804 void unregister_cpu_notifier(struct notifier_block *nb)
806 cpu_maps_update_begin();
807 raw_notifier_chain_unregister(&cpu_chain, nb);
808 cpu_maps_update_done();
810 EXPORT_SYMBOL(unregister_cpu_notifier);
812 void __unregister_cpu_notifier(struct notifier_block *nb)
814 raw_notifier_chain_unregister(&cpu_chain, nb);
816 EXPORT_SYMBOL(__unregister_cpu_notifier);
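/*
 * Usage sketch for the legacy notifier interface above (illustrative only;
 * the state machine in this file is replacing it). foo_init_cpu() and
 * foo_exit_cpu() are hypothetical helpers.
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			foo_init_cpu(cpu);
 *			break;
 *		case CPU_DOWN_PREPARE:
 *			foo_exit_cpu(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_nb = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_nb);
 */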
818 #ifdef CONFIG_HOTPLUG_CPU
819 #ifndef arch_clear_mm_cpumask_cpu
820 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
824 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
827 * This function walks all processes, finds a valid mm struct for each one and
828 * then clears a corresponding bit in mm's cpumask. While this all sounds
829 * trivial, there are various non-obvious corner cases, which this function
830 * tries to solve in a safe manner.
832 * Also note that the function uses a somewhat relaxed locking scheme, so it may
833 * be called only for an already offlined CPU.
835 void clear_tasks_mm_cpumask(int cpu)
837 struct task_struct *p;
840 * This function is called after the cpu is taken down and marked
841 * offline, so it's not like new tasks will ever get this cpu set in
842 * their mm mask. -- Peter Zijlstra
843 * Thus, we may use rcu_read_lock() here, instead of grabbing
844 * full-fledged tasklist_lock.
846 WARN_ON(cpu_online(cpu));
848 for_each_process(p) {
849 struct task_struct *t;
852 * Main thread might exit, but other threads may still have
853 * a valid mm. Find one.
855 t = find_lock_task_mm(p);
858 arch_clear_mm_cpumask_cpu(cpu, t->mm);
864 static inline void check_for_tasks(int dead_cpu)
866 struct task_struct *g, *p;
868 read_lock(&tasklist_lock);
869 for_each_process_thread(g, p) {
873 * We do the check with task_rq(p)->lock unlocked.
874 * Order the reads so that we do not warn about a task
875 * which ran on this cpu in the past and has just been
876 * woken on another cpu.
879 if (task_cpu(p) != dead_cpu)
882 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
883 p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
885 read_unlock(&tasklist_lock);
888 static int notify_down_prepare(unsigned int cpu)
890 int err, nr_calls = 0;
892 err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
895 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
896 pr_warn("%s: attempt to take down CPU %u failed\n",
902 /* Take this CPU down. */
903 static int take_cpu_down(void *_param)
905 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
906 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
907 int err, cpu = smp_processor_id();
909 /* Ensure this CPU doesn't handle any more interrupts. */
910 err = __cpu_disable();
915 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
916 * do this step again.
918 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
920 /* Invoke the former CPU_DYING callbacks */
921 for (; st->state > target; st->state--)
922 cpuhp_invoke_callback(cpu, st->state, false, NULL);
924 /* Give up timekeeping duties */
925 tick_handover_do_timer();
926 /* Park the stopper thread */
927 stop_machine_park(cpu);
931 static int takedown_cpu(unsigned int cpu)
933 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
936 /* Park the smpboot threads */
937 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
940 * Prevent irq alloc/free while the dying cpu reorganizes the
941 * interrupt affinities.
946 * So now all preempt/rcu users must observe !cpu_active().
948 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
950 /* CPU refused to die */
952 /* Unpark the hotplug thread so we can rollback there */
953 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
956 BUG_ON(cpu_online(cpu));
959 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
960 * runnable tasks from the cpu; only the idle task is left now
961 * that the migration thread is done doing the stop_machine thing.
963 * Wait for the stop thread to go away.
965 wait_for_completion(&st->done);
966 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
968 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
971 hotplug_cpu__broadcast_tick_pull(cpu);
972 /* This actually kills the CPU. */
975 tick_cleanup_dead_cpu(cpu);
979 static int notify_dead(unsigned int cpu)
981 cpu_notify_nofail(CPU_DEAD, cpu);
982 check_for_tasks(cpu);
986 static void cpuhp_complete_idle_dead(void *arg)
988 struct cpuhp_cpu_state *st = arg;
993 void cpuhp_report_idle_dead(void)
995 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
997 BUG_ON(st->state != CPUHP_AP_OFFLINE);
998 rcu_report_dead(smp_processor_id());
999 st->state = CPUHP_AP_IDLE_DEAD;
1001 * We cannot call complete after rcu_report_dead() so we delegate it
1004 smp_call_function_single(cpumask_first(cpu_online_mask),
1005 cpuhp_complete_idle_dead, st, 0);
1009 #define notify_down_prepare NULL
1010 #define takedown_cpu NULL
1011 #define notify_dead NULL
1014 #ifdef CONFIG_HOTPLUG_CPU
1016 /* Requires cpu_add_remove_lock to be held */
1017 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1018 enum cpuhp_state target)
1020 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1021 int prev_state, ret = 0;
1022 bool hasdied = false;
1024 if (num_online_cpus() == 1)
1027 if (!cpu_present(cpu))
1030 cpu_hotplug_begin();
1032 cpuhp_tasks_frozen = tasks_frozen;
1034 prev_state = st->state;
1035 st->target = target;
1037 * If the current CPU state is in the range of the AP hotplug thread,
1038 * then we need to kick the thread.
1040 if (st->state > CPUHP_TEARDOWN_CPU) {
1041 ret = cpuhp_kick_ap_work(cpu);
1043 * The AP side has done the error rollback already. Just
1044 * return the error code..
1050 * We might have stopped still in the range of the AP hotplug
1051 * thread. Nothing to do anymore.
1053 if (st->state > CPUHP_TEARDOWN_CPU)
1057 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1058 * to do the further cleanups.
1060 ret = cpuhp_down_callbacks(cpu, st, target);
1061 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1062 st->target = prev_state;
1063 st->rollback = true;
1064 cpuhp_kick_ap_work(cpu);
1067 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
1070 /* This post dead nonsense must die */
1071 if (!ret && hasdied)
1072 cpu_notify_nofail(CPU_POST_DEAD, cpu);
1077 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1079 if (cpu_hotplug_disabled)
1081 return _cpu_down(cpu, 0, target);
1084 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1088 cpu_maps_update_begin();
1089 err = cpu_down_maps_locked(cpu, target);
1090 cpu_maps_update_done();
1093 int cpu_down(unsigned int cpu)
1095 return do_cpu_down(cpu, CPUHP_OFFLINE);
1097 EXPORT_SYMBOL(cpu_down);
1098 #endif /*CONFIG_HOTPLUG_CPU*/
1101 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1102 * @cpu: cpu that just started
1104 * It must be called by the arch code on the new cpu, before the new cpu
1105 * enables interrupts and before the "boot" cpu returns from __cpu_up().
1107 void notify_cpu_starting(unsigned int cpu)
1109 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1110 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1112 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
1113 st->booted_once = true;
1114 while (st->state < target) {
1116 cpuhp_invoke_callback(cpu, st->state, true, NULL);
1121 * Called from the idle task. Wake up the controlling task which brings the
1122 * hotplug thread of the upcoming CPU up and then delegates the rest of the
1123 * online bringup to the hotplug thread.
1125 void cpuhp_online_idle(enum cpuhp_state state)
1127 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1129 /* Happens for the boot cpu */
1130 if (state != CPUHP_AP_ONLINE_IDLE)
1134 * Unpark the stopper thread before we start the idle loop (and start
1135 * scheduling); this ensures the stopper task is always available.
1137 stop_machine_unpark(smp_processor_id());
1139 st->state = CPUHP_AP_ONLINE_IDLE;
1140 complete(&st->done);
1143 /* Requires cpu_add_remove_lock to be held */
1144 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1146 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1147 struct task_struct *idle;
1150 cpu_hotplug_begin();
1152 if (!cpu_present(cpu)) {
1158 * The caller of do_cpu_up might have raced with another
1159 * caller. Ignore it for now.
1161 if (st->state >= target)
1164 if (st->state == CPUHP_OFFLINE) {
1165 /* Let it fail before we try to bring the cpu up */
1166 idle = idle_thread_get(cpu);
1168 ret = PTR_ERR(idle);
1173 cpuhp_tasks_frozen = tasks_frozen;
1175 st->target = target;
1177 * If the current CPU state is in the range of the AP hotplug thread,
1178 * then we need to kick the thread once more.
1180 if (st->state > CPUHP_BRINGUP_CPU) {
1181 ret = cpuhp_kick_ap_work(cpu);
1183 * The AP side has done the error rollback already. Just
1184 * return the error code..
1191 * Try to reach the target state. We max out on the BP at
1192 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1193 * responsible for bringing it up to the target state.
1195 target = min((int)target, CPUHP_BRINGUP_CPU);
1196 ret = cpuhp_up_callbacks(cpu, st, target);
1203 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1207 if (!cpu_possible(cpu)) {
1208 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1210 #if defined(CONFIG_IA64)
1211 pr_err("please check additional_cpus= boot parameter\n");
1216 err = try_online_node(cpu_to_node(cpu));
1220 cpu_maps_update_begin();
1222 if (cpu_hotplug_disabled) {
1226 if (!cpu_smt_allowed(cpu)) {
1231 err = _cpu_up(cpu, 0, target);
1233 cpu_maps_update_done();
1237 int cpu_up(unsigned int cpu)
1239 return do_cpu_up(cpu, CPUHP_ONLINE);
1241 EXPORT_SYMBOL_GPL(cpu_up);
1243 #ifdef CONFIG_PM_SLEEP_SMP
1244 static cpumask_var_t frozen_cpus;
1246 int freeze_secondary_cpus(int primary)
1250 cpu_maps_update_begin();
1251 if (!cpu_online(primary))
1252 primary = cpumask_first(cpu_online_mask);
1254 * We take down all of the non-boot CPUs in one shot to avoid races
1255 * with userspace trying to use CPU hotplug at the same time.
1257 cpumask_clear(frozen_cpus);
1259 pr_info("Disabling non-boot CPUs ...\n");
1260 for_each_online_cpu(cpu) {
1263 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1264 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1265 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1267 cpumask_set_cpu(cpu, frozen_cpus);
1269 pr_err("Error taking CPU%d down: %d\n", cpu, error);
1275 BUG_ON(num_online_cpus() > 1);
1277 pr_err("Non-boot CPUs are not disabled\n");
1280 * Make sure the CPUs won't be enabled by someone else. We need to do
1281 * this even in case of failure as all disable_nonboot_cpus() users are
1282 * supposed to do enable_nonboot_cpus() on the failure path.
1284 cpu_hotplug_disabled++;
1286 cpu_maps_update_done();
1290 void __weak arch_enable_nonboot_cpus_begin(void)
1294 void __weak arch_enable_nonboot_cpus_end(void)
1298 void enable_nonboot_cpus(void)
1302 /* Allow everyone to use the CPU hotplug again */
1303 cpu_maps_update_begin();
1304 __cpu_hotplug_enable();
1305 if (cpumask_empty(frozen_cpus))
1308 pr_info("Enabling non-boot CPUs ...\n");
1310 arch_enable_nonboot_cpus_begin();
1312 for_each_cpu(cpu, frozen_cpus) {
1313 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1314 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1315 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1317 pr_info("CPU%d is up\n", cpu);
1320 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1323 arch_enable_nonboot_cpus_end();
1325 cpumask_clear(frozen_cpus);
1327 cpu_maps_update_done();
1330 static int __init alloc_frozen_cpus(void)
1332 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1336 core_initcall(alloc_frozen_cpus);
1339 * When callbacks for CPU hotplug notifications are being executed, we must
1340 * ensure that the state of the system with respect to the tasks being frozen
1341 * or not, as reported by the notification, remains unchanged *throughout the
1342 * duration* of the execution of the callbacks.
1343 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1345 * This synchronization is implemented by mutually excluding regular CPU
1346 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1347 * Hibernate notifications.
1350 cpu_hotplug_pm_callback(struct notifier_block *nb,
1351 unsigned long action, void *ptr)
1355 case PM_SUSPEND_PREPARE:
1356 case PM_HIBERNATION_PREPARE:
1357 cpu_hotplug_disable();
1360 case PM_POST_SUSPEND:
1361 case PM_POST_HIBERNATION:
1362 cpu_hotplug_enable();
1373 static int __init cpu_hotplug_pm_sync_init(void)
1376 * cpu_hotplug_pm_callback has higher priority than x86
1377 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1378 * to disable cpu hotplug to avoid cpu hotplug race.
1380 pm_notifier(cpu_hotplug_pm_callback, 0);
1383 core_initcall(cpu_hotplug_pm_sync_init);
1385 #endif /* CONFIG_PM_SLEEP_SMP */
1387 #endif /* CONFIG_SMP */
1389 /* Boot processor state steps */
1390 static struct cpuhp_step cpuhp_bp_states[] = {
1393 .startup.single = NULL,
1394 .teardown.single = NULL,
1397 [CPUHP_CREATE_THREADS]= {
1398 .name = "threads:prepare",
1399 .startup.single = smpboot_create_threads,
1400 .teardown.single = NULL,
1403 [CPUHP_PERF_PREPARE] = {
1404 .name = "perf:prepare",
1405 .startup.single = perf_event_init_cpu,
1406 .teardown.single = perf_event_exit_cpu,
1408 [CPUHP_RANDOM_PREPARE] = {
1409 .name = "random:prepare",
1410 .startup.single = random_prepare_cpu,
1411 .teardown.single = NULL,
1413 [CPUHP_WORKQUEUE_PREP] = {
1414 .name = "workqueue:prepare",
1415 .startup.single = workqueue_prepare_cpu,
1416 .teardown.single = NULL,
1418 [CPUHP_HRTIMERS_PREPARE] = {
1419 .name = "hrtimers:prepare",
1420 .startup.single = hrtimers_prepare_cpu,
1421 .teardown.single = hrtimers_dead_cpu,
1423 [CPUHP_SMPCFD_PREPARE] = {
1424 .name = "smpcfd:prepare",
1425 .startup.single = smpcfd_prepare_cpu,
1426 .teardown.single = smpcfd_dead_cpu,
1428 [CPUHP_RELAY_PREPARE] = {
1429 .name = "relay:prepare",
1430 .startup.single = relay_prepare_cpu,
1431 .teardown.single = NULL,
1433 [CPUHP_SLAB_PREPARE] = {
1434 .name = "slab:prepare",
1435 .startup.single = slab_prepare_cpu,
1436 .teardown.single = slab_dead_cpu,
1438 [CPUHP_RCUTREE_PREP] = {
1439 .name = "RCU/tree:prepare",
1440 .startup.single = rcutree_prepare_cpu,
1441 .teardown.single = rcutree_dead_cpu,
1444 * Preparatory and dead notifiers. Will be replaced once the notifiers
1445 * are converted to states.
1447 [CPUHP_NOTIFY_PREPARE] = {
1448 .name = "notify:prepare",
1449 .startup.single = notify_prepare,
1450 .teardown.single = notify_dead,
1455 * On the tear-down path, timers_dead_cpu() must be invoked
1456 * before blk_mq_queue_reinit_notify() from notify_dead(),
1457 * otherwise an RCU stall occurs.
1459 [CPUHP_TIMERS_PREPARE] = {
1460 .name = "timers:dead",
1461 .startup.single = timers_prepare_cpu,
1462 .teardown.single = timers_dead_cpu,
1464 /* Kicks the plugged cpu into life */
1465 [CPUHP_BRINGUP_CPU] = {
1466 .name = "cpu:bringup",
1467 .startup.single = bringup_cpu,
1468 .teardown.single = NULL,
1472 * Handled on the control processor until the plugged processor manages this itself.
1475 [CPUHP_TEARDOWN_CPU] = {
1476 .name = "cpu:teardown",
1477 .startup.single = NULL,
1478 .teardown.single = takedown_cpu,
1482 [CPUHP_BRINGUP_CPU] = { },
1486 /* Application processor state steps */
1487 static struct cpuhp_step cpuhp_ap_states[] = {
1489 /* Final state before CPU kills itself */
1490 [CPUHP_AP_IDLE_DEAD] = {
1491 .name = "idle:dead",
1494 * Last state before CPU enters the idle loop to die. Transient state
1495 * for synchronization.
1497 [CPUHP_AP_OFFLINE] = {
1498 .name = "ap:offline",
1501 /* First state is scheduler control. Interrupts are disabled */
1502 [CPUHP_AP_SCHED_STARTING] = {
1503 .name = "sched:starting",
1504 .startup.single = sched_cpu_starting,
1505 .teardown.single = sched_cpu_dying,
1507 [CPUHP_AP_RCUTREE_DYING] = {
1508 .name = "RCU/tree:dying",
1509 .startup.single = NULL,
1510 .teardown.single = rcutree_dying_cpu,
1512 [CPUHP_AP_SMPCFD_DYING] = {
1513 .name = "smpcfd:dying",
1514 .startup.single = NULL,
1515 .teardown.single = smpcfd_dying_cpu,
1517 /* Entry state on starting. Interrupts enabled from here on. Transient
1518 * state for synchronization */
1519 [CPUHP_AP_ONLINE] = {
1520 .name = "ap:online",
1522 /* Handle smpboot threads park/unpark */
1523 [CPUHP_AP_SMPBOOT_THREADS] = {
1524 .name = "smpboot/threads:online",
1525 .startup.single = smpboot_unpark_threads,
1526 .teardown.single = smpboot_park_threads,
1528 [CPUHP_AP_PERF_ONLINE] = {
1529 .name = "perf:online",
1530 .startup.single = perf_event_init_cpu,
1531 .teardown.single = perf_event_exit_cpu,
1533 [CPUHP_AP_WORKQUEUE_ONLINE] = {
1534 .name = "workqueue:online",
1535 .startup.single = workqueue_online_cpu,
1536 .teardown.single = workqueue_offline_cpu,
1538 [CPUHP_AP_RANDOM_ONLINE] = {
1539 .name = "random:online",
1540 .startup.single = random_online_cpu,
1541 .teardown.single = NULL,
1543 [CPUHP_AP_RCUTREE_ONLINE] = {
1544 .name = "RCU/tree:online",
1545 .startup.single = rcutree_online_cpu,
1546 .teardown.single = rcutree_offline_cpu,
1550 * Online/down_prepare notifiers. Will be removed once the notifiers
1551 * are converted to states.
1553 [CPUHP_AP_NOTIFY_ONLINE] = {
1554 .name = "notify:online",
1555 .startup.single = notify_online,
1556 .teardown.single = notify_down_prepare,
1561 * The dynamically registered state space is here
1565 /* Last state is scheduler control setting the cpu active */
1566 [CPUHP_AP_ACTIVE] = {
1567 .name = "sched:active",
1568 .startup.single = sched_cpu_activate,
1569 .teardown.single = sched_cpu_deactivate,
1573 /* CPU is fully up and running. */
1576 .startup.single = NULL,
1577 .teardown.single = NULL,
1581 /* Sanity check for callbacks */
1582 static int cpuhp_cb_check(enum cpuhp_state state)
1584 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1589 static void cpuhp_store_callbacks(enum cpuhp_state state,
1591 int (*startup)(unsigned int cpu),
1592 int (*teardown)(unsigned int cpu),
1593 bool multi_instance)
1595 /* (Un)Install the callbacks for further cpu hotplug operations */
1596 struct cpuhp_step *sp;
1598 sp = cpuhp_get_step(state);
1599 sp->startup.single = startup;
1600 sp->teardown.single = teardown;
1602 sp->multi_instance = multi_instance;
1603 INIT_HLIST_HEAD(&sp->list);
1606 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1608 return cpuhp_get_step(state)->teardown.single;
1612 * Call the startup/teardown function for a step either on the AP or
1613 * on the current CPU.
1615 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1616 struct hlist_node *node)
1618 struct cpuhp_step *sp = cpuhp_get_step(state);
1621 if ((bringup && !sp->startup.single) ||
1622 (!bringup && !sp->teardown.single))
1625 * The non-AP-bound callbacks can fail on bringup. On teardown
1626 * e.g. module removal we crash for now.
1629 if (cpuhp_is_ap_state(state))
1630 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1632 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1634 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1636 BUG_ON(ret && !bringup);
1641 * Called from __cpuhp_setup_state on a recoverable failure.
1643 * Note: The teardown callbacks for rollback are not allowed to fail!
1645 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1646 struct hlist_node *node)
1650 /* Roll back the already executed steps on the other cpus */
1651 for_each_present_cpu(cpu) {
1652 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1653 int cpustate = st->state;
1655 if (cpu >= failedcpu)
1658 /* Did we invoke the startup call on that cpu ? */
1659 if (cpustate >= state)
1660 cpuhp_issue_call(cpu, state, false, node);
1665 * Returns a free slot for dynamic assignment in the Online state space. The
1666 * states are protected by the cpuhp_state_mutex and an empty slot is identified
1667 * by having no name assigned.
1669 static int cpuhp_reserve_state(enum cpuhp_state state)
1673 for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1674 if (cpuhp_ap_states[i].name)
1677 cpuhp_ap_states[i].name = "Reserved";
1680 WARN(1, "No more dynamic states available for CPU hotplug\n");
1684 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1687 struct cpuhp_step *sp;
1691 sp = cpuhp_get_step(state);
1692 if (sp->multi_instance == false)
1696 mutex_lock(&cpuhp_state_mutex);
1698 if (!invoke || !sp->startup.multi)
1702 * Try to call the startup callback for each present cpu
1703 * depending on the hotplug state of the cpu.
1705 for_each_present_cpu(cpu) {
1706 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1707 int cpustate = st->state;
1709 if (cpustate < state)
1712 ret = cpuhp_issue_call(cpu, state, true, node);
1714 if (sp->teardown.multi)
1715 cpuhp_rollback_install(cpu, state, node);
1721 hlist_add_head(node, &sp->list);
1724 mutex_unlock(&cpuhp_state_mutex);
1728 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
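/*
 * Usage sketch (illustrative): a driver managing several devices embeds a
 * hlist_node per instance and lets the core run its callbacks once per
 * instance. struct foo_device, foo_cpu_online() and foo_cpu_offline() are
 * hypothetical; the multi callbacks take (cpu, node) as above.
 *
 *	struct foo_device {
 *		struct hlist_node node;
 *	};
 *
 *	// returns the allocated state for CPUHP_AP_ONLINE_DYN, < 0 on error
 *	foo_hp_state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					       "foo:online",
 *					       foo_cpu_online,
 *					       foo_cpu_offline);
 *	...
 *	ret = cpuhp_state_add_instance(foo_hp_state, &dev->node);
 */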
1731 * __cpuhp_setup_state - Set up the callbacks for a hotplug machine state
1732 * @state: The state to setup
1733 * @invoke: If true, the startup function is invoked for cpus where
1734 * cpu state >= @state
1735 * @startup: startup callback function
1736 * @teardown: teardown callback function
1738 * Returns 0 on success (the allocated state number for CPUHP_AP_ONLINE_DYN), otherwise a negative error code
1740 int __cpuhp_setup_state(enum cpuhp_state state,
1741 const char *name, bool invoke,
1742 int (*startup)(unsigned int cpu),
1743 int (*teardown)(unsigned int cpu),
1744 bool multi_instance)
1749 if (cpuhp_cb_check(state) || !name)
1753 mutex_lock(&cpuhp_state_mutex);
1755 /* currently, dynamic slot assignment is only possible for the ONLINE state */
1756 if (state == CPUHP_AP_ONLINE_DYN) {
1758 ret = cpuhp_reserve_state(state);
1764 cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
1766 if (!invoke || !startup)
1770 * Try to call the startup callback for each present cpu
1771 * depending on the hotplug state of the cpu.
1773 for_each_present_cpu(cpu) {
1774 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1775 int cpustate = st->state;
1777 if (cpustate < state)
1780 ret = cpuhp_issue_call(cpu, state, true, NULL);
1783 cpuhp_rollback_install(cpu, state, NULL);
1784 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1789 mutex_unlock(&cpuhp_state_mutex);
1792 if (!ret && dyn_state)
1796 EXPORT_SYMBOL(__cpuhp_setup_state);
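/*
 * Usage sketch (illustrative): most users go through the cpuhp_setup_state()
 * wrapper with CPUHP_AP_ONLINE_DYN and keep the returned state number so the
 * callbacks can be removed again. foo_online()/foo_offline() are hypothetical
 * per-CPU callbacks.
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
 *				foo_online, foo_offline);
 *	if (ret < 0)
 *		return ret;
 *	foo_hp_state = ret;	// dynamic request returns the allocated state
 *	...
 *	cpuhp_remove_state(foo_hp_state);	// e.g. on module unload
 */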
1798 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1799 struct hlist_node *node, bool invoke)
1801 struct cpuhp_step *sp = cpuhp_get_step(state);
1804 BUG_ON(cpuhp_cb_check(state));
1806 if (!sp->multi_instance)
1810 mutex_lock(&cpuhp_state_mutex);
1812 if (!invoke || !cpuhp_get_teardown_cb(state))
1815 * Call the teardown callback for each present cpu depending
1816 * on the hotplug state of the cpu. This function is not
1817 * allowed to fail currently!
1819 for_each_present_cpu(cpu) {
1820 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1821 int cpustate = st->state;
1823 if (cpustate >= state)
1824 cpuhp_issue_call(cpu, state, false, node);
1829 mutex_unlock(&cpuhp_state_mutex);
1834 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1836 * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1837 * @state: The state to remove
1838 * @invoke: If true, the teardown function is invoked for cpus where
1839 * cpu state >= @state
1841 * The teardown callback is currently not allowed to fail. Think
1842 * about module removal!
1844 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1846 struct cpuhp_step *sp = cpuhp_get_step(state);
1849 BUG_ON(cpuhp_cb_check(state));
1852 mutex_lock(&cpuhp_state_mutex);
1854 if (sp->multi_instance) {
1855 WARN(!hlist_empty(&sp->list),
1856 "Error: Removing state %d which has instances left.\n",
1861 if (!invoke || !cpuhp_get_teardown_cb(state))
1865 * Call the teardown callback for each present cpu depending
1866 * on the hotplug state of the cpu. This function is not
1867 * allowed to fail currently!
1869 for_each_present_cpu(cpu) {
1870 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1871 int cpustate = st->state;
1873 if (cpustate >= state)
1874 cpuhp_issue_call(cpu, state, false, NULL);
1877 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1878 mutex_unlock(&cpuhp_state_mutex);
1881 EXPORT_SYMBOL(__cpuhp_remove_state);
1883 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1884 static ssize_t show_cpuhp_state(struct device *dev,
1885 struct device_attribute *attr, char *buf)
1887 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1889 return sprintf(buf, "%d\n", st->state);
1891 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1893 static ssize_t write_cpuhp_target(struct device *dev,
1894 struct device_attribute *attr,
1895 const char *buf, size_t count)
1897 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1898 struct cpuhp_step *sp;
1901 ret = kstrtoint(buf, 10, &target);
1905 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1906 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1909 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1913 ret = lock_device_hotplug_sysfs();
1917 mutex_lock(&cpuhp_state_mutex);
1918 sp = cpuhp_get_step(target);
1919 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1920 mutex_unlock(&cpuhp_state_mutex);
1924 if (st->state < target)
1925 ret = do_cpu_up(dev->id, target);
1927 ret = do_cpu_down(dev->id, target);
1929 unlock_device_hotplug();
1930 return ret ? ret : count;
1933 static ssize_t show_cpuhp_target(struct device *dev,
1934 struct device_attribute *attr, char *buf)
1936 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1938 return sprintf(buf, "%d\n", st->target);
1940 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
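/*
 * Usage note (assumption: the attribute group is published under the
 * "hotplug" directory of each CPU device): the files show up as
 * /sys/devices/system/cpu/cpuN/hotplug/{state,target}. Writing a state
 * number to 'target' walks the state machine towards it, e.g.
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *
 * takes cpu1 down to CPUHP_OFFLINE, roughly equivalent to writing 0 to the
 * regular 'online' attribute.
 */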
1942 static struct attribute *cpuhp_cpu_attrs[] = {
1943 &dev_attr_state.attr,
1944 &dev_attr_target.attr,
1948 static struct attribute_group cpuhp_cpu_attr_group = {
1949 .attrs = cpuhp_cpu_attrs,
1954 static ssize_t show_cpuhp_states(struct device *dev,
1955 struct device_attribute *attr, char *buf)
1957 ssize_t cur, res = 0;
1960 mutex_lock(&cpuhp_state_mutex);
1961 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
1962 struct cpuhp_step *sp = cpuhp_get_step(i);
1965 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1970 mutex_unlock(&cpuhp_state_mutex);
1973 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1975 static struct attribute *cpuhp_cpu_root_attrs[] = {
1976 &dev_attr_states.attr,
1980 static struct attribute_group cpuhp_cpu_root_attr_group = {
1981 .attrs = cpuhp_cpu_root_attrs,
1986 #ifdef CONFIG_HOTPLUG_SMT
1988 static const char *smt_states[] = {
1989 [CPU_SMT_ENABLED] = "on",
1990 [CPU_SMT_DISABLED] = "off",
1991 [CPU_SMT_FORCE_DISABLED] = "forceoff",
1992 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
1996 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
1998 return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
2001 static void cpuhp_offline_cpu_device(unsigned int cpu)
2003 struct device *dev = get_cpu_device(cpu);
2005 dev->offline = true;
2006 /* Tell user space about the state change */
2007 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2010 static void cpuhp_online_cpu_device(unsigned int cpu)
2012 struct device *dev = get_cpu_device(cpu);
2014 dev->offline = false;
2015 /* Tell user space about the state change */
2016 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2019 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2023 cpu_maps_update_begin();
2024 for_each_online_cpu(cpu) {
2025 if (topology_is_primary_thread(cpu))
2027 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2031 * As this needs to hold the cpu maps lock it's impossible
2032 * to call device_offline() because that ends up calling
2033 * cpu_down() which takes cpu maps lock. cpu maps lock
2035 * needs to be held as this might race against in-kernel
2035 * abusers of the hotplug machinery (thermal management).
2037 * So nothing would update device:offline state. That would
2038 * leave the sysfs entry stale and prevent onlining after
2039 * smt control has been changed to 'off' again. This is
2040 * called under the sysfs hotplug lock, so it is properly
2041 * serialized against the regular offline usage.
2043 cpuhp_offline_cpu_device(cpu);
2046 cpu_smt_control = ctrlval;
2047 cpu_maps_update_done();
2051 int cpuhp_smt_enable(void)
2055 cpu_maps_update_begin();
2056 cpu_smt_control = CPU_SMT_ENABLED;
2057 for_each_present_cpu(cpu) {
2058 /* Skip online CPUs and CPUs on offline nodes */
2059 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2061 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2064 /* See comment in cpuhp_smt_disable() */
2065 cpuhp_online_cpu_device(cpu);
2067 cpu_maps_update_done();
2072 store_smt_control(struct device *dev, struct device_attribute *attr,
2073 const char *buf, size_t count)
2077 if (sysfs_streq(buf, "on"))
2078 ctrlval = CPU_SMT_ENABLED;
2079 else if (sysfs_streq(buf, "off"))
2080 ctrlval = CPU_SMT_DISABLED;
2081 else if (sysfs_streq(buf, "forceoff"))
2082 ctrlval = CPU_SMT_FORCE_DISABLED;
2086 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2089 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2092 ret = lock_device_hotplug_sysfs();
2096 if (ctrlval != cpu_smt_control) {
2098 case CPU_SMT_ENABLED:
2099 ret = cpuhp_smt_enable();
2101 case CPU_SMT_DISABLED:
2102 case CPU_SMT_FORCE_DISABLED:
2103 ret = cpuhp_smt_disable(ctrlval);
2108 unlock_device_hotplug();
2109 return ret ? ret : count;
2111 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2114 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2116 bool active = topology_max_smt_threads() > 1;
2118 return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2120 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
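/*
 * Usage note (assumption: the group below is published as the "smt"
 * directory of the cpu subsystem): the control strings from smt_states[]
 * are accepted via /sys/devices/system/cpu/smt/control, e.g.
 *
 *	echo off > /sys/devices/system/cpu/smt/control
 *
 * offlines all secondary siblings, while 'forceoff' additionally prevents
 * re-enabling SMT later; /sys/devices/system/cpu/smt/active reports whether
 * more than one thread per core is currently online.
 */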
2122 static struct attribute *cpuhp_smt_attrs[] = {
2123 &dev_attr_control.attr,
2124 &dev_attr_active.attr,
2128 static const struct attribute_group cpuhp_smt_attr_group = {
2129 .attrs = cpuhp_smt_attrs,
2134 static int __init cpu_smt_state_init(void)
2136 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2137 &cpuhp_smt_attr_group);
2141 static inline int cpu_smt_state_init(void) { return 0; }
2144 static int __init cpuhp_sysfs_init(void)
2148 ret = cpu_smt_state_init();
2152 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2153 &cpuhp_cpu_root_attr_group);
2157 for_each_possible_cpu(cpu) {
2158 struct device *dev = get_cpu_device(cpu);
2162 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2168 device_initcall(cpuhp_sysfs_init);
2172 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2173 * represents the NR_CPUS single-bit values 1<<nr.
2175 * It is used by cpumask_of() to get a constant address to a CPU
2176 * mask value that has a single bit set only.
2179 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2180 #define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
2181 #define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2182 #define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2183 #define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2185 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2187 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2188 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2189 #if BITS_PER_LONG > 32
2190 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2191 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
2194 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
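/*
 * Example: cpumask_of(cpu) (via get_cpu_mask() in <linux/cpumask.h>) returns
 *
 *	(const struct cpumask *)(cpu_bit_bitmap[1 + cpu % BITS_PER_LONG]
 *				 - cpu / BITS_PER_LONG)
 *
 * i.e. a pointer into this table whose bit 'cpu' is the only bit set, which
 * is why row 0 must stay empty. Typical use in this file:
 *
 *	stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
 */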
2196 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2197 EXPORT_SYMBOL(cpu_all_bits);
2199 #ifdef CONFIG_INIT_ALL_POSSIBLE
2200 struct cpumask __cpu_possible_mask __read_mostly
2203 struct cpumask __cpu_possible_mask __read_mostly;
2205 EXPORT_SYMBOL(__cpu_possible_mask);
2207 struct cpumask __cpu_online_mask __read_mostly;
2208 EXPORT_SYMBOL(__cpu_online_mask);
2210 struct cpumask __cpu_present_mask __read_mostly;
2211 EXPORT_SYMBOL(__cpu_present_mask);
2213 struct cpumask __cpu_active_mask __read_mostly;
2214 EXPORT_SYMBOL(__cpu_active_mask);
2216 void init_cpu_present(const struct cpumask *src)
2218 cpumask_copy(&__cpu_present_mask, src);
2221 void init_cpu_possible(const struct cpumask *src)
2223 cpumask_copy(&__cpu_possible_mask, src);
2226 void init_cpu_online(const struct cpumask *src)
2228 cpumask_copy(&__cpu_online_mask, src);
2232 * Activate the first processor.
2234 void __init boot_cpu_init(void)
2236 int cpu = smp_processor_id();
2238 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2239 set_cpu_online(cpu, true);
2240 set_cpu_active(cpu, true);
2241 set_cpu_present(cpu, true);
2242 set_cpu_possible(cpu, true);
2246 * Must be called _AFTER_ setting up the per_cpu areas
2248 void __init boot_cpu_hotplug_init(void)
2251 this_cpu_write(cpuhp_state.booted_once, true);
2253 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2257 * These are used for a global "mitigations=" cmdline option for toggling
2258 * optional CPU mitigations.
2260 enum cpu_mitigations {
2261 CPU_MITIGATIONS_OFF,
2262 CPU_MITIGATIONS_AUTO,
2263 CPU_MITIGATIONS_AUTO_NOSMT,
2266 static enum cpu_mitigations cpu_mitigations __ro_after_init =
2267 CPU_MITIGATIONS_AUTO;
2269 static int __init mitigations_parse_cmdline(char *arg)
2271 if (!strcmp(arg, "off"))
2272 cpu_mitigations = CPU_MITIGATIONS_OFF;
2273 else if (!strcmp(arg, "auto"))
2274 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2275 else if (!strcmp(arg, "auto,nosmt"))
2276 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2278 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2283 early_param("mitigations", mitigations_parse_cmdline);
2285 /* mitigations=off */
2286 bool cpu_mitigations_off(void)
2288 return cpu_mitigations == CPU_MITIGATIONS_OFF;
2290 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2292 /* mitigations=auto,nosmt */
2293 bool cpu_mitigations_auto_nosmt(void)
2295 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2297 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);
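/*
 * Usage sketch (illustrative): architecture bug-mitigation code consults
 * these helpers when deciding what to enable. disable_smt_for_mitigation()
 * is a hypothetical arch-side action.
 *
 *	if (cpu_mitigations_off())
 *		return;				// "mitigations=off"
 *	if (cpu_mitigations_auto_nosmt())
 *		disable_smt_for_mitigation();	// "mitigations=auto,nosmt"
 */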