GNU Linux-libre 4.9.290-gnu1
kernel/cpu.c
1 /* CPU control.
2  * (C) 2001, 2002, 2003, 2004 Rusty Russell
3  *
4  * This code is licensed under the GPL.
5  */
6 #include <linux/proc_fs.h>
7 #include <linux/smp.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched.h>
11 #include <linux/sched/smt.h>
12 #include <linux/unistd.h>
13 #include <linux/cpu.h>
14 #include <linux/oom.h>
15 #include <linux/rcupdate.h>
16 #include <linux/export.h>
17 #include <linux/bug.h>
18 #include <linux/kthread.h>
19 #include <linux/stop_machine.h>
20 #include <linux/mutex.h>
21 #include <linux/gfp.h>
22 #include <linux/suspend.h>
23 #include <linux/lockdep.h>
24 #include <linux/tick.h>
25 #include <linux/irq.h>
26 #include <linux/smpboot.h>
27 #include <linux/relay.h>
28 #include <linux/slab.h>
29
30 #include <trace/events/power.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/cpuhp.h>
33
34 #include "smpboot.h"
35
36 /**
37  * cpuhp_cpu_state - Per cpu hotplug state storage
38  * @state:      The current cpu state
39  * @target:     The target state
40  * @thread:     Pointer to the hotplug thread
41  * @should_run: Thread should execute
42  * @rollback:   Perform a rollback
43  * @single:     Single callback invocation
44  * @bringup:    Single callback bringup or teardown selector
 * @booted_once: Set once the CPU has been brought up at least once
 * @node:       Instance node passed to the AP hotplug thread for a single
 *              multi-instance callback invocation
45  * @cb_state:   The state for a single callback (install/uninstall)
46  * @result:     Result of the operation
47  * @done:       Signal completion to the issuer of the task
48  */
49 struct cpuhp_cpu_state {
50         enum cpuhp_state        state;
51         enum cpuhp_state        target;
52 #ifdef CONFIG_SMP
53         struct task_struct      *thread;
54         bool                    should_run;
55         bool                    rollback;
56         bool                    single;
57         bool                    bringup;
58         bool                    booted_once;
59         struct hlist_node       *node;
60         enum cpuhp_state        cb_state;
61         int                     result;
62         struct completion       done;
63 #endif
64 };
65
66 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
67
68 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
69 static struct lock_class_key cpuhp_state_key;
70 static struct lockdep_map cpuhp_state_lock_map =
71         STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
72 #endif
73
74 /**
75  * cpuhp_step - Hotplug state machine step
76  * @name:       Name of the step
77  * @startup:    Startup function of the step
78  * @teardown:   Teardown function of the step
79  * @skip_onerr: Do not invoke the functions on error rollback
80  *              Will go away once the notifiers are gone
81  * @cant_stop:  Bringup/teardown can't be stopped at this step
82  */
83 struct cpuhp_step {
84         const char              *name;
85         union {
86                 int             (*single)(unsigned int cpu);
87                 int             (*multi)(unsigned int cpu,
88                                          struct hlist_node *node);
89         } startup;
90         union {
91                 int             (*single)(unsigned int cpu);
92                 int             (*multi)(unsigned int cpu,
93                                          struct hlist_node *node);
94         } teardown;
95         struct hlist_head       list;
96         bool                    skip_onerr;
97         bool                    cant_stop;
98         bool                    multi_instance;
99 };
100
101 static DEFINE_MUTEX(cpuhp_state_mutex);
102 static struct cpuhp_step cpuhp_bp_states[];
103 static struct cpuhp_step cpuhp_ap_states[];
104
105 static bool cpuhp_is_ap_state(enum cpuhp_state state)
106 {
107         /*
108          * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
109          * purposes as that state is handled explicitly in cpu_down.
110          */
111         return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
112 }
113
114 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
115 {
116         struct cpuhp_step *sp;
117
118         sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
119         return sp + state;
120 }
121
122 /**
123  * cpuhp_invoke_callback - Invoke the callbacks for a given state
124  * @cpu:        The cpu for which the callback should be invoked
125  * @state:      The state for which the callbacks are invoked
126  * @bringup:    True if the bringup callback should be invoked
 * @node:       For a multi-instance state, the instance to operate on;
 *              NULL invokes the callback for all registered instances
127  *
128  * Called from cpu hotplug and from the state register machinery.
129  */
130 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
131                                  bool bringup, struct hlist_node *node)
132 {
133         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
134         struct cpuhp_step *step = cpuhp_get_step(state);
135         int (*cbm)(unsigned int cpu, struct hlist_node *node);
136         int (*cb)(unsigned int cpu);
137         int ret, cnt;
138
139         if (!step->multi_instance) {
140                 cb = bringup ? step->startup.single : step->teardown.single;
141                 if (!cb)
142                         return 0;
143                 trace_cpuhp_enter(cpu, st->target, state, cb);
144                 ret = cb(cpu);
145                 trace_cpuhp_exit(cpu, st->state, state, ret);
146                 return ret;
147         }
148         cbm = bringup ? step->startup.multi : step->teardown.multi;
149         if (!cbm)
150                 return 0;
151
152         /* Single invocation for instance add/remove */
153         if (node) {
154                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
155                 ret = cbm(cpu, node);
156                 trace_cpuhp_exit(cpu, st->state, state, ret);
157                 return ret;
158         }
159
160         /* State transition. Invoke on all instances */
161         cnt = 0;
162         hlist_for_each(node, &step->list) {
163                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
164                 ret = cbm(cpu, node);
165                 trace_cpuhp_exit(cpu, st->state, state, ret);
166                 if (ret)
167                         goto err;
168                 cnt++;
169         }
170         return 0;
171 err:
172         /* Rollback the instances if one failed */
173         cbm = !bringup ? step->startup.multi : step->teardown.multi;
174         if (!cbm)
175                 return ret;
176
177         hlist_for_each(node, &step->list) {
178                 if (!cnt--)
179                         break;
180                 cbm(cpu, node);
181         }
182         return ret;
183 }
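
/*
 * Illustrative sketch (not part of this file): subsystems normally reach this
 * state machine through the cpuhp_setup_state*() helpers declared in
 * <linux/cpuhotplug.h> rather than by calling cpuhp_invoke_callback()
 * directly.  A driver that only needs per-CPU online/offline callbacks would
 * register a dynamic state; all foo_* names below are made up:
 *
 *	static int foo_cpu_online(unsigned int cpu)
 *	{
 *		return foo_init_percpu(cpu);
 *	}
 *
 *	static int foo_cpu_offline(unsigned int cpu)
 *	{
 *		foo_cleanup_percpu(cpu);
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
 *				foo_cpu_online, foo_cpu_offline);
 *
 * For a multi-instance state the startup.multi/teardown.multi callbacks are
 * used instead, and each instance is added with cpuhp_state_add_instance()
 * after cpuhp_setup_state_multi().
 */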
184
185 #ifdef CONFIG_SMP
186 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
187 static DEFINE_MUTEX(cpu_add_remove_lock);
188 bool cpuhp_tasks_frozen;
189 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
190
191 /*
192  * The following two APIs (cpu_maps_update_begin/done) must be used when
193  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
194  * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
195  * hotplug callback (un)registration performed using __register_cpu_notifier()
196  * or __unregister_cpu_notifier().
197  */
198 void cpu_maps_update_begin(void)
199 {
200         mutex_lock(&cpu_add_remove_lock);
201 }
202 EXPORT_SYMBOL(cpu_notifier_register_begin);
203
204 void cpu_maps_update_done(void)
205 {
206         mutex_unlock(&cpu_add_remove_lock);
207 }
208 EXPORT_SYMBOL(cpu_notifier_register_done);
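
/*
 * Illustrative sketch (not part of this file): the comment above describes
 * the canonical pattern for race-free notifier registration.  A caller that
 * has to initialize the already-online CPUs and then catch future hotplug
 * events would do (the foo_* names are made up):
 *
 *	cpu_notifier_register_begin();
 *	for_each_online_cpu(cpu)
 *		foo_init_cpu(cpu);
 *	__register_cpu_notifier(&foo_cpu_notifier);
 *	cpu_notifier_register_done();
 */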
209
210 static RAW_NOTIFIER_HEAD(cpu_chain);
211
212 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
213  * Should always be manipulated under cpu_add_remove_lock
214  */
215 static int cpu_hotplug_disabled;
216
217 #ifdef CONFIG_HOTPLUG_CPU
218
219 static struct {
220         struct task_struct *active_writer;
221         /* wait queue to wake up the active_writer */
222         wait_queue_head_t wq;
223         /* verifies that no writer will get active while readers are active */
224         struct mutex lock;
225         /*
226          * Also blocks the new readers during
227          * an ongoing cpu hotplug operation.
228          */
229         atomic_t refcount;
230
231 #ifdef CONFIG_DEBUG_LOCK_ALLOC
232         struct lockdep_map dep_map;
233 #endif
234 } cpu_hotplug = {
235         .active_writer = NULL,
236         .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
237         .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
238 #ifdef CONFIG_DEBUG_LOCK_ALLOC
239         .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
240 #endif
241 };
242
243 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
244 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
245 #define cpuhp_lock_acquire_tryread() \
246                                   lock_map_acquire_tryread(&cpu_hotplug.dep_map)
247 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
248 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
249
250
251 void get_online_cpus(void)
252 {
253         might_sleep();
254         if (cpu_hotplug.active_writer == current)
255                 return;
256         cpuhp_lock_acquire_read();
257         mutex_lock(&cpu_hotplug.lock);
258         atomic_inc(&cpu_hotplug.refcount);
259         mutex_unlock(&cpu_hotplug.lock);
260 }
261 EXPORT_SYMBOL_GPL(get_online_cpus);
262
263 void put_online_cpus(void)
264 {
265         int refcount;
266
267         if (cpu_hotplug.active_writer == current)
268                 return;
269
270         refcount = atomic_dec_return(&cpu_hotplug.refcount);
271         if (WARN_ON(refcount < 0)) /* try to fix things up */
272                 atomic_inc(&cpu_hotplug.refcount);
273
274         if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
275                 wake_up(&cpu_hotplug.wq);
276
277         cpuhp_lock_release();
278
279 }
280 EXPORT_SYMBOL_GPL(put_online_cpus);
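
/*
 * Illustrative sketch (not part of this file): get_online_cpus() and
 * put_online_cpus() form a sleepable read-side critical section against CPU
 * hotplug.  Typical use in a driver (foo_update() is a made-up helper):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		foo_update(cpu);
 *	put_online_cpus();
 *
 * Readers may nest and may sleep, but must not call into the hotplug write
 * side (cpu_up()/cpu_down()) while holding the reference, as that would
 * deadlock against cpu_hotplug_begin() below.
 */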
281
282 /*
283  * This ensures that the hotplug operation can begin only when the
284  * refcount goes to zero.
285  *
286  * Note that during a cpu-hotplug operation, the new readers, if any,
287  * will be blocked by the cpu_hotplug.lock
288  *
289  * Since cpu_hotplug_begin() is always called after invoking
290  * cpu_maps_update_begin(), we can be sure that only one writer is active.
291  *
292  * Note that theoretically, there is a possibility of a livelock:
293  * - Refcount goes to zero, last reader wakes up the sleeping
294  *   writer.
295  * - Last reader unlocks the cpu_hotplug.lock.
296  * - A new reader arrives at this moment, bumps up the refcount.
297  * - The writer acquires the cpu_hotplug.lock, finds the refcount
298  *   non-zero and goes to sleep again.
299  *
300  * However, this is very difficult to achieve in practice since
301  * get_online_cpus() is not an API which is called all that often.
302  *
303  */
304 void cpu_hotplug_begin(void)
305 {
306         DEFINE_WAIT(wait);
307
308         cpu_hotplug.active_writer = current;
309         cpuhp_lock_acquire();
310
311         for (;;) {
312                 mutex_lock(&cpu_hotplug.lock);
313                 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
314                 if (likely(!atomic_read(&cpu_hotplug.refcount)))
315                         break;
316                 mutex_unlock(&cpu_hotplug.lock);
317                 schedule();
318         }
319         finish_wait(&cpu_hotplug.wq, &wait);
320 }
321
322 void cpu_hotplug_done(void)
323 {
324         cpu_hotplug.active_writer = NULL;
325         mutex_unlock(&cpu_hotplug.lock);
326         cpuhp_lock_release();
327 }
328
329 /*
330  * Wait for currently running CPU hotplug operations to complete (if any) and
331  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
332  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
333  * hotplug path before performing hotplug operations. So acquiring that lock
334  * guarantees mutual exclusion from any currently running hotplug operations.
335  */
336 void cpu_hotplug_disable(void)
337 {
338         cpu_maps_update_begin();
339         cpu_hotplug_disabled++;
340         cpu_maps_update_done();
341 }
342 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
343
344 static void __cpu_hotplug_enable(void)
345 {
346         if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
347                 return;
348         cpu_hotplug_disabled--;
349 }
350
351 void cpu_hotplug_enable(void)
352 {
353         cpu_maps_update_begin();
354         __cpu_hotplug_enable();
355         cpu_maps_update_done();
356 }
357 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
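
/*
 * Illustrative sketch (not part of this file): cpu_hotplug_disable() and
 * cpu_hotplug_enable() are reference counted and must always be paired.  A
 * caller that needs the set of online CPUs to stay fixed across a longer,
 * sleepable section could use (the foo_* name is made up):
 *
 *	cpu_hotplug_disable();
 *	foo_operate_on_online_cpus();
 *	cpu_hotplug_enable();
 *
 * Unlike get_online_cpus(), this makes concurrent hotplug attempts fail with
 * -EBUSY instead of blocking them until the section ends.
 */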
358 #endif  /* CONFIG_HOTPLUG_CPU */
359
360 /*
361  * Architectures that need SMT-specific errata handling during SMT hotplug
362  * should override this.
363  */
364 void __weak arch_smt_update(void) { }
365
366 #ifdef CONFIG_HOTPLUG_SMT
367 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
368 EXPORT_SYMBOL_GPL(cpu_smt_control);
369
370 static bool cpu_smt_available __read_mostly;
371
372 void __init cpu_smt_disable(bool force)
373 {
374         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
375                 cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
376                 return;
377
378         if (force) {
379                 pr_info("SMT: Force disabled\n");
380                 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
381         } else {
382                 pr_info("SMT: disabled\n");
383                 cpu_smt_control = CPU_SMT_DISABLED;
384         }
385 }
386
387 /*
388  * Whether SMT is supported can only be determined after full CPU
389  * identification. Called from architecture code before non-boot CPUs
390  * are brought up.
391  */
392 void __init cpu_smt_check_topology_early(void)
393 {
394         if (!topology_smt_supported())
395                 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
396 }
397
398 /*
399  * If SMT was disabled by BIOS, detect it here, after the CPUs have been
400  * brought online. This ensures the smt/l1tf sysfs entries are consistent
401  * with reality. cpu_smt_available is set to true during the bringup of
402  * non-boot CPUs when an SMT sibling is detected. Note, this may overwrite
403  * cpu_smt_control's previous setting.
404  */
405 void __init cpu_smt_check_topology(void)
406 {
407         if (!cpu_smt_available)
408                 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
409 }
410
411 static int __init smt_cmdline_disable(char *str)
412 {
413         cpu_smt_disable(str && !strcmp(str, "force"));
414         return 0;
415 }
416 early_param("nosmt", smt_cmdline_disable);
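
/*
 * Usage note: as implemented above, "nosmt" on the kernel command line
 * disables bringup of SMT sibling threads (CPU_SMT_DISABLED), while
 * "nosmt=force" disables SMT irrevocably for this boot
 * (CPU_SMT_FORCE_DISABLED):
 *
 *	nosmt
 *	nosmt=force
 */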
417
418 static inline bool cpu_smt_allowed(unsigned int cpu)
419 {
420         if (topology_is_primary_thread(cpu))
421                 return true;
422
423         /*
424          * If the CPU is not a 'primary' thread and the booted_once bit is
425          * set then the processor has SMT support. Store this information
426          * for the late check of SMT support in cpu_smt_check_topology().
427          */
428         if (per_cpu(cpuhp_state, cpu).booted_once)
429                 cpu_smt_available = true;
430
431         if (cpu_smt_control == CPU_SMT_ENABLED)
432                 return true;
433
434         /*
435          * On x86 it's required to boot all logical CPUs at least once so
436          * that the init code can get a chance to set CR4.MCE on each
437          * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
438          * core will shut down the machine.
439          */
440         return !per_cpu(cpuhp_state, cpu).booted_once;
441 }
442 #else
443 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
444 #endif
445
446 /* Need to know about CPUs going up/down? */
447 int register_cpu_notifier(struct notifier_block *nb)
448 {
449         int ret;
450         cpu_maps_update_begin();
451         ret = raw_notifier_chain_register(&cpu_chain, nb);
452         cpu_maps_update_done();
453         return ret;
454 }
455
456 int __register_cpu_notifier(struct notifier_block *nb)
457 {
458         return raw_notifier_chain_register(&cpu_chain, nb);
459 }
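
/*
 * Illustrative sketch (not part of this file): a legacy notifier registered
 * through the functions above receives CPU_* actions and returns notifier
 * codes.  All foo_* names are made up:
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_UP_PREPARE:
 *			return notifier_from_errno(foo_prepare(cpu));
 *		case CPU_DEAD:
 *			foo_cleanup(cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&foo_cpu_notifier);
 */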
460
461 static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
462                         int *nr_calls)
463 {
464         unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
465         void *hcpu = (void *)(long)cpu;
466
467         int ret;
468
469         ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
470                                         nr_calls);
471
472         return notifier_to_errno(ret);
473 }
474
475 static int cpu_notify(unsigned long val, unsigned int cpu)
476 {
477         return __cpu_notify(val, cpu, -1, NULL);
478 }
479
480 static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
481 {
482         BUG_ON(cpu_notify(val, cpu));
483 }
484
485 /* Notifier wrappers for transitioning to state machine */
486 static int notify_prepare(unsigned int cpu)
487 {
488         int nr_calls = 0;
489         int ret;
490
491         ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
492         if (ret) {
493                 nr_calls--;
494                 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
495                                 __func__, cpu);
496                 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
497         }
498         return ret;
499 }
500
501 static int notify_online(unsigned int cpu)
502 {
503         cpu_notify(CPU_ONLINE, cpu);
504         return 0;
505 }
506
507 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
508
509 static int bringup_wait_for_ap(unsigned int cpu)
510 {
511         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
512
513         /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
514         wait_for_completion(&st->done);
515         if (WARN_ON_ONCE((!cpu_online(cpu))))
516                 return -ECANCELED;
517
518         /* Unpark the hotplug thread of the target cpu */
519         kthread_unpark(st->thread);
520
521         /*
522          * SMT soft disabling on X86 requires to bring the CPU out of the
523          * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
524          * CPU marked itself as booted_once in notify_cpu_starting() so the
525          * cpu_smt_allowed() check will now return false if this is not the
526          * primary sibling.
527          */
528         if (!cpu_smt_allowed(cpu))
529                 return -ECANCELED;
530
531         /* Should we go further up? */
532         if (st->target > CPUHP_AP_ONLINE_IDLE) {
533                 __cpuhp_kick_ap_work(st);
534                 wait_for_completion(&st->done);
535         }
536         return st->result;
537 }
538
539 static int bringup_cpu(unsigned int cpu)
540 {
541         struct task_struct *idle = idle_thread_get(cpu);
542         int ret;
543
544         /*
545          * Some architectures have to walk the irq descriptors to
546          * setup the vector space for the cpu which comes online.
547          * Prevent irq alloc/free across the bringup.
548          */
549         irq_lock_sparse();
550
551         /* Arch-specific enabling code. */
552         ret = __cpu_up(cpu, idle);
553         irq_unlock_sparse();
554         if (ret) {
555                 cpu_notify(CPU_UP_CANCELED, cpu);
556                 return ret;
557         }
558         return bringup_wait_for_ap(cpu);
559 }
560
561 /*
562  * Hotplug state machine related functions
563  */
564 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
565 {
566         for (st->state++; st->state < st->target; st->state++) {
567                 struct cpuhp_step *step = cpuhp_get_step(st->state);
568
569                 if (!step->skip_onerr)
570                         cpuhp_invoke_callback(cpu, st->state, true, NULL);
571         }
572 }
573
574 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
575                                 enum cpuhp_state target)
576 {
577         enum cpuhp_state prev_state = st->state;
578         int ret = 0;
579
580         for (; st->state > target; st->state--) {
581                 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
582                 if (ret) {
583                         st->target = prev_state;
584                         undo_cpu_down(cpu, st);
585                         break;
586                 }
587         }
588         return ret;
589 }
590
591 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
592 {
593         for (st->state--; st->state > st->target; st->state--) {
594                 struct cpuhp_step *step = cpuhp_get_step(st->state);
595
596                 if (!step->skip_onerr)
597                         cpuhp_invoke_callback(cpu, st->state, false, NULL);
598         }
599 }
600
601 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
602 {
603         if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
604                 return true;
605         /*
606          * When CPU hotplug is disabled, then taking the CPU down is not
607          * possible because takedown_cpu() and the architecture and
608          * subsystem specific mechanisms are not available. So the CPU
609          * which would be completely unplugged again needs to stay around
610          * in the current state.
611          */
612         return st->state <= CPUHP_BRINGUP_CPU;
613 }
614
615 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
616                               enum cpuhp_state target)
617 {
618         enum cpuhp_state prev_state = st->state;
619         int ret = 0;
620
621         while (st->state < target) {
622                 st->state++;
623                 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
624                 if (ret) {
625                         if (can_rollback_cpu(st)) {
626                                 st->target = prev_state;
627                                 undo_cpu_up(cpu, st);
628                         }
629                         break;
630                 }
631         }
632         return ret;
633 }
634
635 /*
636  * The cpu hotplug threads manage the bringup and teardown of the cpus
637  */
638 static void cpuhp_create(unsigned int cpu)
639 {
640         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
641
642         init_completion(&st->done);
643 }
644
645 static int cpuhp_should_run(unsigned int cpu)
646 {
647         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
648
649         return st->should_run;
650 }
651
652 /* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
653 static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
654 {
655         enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
656
657         return cpuhp_down_callbacks(cpu, st, target);
658 }
659
660 /* Execute the online startup callbacks. Used to be CPU_ONLINE */
661 static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
662 {
663         return cpuhp_up_callbacks(cpu, st, st->target);
664 }
665
666 /*
667  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
668  * callbacks when a state gets [un]installed at runtime.
669  */
670 static void cpuhp_thread_fun(unsigned int cpu)
671 {
672         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
673         int ret = 0;
674
675         /*
676          * Paired with the mb() in cpuhp_kick_ap_work and
677          * cpuhp_invoke_ap_callback, so the work set is consistently visible.
678          */
679         smp_mb();
680         if (!st->should_run)
681                 return;
682
683         st->should_run = false;
684
685         lock_map_acquire(&cpuhp_state_lock_map);
686         /* Single callback invocation for [un]install ? */
687         if (st->single) {
688                 if (st->cb_state < CPUHP_AP_ONLINE) {
689                         local_irq_disable();
690                         ret = cpuhp_invoke_callback(cpu, st->cb_state,
691                                                     st->bringup, st->node);
692                         local_irq_enable();
693                 } else {
694                         ret = cpuhp_invoke_callback(cpu, st->cb_state,
695                                                     st->bringup, st->node);
696                 }
697         } else if (st->rollback) {
698                 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
699
700                 undo_cpu_down(cpu, st);
701                 /*
702                  * This is a momentary workaround to keep the notifier users
703                  * happy. Will go away once we get rid of the notifiers.
704                  */
705                 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
706                 st->rollback = false;
707         } else {
708                 /* Cannot happen .... */
709                 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
710
711                 /* Regular hotplug work */
712                 if (st->state < st->target)
713                         ret = cpuhp_ap_online(cpu, st);
714                 else if (st->state > st->target)
715                         ret = cpuhp_ap_offline(cpu, st);
716         }
717         lock_map_release(&cpuhp_state_lock_map);
718         st->result = ret;
719         complete(&st->done);
720 }
721
722 /* Invoke a single callback on a remote cpu */
723 static int
724 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
725                          struct hlist_node *node)
726 {
727         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
728
729         if (!cpu_online(cpu))
730                 return 0;
731
732         lock_map_acquire(&cpuhp_state_lock_map);
733         lock_map_release(&cpuhp_state_lock_map);
734
735         /*
736          * If we are up and running, use the hotplug thread. For early calls
737          * we invoke the thread function directly.
738          */
739         if (!st->thread)
740                 return cpuhp_invoke_callback(cpu, state, bringup, node);
741
742         st->cb_state = state;
743         st->single = true;
744         st->bringup = bringup;
745         st->node = node;
746
747         /*
748          * Make sure the above stores are visible before should_run becomes
749          * true. Paired with the mb() above in cpuhp_thread_fun()
750          */
751         smp_mb();
752         st->should_run = true;
753         wake_up_process(st->thread);
754         wait_for_completion(&st->done);
755         return st->result;
756 }
757
758 /* Regular hotplug invocation of the AP hotplug thread */
759 static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
760 {
761         st->result = 0;
762         st->single = false;
763         /*
764          * Make sure the above stores are visible before should_run becomes
765          * true. Paired with the mb() above in cpuhp_thread_fun()
766          */
767         smp_mb();
768         st->should_run = true;
769         wake_up_process(st->thread);
770 }
771
772 static int cpuhp_kick_ap_work(unsigned int cpu)
773 {
774         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
775         enum cpuhp_state state = st->state;
776
777         trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
778         lock_map_acquire(&cpuhp_state_lock_map);
779         lock_map_release(&cpuhp_state_lock_map);
780         __cpuhp_kick_ap_work(st);
781         wait_for_completion(&st->done);
782         trace_cpuhp_exit(cpu, st->state, state, st->result);
783         return st->result;
784 }
785
786 static struct smp_hotplug_thread cpuhp_threads = {
787         .store                  = &cpuhp_state.thread,
788         .create                 = &cpuhp_create,
789         .thread_should_run      = cpuhp_should_run,
790         .thread_fn              = cpuhp_thread_fun,
791         .thread_comm            = "cpuhp/%u",
792         .selfparking            = true,
793 };
794
795 void __init cpuhp_threads_init(void)
796 {
797         BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
798         kthread_unpark(this_cpu_read(cpuhp_state.thread));
799 }
800
801 EXPORT_SYMBOL(register_cpu_notifier);
802 EXPORT_SYMBOL(__register_cpu_notifier);
803 void unregister_cpu_notifier(struct notifier_block *nb)
804 {
805         cpu_maps_update_begin();
806         raw_notifier_chain_unregister(&cpu_chain, nb);
807         cpu_maps_update_done();
808 }
809 EXPORT_SYMBOL(unregister_cpu_notifier);
810
811 void __unregister_cpu_notifier(struct notifier_block *nb)
812 {
813         raw_notifier_chain_unregister(&cpu_chain, nb);
814 }
815 EXPORT_SYMBOL(__unregister_cpu_notifier);
816
817 #ifdef CONFIG_HOTPLUG_CPU
818 #ifndef arch_clear_mm_cpumask_cpu
819 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
820 #endif
821
822 /**
823  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
824  * @cpu: a CPU id
825  *
826  * This function walks all processes, finds a valid mm struct for each one and
827  * then clears a corresponding bit in mm's cpumask.  While this all sounds
828  * trivial, there are various non-obvious corner cases, which this function
829  * tries to solve in a safe manner.
830  *
831  * Also note that the function uses a somewhat relaxed locking scheme, so it may
832  * be called only for an already offlined CPU.
833  */
834 void clear_tasks_mm_cpumask(int cpu)
835 {
836         struct task_struct *p;
837
838         /*
839          * This function is called after the cpu is taken down and marked
840          * offline, so it's not like new tasks will ever get this cpu set in
841          * their mm mask. -- Peter Zijlstra
842          * Thus, we may use rcu_read_lock() here, instead of grabbing
843          * full-fledged tasklist_lock.
844          */
845         WARN_ON(cpu_online(cpu));
846         rcu_read_lock();
847         for_each_process(p) {
848                 struct task_struct *t;
849
850                 /*
851                  * Main thread might exit, but other threads may still have
852                  * a valid mm. Find one.
853                  */
854                 t = find_lock_task_mm(p);
855                 if (!t)
856                         continue;
857                 arch_clear_mm_cpumask_cpu(cpu, t->mm);
858                 task_unlock(t);
859         }
860         rcu_read_unlock();
861 }
862
863 static inline void check_for_tasks(int dead_cpu)
864 {
865         struct task_struct *g, *p;
866
867         read_lock(&tasklist_lock);
868         for_each_process_thread(g, p) {
869                 if (!p->on_rq)
870                         continue;
871                 /*
872                  * We do the check with unlocked task_rq(p)->lock.
873                  * Order the reading so that we do not warn about a task
874                  * which was running on this cpu in the past and has just
875                  * been woken on another cpu.
876                  */
877                 rmb();
878                 if (task_cpu(p) != dead_cpu)
879                         continue;
880
881                 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
882                         p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
883         }
884         read_unlock(&tasklist_lock);
885 }
886
887 static int notify_down_prepare(unsigned int cpu)
888 {
889         int err, nr_calls = 0;
890
891         err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
892         if (err) {
893                 nr_calls--;
894                 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
895                 pr_warn("%s: attempt to take down CPU %u failed\n",
896                                 __func__, cpu);
897         }
898         return err;
899 }
900
901 /* Take this CPU down. */
902 static int take_cpu_down(void *_param)
903 {
904         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
905         enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
906         int err, cpu = smp_processor_id();
907
908         /* Ensure this CPU doesn't handle any more interrupts. */
909         err = __cpu_disable();
910         if (err < 0)
911                 return err;
912
913         /*
914          * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
915          * do this step again.
916          */
917         WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
918         st->state--;
919         /* Invoke the former CPU_DYING callbacks */
920         for (; st->state > target; st->state--)
921                 cpuhp_invoke_callback(cpu, st->state, false, NULL);
922
923         /* Give up timekeeping duties */
924         tick_handover_do_timer();
925         /* Park the stopper thread */
926         stop_machine_park(cpu);
927         return 0;
928 }
929
930 static int takedown_cpu(unsigned int cpu)
931 {
932         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
933         int err;
934
935         /* Park the smpboot threads */
936         kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
937
938         /*
939          * Prevent irq alloc/free while the dying cpu reorganizes the
940          * interrupt affinities.
941          */
942         irq_lock_sparse();
943
944         /*
945          * So now all preempt/rcu users must observe !cpu_active().
946          */
947         err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
948         if (err) {
949                 /* CPU refused to die */
950                 irq_unlock_sparse();
951                 /* Unpark the hotplug thread so we can rollback there */
952                 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
953                 return err;
954         }
955         BUG_ON(cpu_online(cpu));
956
957         /*
958          * The CPUHP_AP_SCHED_STARTING teardown callback (sched_cpu_dying)
         * will have removed all
959          * runnable tasks from the cpu, there's only the idle task left now
960          * that the migration thread is done doing the stop_machine thing.
961          *
962          * Wait for the stop thread to go away.
963          */
964         wait_for_completion(&st->done);
965         BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
966
967         /* Interrupts are moved away from the dying cpu, reenable alloc/free */
968         irq_unlock_sparse();
969
970         hotplug_cpu__broadcast_tick_pull(cpu);
971         /* This actually kills the CPU. */
972         __cpu_die(cpu);
973
974         tick_cleanup_dead_cpu(cpu);
975         return 0;
976 }
977
978 static int notify_dead(unsigned int cpu)
979 {
980         cpu_notify_nofail(CPU_DEAD, cpu);
981         check_for_tasks(cpu);
982         return 0;
983 }
984
985 static void cpuhp_complete_idle_dead(void *arg)
986 {
987         struct cpuhp_cpu_state *st = arg;
988
989         complete(&st->done);
990 }
991
992 void cpuhp_report_idle_dead(void)
993 {
994         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
995
996         BUG_ON(st->state != CPUHP_AP_OFFLINE);
997         rcu_report_dead(smp_processor_id());
998         st->state = CPUHP_AP_IDLE_DEAD;
999         /*
1000          * We cannot call complete after rcu_report_dead() so we delegate it
1001          * to an online cpu.
1002          */
1003         smp_call_function_single(cpumask_first(cpu_online_mask),
1004                                  cpuhp_complete_idle_dead, st, 0);
1005 }
1006
1007 #else
1008 #define notify_down_prepare     NULL
1009 #define takedown_cpu            NULL
1010 #define notify_dead             NULL
1011 #endif
1012
1013 #ifdef CONFIG_HOTPLUG_CPU
1014
1015 /* Requires cpu_add_remove_lock to be held */
1016 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
1017                            enum cpuhp_state target)
1018 {
1019         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1020         int prev_state, ret = 0;
1021         bool hasdied = false;
1022
1023         if (num_online_cpus() == 1)
1024                 return -EBUSY;
1025
1026         if (!cpu_present(cpu))
1027                 return -EINVAL;
1028
1029         cpu_hotplug_begin();
1030
1031         cpuhp_tasks_frozen = tasks_frozen;
1032
1033         prev_state = st->state;
1034         st->target = target;
1035         /*
1036          * If the current CPU state is in the range of the AP hotplug thread,
1037          * then we need to kick the thread.
1038          */
1039         if (st->state > CPUHP_TEARDOWN_CPU) {
1040                 ret = cpuhp_kick_ap_work(cpu);
1041                 /*
1042                  * The AP side has done the error rollback already. Just
1043                  * return the error code.
1044                  */
1045                 if (ret)
1046                         goto out;
1047
1048                 /*
1049                  * We might have stopped while still in the range of the AP
1050                  * hotplug thread. Nothing more to do here.
1051                  */
1052                 if (st->state > CPUHP_TEARDOWN_CPU)
1053                         goto out;
1054         }
1055         /*
1056          * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1057          * to do the further cleanups.
1058          */
1059         ret = cpuhp_down_callbacks(cpu, st, target);
1060         if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1061                 st->target = prev_state;
1062                 st->rollback = true;
1063                 cpuhp_kick_ap_work(cpu);
1064         }
1065
1066         hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
1067 out:
1068         cpu_hotplug_done();
1069         /* This post dead nonsense must die */
1070         if (!ret && hasdied)
1071                 cpu_notify_nofail(CPU_POST_DEAD, cpu);
1072         arch_smt_update();
1073         return ret;
1074 }
1075
1076 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1077 {
1078         if (cpu_hotplug_disabled)
1079                 return -EBUSY;
1080         return _cpu_down(cpu, 0, target);
1081 }
1082
1083 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
1084 {
1085         int err;
1086
1087         cpu_maps_update_begin();
1088         err = cpu_down_maps_locked(cpu, target);
1089         cpu_maps_update_done();
1090         return err;
1091 }
1092 int cpu_down(unsigned int cpu)
1093 {
1094         return do_cpu_down(cpu, CPUHP_OFFLINE);
1095 }
1096 EXPORT_SYMBOL(cpu_down);
1097 #endif /*CONFIG_HOTPLUG_CPU*/
1098
1099 /**
1100  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1101  * @cpu: cpu that just started
1102  *
1103  * It must be called by the arch code on the new cpu, before the new cpu
1104  * enables interrupts and before the "boot" cpu returns from __cpu_up().
1105  */
1106 void notify_cpu_starting(unsigned int cpu)
1107 {
1108         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1109         enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1110
1111         rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
1112         st->booted_once = true;
1113         while (st->state < target) {
1114                 st->state++;
1115                 cpuhp_invoke_callback(cpu, st->state, true, NULL);
1116         }
1117 }
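
/*
 * Illustrative sketch (not part of this file, details differ per
 * architecture): the secondary startup path that satisfies the requirement
 * above typically looks like
 *
 *	... low level init, interrupts still disabled ...
 *	notify_cpu_starting(cpu);
 *	set_cpu_online(cpu, true);
 *	local_irq_enable();
 *	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
 *
 * so the starting callbacks run before interrupts are enabled and before
 * __cpu_up() returns on the control processor.
 */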
1118
1119 /*
1120  * Called from the idle task. Wake up the controlling task which brings the
1121  * hotplug thread of the upcoming CPU up and then delegates the rest of the
1122  * online bringup to the hotplug thread.
1123  */
1124 void cpuhp_online_idle(enum cpuhp_state state)
1125 {
1126         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1127
1128         /* Happens for the boot cpu */
1129         if (state != CPUHP_AP_ONLINE_IDLE)
1130                 return;
1131
1132         /*
1133          * Unpark the stopper thread before we start the idle loop (and start
1134          * scheduling); this ensures the stopper task is always available.
1135          */
1136         stop_machine_unpark(smp_processor_id());
1137
1138         st->state = CPUHP_AP_ONLINE_IDLE;
1139         complete(&st->done);
1140 }
1141
1142 /* Requires cpu_add_remove_lock to be held */
1143 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1144 {
1145         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1146         struct task_struct *idle;
1147         int ret = 0;
1148
1149         cpu_hotplug_begin();
1150
1151         if (!cpu_present(cpu)) {
1152                 ret = -EINVAL;
1153                 goto out;
1154         }
1155
1156         /*
1157          * The caller of do_cpu_up might have raced with another
1158          * caller. Ignore it for now.
1159          */
1160         if (st->state >= target)
1161                 goto out;
1162
1163         if (st->state == CPUHP_OFFLINE) {
1164                 /* Let it fail before we try to bring the cpu up */
1165                 idle = idle_thread_get(cpu);
1166                 if (IS_ERR(idle)) {
1167                         ret = PTR_ERR(idle);
1168                         goto out;
1169                 }
1170         }
1171
1172         cpuhp_tasks_frozen = tasks_frozen;
1173
1174         st->target = target;
1175         /*
1176          * If the current CPU state is in the range of the AP hotplug thread,
1177          * then we need to kick the thread once more.
1178          */
1179         if (st->state > CPUHP_BRINGUP_CPU) {
1180                 ret = cpuhp_kick_ap_work(cpu);
1181                 /*
1182                  * The AP side has done the error rollback already. Just
1183                  * return the error code.
1184                  */
1185                 if (ret)
1186                         goto out;
1187         }
1188
1189         /*
1190          * Try to reach the target state. We max out on the BP at
1191          * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1192          * responsible for bringing it up to the target state.
1193          */
1194         target = min((int)target, CPUHP_BRINGUP_CPU);
1195         ret = cpuhp_up_callbacks(cpu, st, target);
1196 out:
1197         cpu_hotplug_done();
1198         arch_smt_update();
1199         return ret;
1200 }
1201
1202 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
1203 {
1204         int err = 0;
1205
1206         if (!cpu_possible(cpu)) {
1207                 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1208                        cpu);
1209 #if defined(CONFIG_IA64)
1210                 pr_err("please check additional_cpus= boot parameter\n");
1211 #endif
1212                 return -EINVAL;
1213         }
1214
1215         err = try_online_node(cpu_to_node(cpu));
1216         if (err)
1217                 return err;
1218
1219         cpu_maps_update_begin();
1220
1221         if (cpu_hotplug_disabled) {
1222                 err = -EBUSY;
1223                 goto out;
1224         }
1225         if (!cpu_smt_allowed(cpu)) {
1226                 err = -EPERM;
1227                 goto out;
1228         }
1229
1230         err = _cpu_up(cpu, 0, target);
1231 out:
1232         cpu_maps_update_done();
1233         return err;
1234 }
1235
1236 int cpu_up(unsigned int cpu)
1237 {
1238         return do_cpu_up(cpu, CPUHP_ONLINE);
1239 }
1240 EXPORT_SYMBOL_GPL(cpu_up);
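
/*
 * Illustrative sketch (not part of this file): kernel code that needs to take
 * a CPU offline and back online, e.g. for a self test, can use the exported
 * helpers directly:
 *
 *	ret = cpu_down(3);
 *	if (!ret)
 *		ret = cpu_up(3);
 *
 * Userspace reaches the same code paths through sysfs:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu3/online
 *	echo 1 > /sys/devices/system/cpu/cpu3/online
 */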
1241
1242 #ifdef CONFIG_PM_SLEEP_SMP
1243 static cpumask_var_t frozen_cpus;
1244
1245 int freeze_secondary_cpus(int primary)
1246 {
1247         int cpu, error = 0;
1248
1249         cpu_maps_update_begin();
1250         if (!cpu_online(primary))
1251                 primary = cpumask_first(cpu_online_mask);
1252         /*
1253          * We take down all of the non-boot CPUs in one shot to avoid races
1254          * with userspace trying to use CPU hotplug at the same time.
1255          */
1256         cpumask_clear(frozen_cpus);
1257
1258         pr_info("Disabling non-boot CPUs ...\n");
1259         for_each_online_cpu(cpu) {
1260                 if (cpu == primary)
1261                         continue;
1262                 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1263                 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1264                 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1265                 if (!error)
1266                         cpumask_set_cpu(cpu, frozen_cpus);
1267                 else {
1268                         pr_err("Error taking CPU%d down: %d\n", cpu, error);
1269                         break;
1270                 }
1271         }
1272
1273         if (!error)
1274                 BUG_ON(num_online_cpus() > 1);
1275         else
1276                 pr_err("Non-boot CPUs are not disabled\n");
1277
1278         /*
1279          * Make sure the CPUs won't be enabled by someone else. We need to do
1280          * this even in case of failure as all disable_nonboot_cpus() users are
1281          * supposed to do enable_nonboot_cpus() on the failure path.
1282          */
1283         cpu_hotplug_disabled++;
1284
1285         cpu_maps_update_done();
1286         return error;
1287 }
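
/*
 * Illustrative sketch (not part of this file): the suspend/hibernate core
 * reaches this via disable_nonboot_cpus(), which is freeze_secondary_cpus(0),
 * and pairs it with enable_nonboot_cpus() even on failure, as required by the
 * comment above:
 *
 *	error = disable_nonboot_cpus();
 *	if (!error)
 *		... enter the sleep state ...
 *	enable_nonboot_cpus();
 */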
1288
1289 void __weak arch_enable_nonboot_cpus_begin(void)
1290 {
1291 }
1292
1293 void __weak arch_enable_nonboot_cpus_end(void)
1294 {
1295 }
1296
1297 void enable_nonboot_cpus(void)
1298 {
1299         int cpu, error;
1300
1301         /* Allow everyone to use the CPU hotplug again */
1302         cpu_maps_update_begin();
1303         __cpu_hotplug_enable();
1304         if (cpumask_empty(frozen_cpus))
1305                 goto out;
1306
1307         pr_info("Enabling non-boot CPUs ...\n");
1308
1309         arch_enable_nonboot_cpus_begin();
1310
1311         for_each_cpu(cpu, frozen_cpus) {
1312                 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1313                 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1314                 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1315                 if (!error) {
1316                         pr_info("CPU%d is up\n", cpu);
1317                         continue;
1318                 }
1319                 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1320         }
1321
1322         arch_enable_nonboot_cpus_end();
1323
1324         cpumask_clear(frozen_cpus);
1325 out:
1326         cpu_maps_update_done();
1327 }
1328
1329 static int __init alloc_frozen_cpus(void)
1330 {
1331         if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1332                 return -ENOMEM;
1333         return 0;
1334 }
1335 core_initcall(alloc_frozen_cpus);
1336
1337 /*
1338  * When callbacks for CPU hotplug notifications are being executed, we must
1339  * ensure that the state of the system with respect to the tasks being frozen
1340  * or not, as reported by the notification, remains unchanged *throughout the
1341  * duration* of the execution of the callbacks.
1342  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1343  *
1344  * This synchronization is implemented by mutually excluding regular CPU
1345  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1346  * Hibernate notifications.
1347  */
1348 static int
1349 cpu_hotplug_pm_callback(struct notifier_block *nb,
1350                         unsigned long action, void *ptr)
1351 {
1352         switch (action) {
1353
1354         case PM_SUSPEND_PREPARE:
1355         case PM_HIBERNATION_PREPARE:
1356                 cpu_hotplug_disable();
1357                 break;
1358
1359         case PM_POST_SUSPEND:
1360         case PM_POST_HIBERNATION:
1361                 cpu_hotplug_enable();
1362                 break;
1363
1364         default:
1365                 return NOTIFY_DONE;
1366         }
1367
1368         return NOTIFY_OK;
1369 }
1370
1371
1372 static int __init cpu_hotplug_pm_sync_init(void)
1373 {
1374         /*
1375          * cpu_hotplug_pm_callback has higher priority than x86
1376          * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
1377          * having disabled cpu hotplug first, to avoid a hotplug race.
1378          */
1379         pm_notifier(cpu_hotplug_pm_callback, 0);
1380         return 0;
1381 }
1382 core_initcall(cpu_hotplug_pm_sync_init);
1383
1384 #endif /* CONFIG_PM_SLEEP_SMP */
1385
1386 #endif /* CONFIG_SMP */
1387
1388 /* Boot processor state steps */
1389 static struct cpuhp_step cpuhp_bp_states[] = {
1390         [CPUHP_OFFLINE] = {
1391                 .name                   = "offline",
1392                 .startup.single         = NULL,
1393                 .teardown.single        = NULL,
1394         },
1395 #ifdef CONFIG_SMP
1396         [CPUHP_CREATE_THREADS] = {
1397                 .name                   = "threads:prepare",
1398                 .startup.single         = smpboot_create_threads,
1399                 .teardown.single        = NULL,
1400                 .cant_stop              = true,
1401         },
1402         [CPUHP_PERF_PREPARE] = {
1403                 .name                   = "perf:prepare",
1404                 .startup.single         = perf_event_init_cpu,
1405                 .teardown.single        = perf_event_exit_cpu,
1406         },
1407         [CPUHP_WORKQUEUE_PREP] = {
1408                 .name                   = "workqueue:prepare",
1409                 .startup.single         = workqueue_prepare_cpu,
1410                 .teardown.single        = NULL,
1411         },
1412         [CPUHP_HRTIMERS_PREPARE] = {
1413                 .name                   = "hrtimers:prepare",
1414                 .startup.single         = hrtimers_prepare_cpu,
1415                 .teardown.single        = hrtimers_dead_cpu,
1416         },
1417         [CPUHP_SMPCFD_PREPARE] = {
1418                 .name                   = "smpcfd:prepare",
1419                 .startup.single         = smpcfd_prepare_cpu,
1420                 .teardown.single        = smpcfd_dead_cpu,
1421         },
1422         [CPUHP_RELAY_PREPARE] = {
1423                 .name                   = "relay:prepare",
1424                 .startup.single         = relay_prepare_cpu,
1425                 .teardown.single        = NULL,
1426         },
1427         [CPUHP_SLAB_PREPARE] = {
1428                 .name                   = "slab:prepare",
1429                 .startup.single         = slab_prepare_cpu,
1430                 .teardown.single        = slab_dead_cpu,
1431         },
1432         [CPUHP_RCUTREE_PREP] = {
1433                 .name                   = "RCU/tree:prepare",
1434                 .startup.single         = rcutree_prepare_cpu,
1435                 .teardown.single        = rcutree_dead_cpu,
1436         },
1437         /*
1438          * Preparatory and dead notifiers. Will be replaced once the notifiers
1439          * are converted to states.
1440          */
1441         [CPUHP_NOTIFY_PREPARE] = {
1442                 .name                   = "notify:prepare",
1443                 .startup.single         = notify_prepare,
1444                 .teardown.single        = notify_dead,
1445                 .skip_onerr             = true,
1446                 .cant_stop              = true,
1447         },
1448         /*
1449          * On the tear-down path, timers_dead_cpu() must be invoked
1450          * before blk_mq_queue_reinit_notify() from notify_dead(),
1451          * otherwise an RCU stall occurs.
1452          */
1453         [CPUHP_TIMERS_PREPARE] = {
1454                 .name                   = "timers:dead",
1455                 .startup.single         = timers_prepare_cpu,
1456                 .teardown.single        = timers_dead_cpu,
1457         },
1458         /* Kicks the plugged cpu into life */
1459         [CPUHP_BRINGUP_CPU] = {
1460                 .name                   = "cpu:bringup",
1461                 .startup.single         = bringup_cpu,
1462                 .teardown.single        = NULL,
1463                 .cant_stop              = true,
1464         },
1465         /*
1466          * Handled on the control processor until the plugged processor manages
1467          * this itself.
1468          */
1469         [CPUHP_TEARDOWN_CPU] = {
1470                 .name                   = "cpu:teardown",
1471                 .startup.single         = NULL,
1472                 .teardown.single        = takedown_cpu,
1473                 .cant_stop              = true,
1474         },
1475 #else
1476         [CPUHP_BRINGUP_CPU] = { },
1477 #endif
1478 };
1479
1480 /* Application processor state steps */
1481 static struct cpuhp_step cpuhp_ap_states[] = {
1482 #ifdef CONFIG_SMP
1483         /* Final state before CPU kills itself */
1484         [CPUHP_AP_IDLE_DEAD] = {
1485                 .name                   = "idle:dead",
1486         },
1487         /*
1488          * Last state before CPU enters the idle loop to die. Transient state
1489          * for synchronization.
1490          */
1491         [CPUHP_AP_OFFLINE] = {
1492                 .name                   = "ap:offline",
1493                 .cant_stop              = true,
1494         },
1495         /* First state is scheduler control. Interrupts are disabled */
1496         [CPUHP_AP_SCHED_STARTING] = {
1497                 .name                   = "sched:starting",
1498                 .startup.single         = sched_cpu_starting,
1499                 .teardown.single        = sched_cpu_dying,
1500         },
1501         [CPUHP_AP_RCUTREE_DYING] = {
1502                 .name                   = "RCU/tree:dying",
1503                 .startup.single         = NULL,
1504                 .teardown.single        = rcutree_dying_cpu,
1505         },
1506         [CPUHP_AP_SMPCFD_DYING] = {
1507                 .name                   = "smpcfd:dying",
1508                 .startup.single         = NULL,
1509                 .teardown.single        = smpcfd_dying_cpu,
1510         },
1511         /* Entry state on starting. Interrupts enabled from here on. Transient
1512          * state for synchronization */
1513         [CPUHP_AP_ONLINE] = {
1514                 .name                   = "ap:online",
1515         },
1516         /* Handle smpboot threads park/unpark */
1517         [CPUHP_AP_SMPBOOT_THREADS] = {
1518                 .name                   = "smpboot/threads:online",
1519                 .startup.single         = smpboot_unpark_threads,
1520                 .teardown.single        = smpboot_park_threads,
1521         },
1522         [CPUHP_AP_PERF_ONLINE] = {
1523                 .name                   = "perf:online",
1524                 .startup.single         = perf_event_init_cpu,
1525                 .teardown.single        = perf_event_exit_cpu,
1526         },
1527         [CPUHP_AP_WORKQUEUE_ONLINE] = {
1528                 .name                   = "workqueue:online",
1529                 .startup.single         = workqueue_online_cpu,
1530                 .teardown.single        = workqueue_offline_cpu,
1531         },
1532         [CPUHP_AP_RCUTREE_ONLINE] = {
1533                 .name                   = "RCU/tree:online",
1534                 .startup.single         = rcutree_online_cpu,
1535                 .teardown.single        = rcutree_offline_cpu,
1536         },
1537
1538         /*
1539          * Online/down_prepare notifiers. Will be removed once the notifiers
1540          * are converted to states.
1541          */
1542         [CPUHP_AP_NOTIFY_ONLINE] = {
1543                 .name                   = "notify:online",
1544                 .startup.single         = notify_online,
1545                 .teardown.single        = notify_down_prepare,
1546                 .skip_onerr             = true,
1547         },
1548 #endif
1549         /*
1550          * The dynamically registered state space is here
1551          */
1552
1553 #ifdef CONFIG_SMP
1554         /* Last state is scheduler control setting the cpu active */
1555         [CPUHP_AP_ACTIVE] = {
1556                 .name                   = "sched:active",
1557                 .startup.single         = sched_cpu_activate,
1558                 .teardown.single        = sched_cpu_deactivate,
1559         },
1560 #endif
1561
1562         /* CPU is fully up and running. */
1563         [CPUHP_ONLINE] = {
1564                 .name                   = "online",
1565                 .startup.single         = NULL,
1566                 .teardown.single        = NULL,
1567         },
1568 };
1569
1570 /* Sanity check for callbacks */
1571 static int cpuhp_cb_check(enum cpuhp_state state)
1572 {
1573         if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1574                 return -EINVAL;
1575         return 0;
1576 }
1577
1578 static void cpuhp_store_callbacks(enum cpuhp_state state,
1579                                   const char *name,
1580                                   int (*startup)(unsigned int cpu),
1581                                   int (*teardown)(unsigned int cpu),
1582                                   bool multi_instance)
1583 {
1584         /* (Un)Install the callbacks for further cpu hotplug operations */
1585         struct cpuhp_step *sp;
1586
1587         sp = cpuhp_get_step(state);
1588         sp->startup.single = startup;
1589         sp->teardown.single = teardown;
1590         sp->name = name;
1591         sp->multi_instance = multi_instance;
1592         INIT_HLIST_HEAD(&sp->list);
1593 }
1594
1595 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1596 {
1597         return cpuhp_get_step(state)->teardown.single;
1598 }
1599
1600 /*
1601  * Call the startup/teardown function for a step either on the AP or
1602  * on the current CPU.
1603  */
1604 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1605                             struct hlist_node *node)
1606 {
1607         struct cpuhp_step *sp = cpuhp_get_step(state);
1608         int ret;
1609
1610         if ((bringup && !sp->startup.single) ||
1611             (!bringup && !sp->teardown.single))
1612                 return 0;
1613         /*
1614          * The non-AP-bound callbacks can fail on bringup. On teardown,
1615          * e.g. on module removal, we crash for now.
1616          */
1617 #ifdef CONFIG_SMP
1618         if (cpuhp_is_ap_state(state))
1619                 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1620         else
1621                 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1622 #else
1623         ret = cpuhp_invoke_callback(cpu, state, bringup, node);
1624 #endif
1625         BUG_ON(ret && !bringup);
1626         return ret;
1627 }
1628
1629 /*
1630  * Called from __cpuhp_setup_state on a recoverable failure.
1631  *
1632  * Note: The teardown callbacks for rollback are not allowed to fail!
1633  */
1634 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1635                                    struct hlist_node *node)
1636 {
1637         int cpu;
1638
1639         /* Roll back the already executed steps on the other cpus */
1640         for_each_present_cpu(cpu) {
1641                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1642                 int cpustate = st->state;
1643
1644                 if (cpu >= failedcpu)
1645                         break;
1646
1647                 /* Did we invoke the startup call on that cpu? */
1648                 if (cpustate >= state)
1649                         cpuhp_issue_call(cpu, state, false, node);
1650         }
1651 }
1652
1653 /*
1654  * Return a free slot for dynamic assignment in the Online state range. The
1655  * states are protected by the cpuhp_state_mutex and an empty slot is
1656  * identified by having no name assigned.
1657  */
1658 static int cpuhp_reserve_state(enum cpuhp_state state)
1659 {
1660         enum cpuhp_state i;
1661
1662         for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
1663                 if (cpuhp_ap_states[i].name)
1664                         continue;
1665
1666                 cpuhp_ap_states[i].name = "Reserved";
1667                 return i;
1668         }
1669         WARN(1, "No more dynamic states available for CPU hotplug\n");
1670         return -ENOSPC;
1671 }
1672
1673 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1674                                bool invoke)
1675 {
1676         struct cpuhp_step *sp;
1677         int cpu;
1678         int ret;
1679
1680         sp = cpuhp_get_step(state);
1681         if (sp->multi_instance == false)
1682                 return -EINVAL;
1683
1684         get_online_cpus();
1685         mutex_lock(&cpuhp_state_mutex);
1686
1687         if (!invoke || !sp->startup.multi)
1688                 goto add_node;
1689
1690         /*
1691          * Try to call the startup callback for each present cpu
1692          * depending on the hotplug state of the cpu.
1693          */
1694         for_each_present_cpu(cpu) {
1695                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1696                 int cpustate = st->state;
1697
1698                 if (cpustate < state)
1699                         continue;
1700
1701                 ret = cpuhp_issue_call(cpu, state, true, node);
1702                 if (ret) {
1703                         if (sp->teardown.multi)
1704                                 cpuhp_rollback_install(cpu, state, node);
1705                         goto err;
1706                 }
1707         }
1708 add_node:
1709         ret = 0;
1710         hlist_add_head(node, &sp->list);
1711
1712 err:
1713         mutex_unlock(&cpuhp_state_mutex);
1714         put_online_cpus();
1715         return ret;
1716 }
1717 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
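
/*
 * Illustrative sketch, not part of the original file: how a driver might
 * use the multi-instance variant above.  The state itself is assumed to
 * have been set up with multi_instance == true (typically through the
 * cpuhp_setup_state_multi() wrapper in <linux/cpuhotplug.h>); all names
 * prefixed with "exdrv_" are hypothetical.
 */
#if 0
struct exdrv_device {
        struct hlist_node       node;   /* linked into the state's instance list */
        /* per device data */
};

static int exdrv_instance_online(unsigned int cpu, struct hlist_node *node)
{
        struct exdrv_device *exdev = hlist_entry(node, struct exdrv_device, node);

        /* Bring @exdev up on @cpu; a failure here triggers rollback */
        return 0;
}

static int exdrv_probe_one(struct exdrv_device *exdev, enum cpuhp_state state)
{
        /*
         * With invoke == true the startup.multi callback runs for this
         * node on every present CPU which has already reached @state.
         */
        return __cpuhp_state_add_instance(state, &exdev->node, true);
}
#endif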
1718
1719 /**
1720  * __cpuhp_setup_state - Set up the callbacks for a hotplug machine state
1721  * @state:      The state to set up
1722  * @invoke:     If true, the startup function is invoked for cpus where
1723  *              cpu state >= @state
1724  * @startup:    startup callback function
1725  * @teardown:   teardown callback function
1726  * Returns 0 on success, the reserved state number when @state is
1727  * CPUHP_AP_ONLINE_DYN, or a negative error code on failure
1728  */
1729 int __cpuhp_setup_state(enum cpuhp_state state,
1730                         const char *name, bool invoke,
1731                         int (*startup)(unsigned int cpu),
1732                         int (*teardown)(unsigned int cpu),
1733                         bool multi_instance)
1734 {
1735         int cpu, ret = 0;
1736         int dyn_state = 0;
1737
1738         if (cpuhp_cb_check(state) || !name)
1739                 return -EINVAL;
1740
1741         get_online_cpus();
1742         mutex_lock(&cpuhp_state_mutex);
1743
1744         /* currently, dynamic assignment is only possible for the ONLINE state */
1745         if (state == CPUHP_AP_ONLINE_DYN) {
1746                 dyn_state = 1;
1747                 ret = cpuhp_reserve_state(state);
1748                 if (ret < 0)
1749                         goto out;
1750                 state = ret;
1751         }
1752
1753         cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
1754
1755         if (!invoke || !startup)
1756                 goto out;
1757
1758         /*
1759          * Try to call the startup callback for each present cpu
1760          * depending on the hotplug state of the cpu.
1761          */
1762         for_each_present_cpu(cpu) {
1763                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1764                 int cpustate = st->state;
1765
1766                 if (cpustate < state)
1767                         continue;
1768
1769                 ret = cpuhp_issue_call(cpu, state, true, NULL);
1770                 if (ret) {
1771                         if (teardown)
1772                                 cpuhp_rollback_install(cpu, state, NULL);
1773                         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1774                         goto out;
1775                 }
1776         }
1777 out:
1778         mutex_unlock(&cpuhp_state_mutex);
1779
1780         put_online_cpus();
1781         if (!ret && dyn_state)
1782                 return state;
1783         return ret;
1784 }
1785 EXPORT_SYMBOL(__cpuhp_setup_state);
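
/*
 * Illustrative sketch, not part of the original file: registering a
 * dynamically allocated online state through the function above (callers
 * typically use the cpuhp_setup_state() wrapper in <linux/cpuhotplug.h>).
 * Passing CPUHP_AP_ONLINE_DYN reserves a free slot and, on success, the
 * reserved state number is returned.  The "exsub_" names are hypothetical.
 */
#if 0
static enum cpuhp_state exsub_online_state;

static int exsub_cpu_online(unsigned int cpu)
{
        /* Per-CPU setup; a failure here is rolled back on the other CPUs */
        return 0;
}

static int exsub_cpu_offline(unsigned int cpu)
{
        /* Per-CPU teardown; not allowed to fail */
        return 0;
}

static int __init exsub_init(void)
{
        int ret;

        ret = __cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "exsub:online", true,
                                  exsub_cpu_online, exsub_cpu_offline, false);
        if (ret < 0)
                return ret;

        /* For CPUHP_AP_ONLINE_DYN the reserved state number is returned */
        exsub_online_state = ret;
        return 0;
}
#endif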
1786
1787 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1788                                   struct hlist_node *node, bool invoke)
1789 {
1790         struct cpuhp_step *sp = cpuhp_get_step(state);
1791         int cpu;
1792
1793         BUG_ON(cpuhp_cb_check(state));
1794
1795         if (!sp->multi_instance)
1796                 return -EINVAL;
1797
1798         get_online_cpus();
1799         mutex_lock(&cpuhp_state_mutex);
1800
1801         if (!invoke || !cpuhp_get_teardown_cb(state))
1802                 goto remove;
1803         /*
1804          * Call the teardown callback for each present cpu depending
1805          * on the hotplug state of the cpu. This function is not
1806          * allowed to fail currently!
1807          */
1808         for_each_present_cpu(cpu) {
1809                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1810                 int cpustate = st->state;
1811
1812                 if (cpustate >= state)
1813                         cpuhp_issue_call(cpu, state, false, node);
1814         }
1815
1816 remove:
1817         hlist_del(node);
1818         mutex_unlock(&cpuhp_state_mutex);
1819         put_online_cpus();
1820
1821         return 0;
1822 }
1823 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
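
/*
 * Illustrative counterpart to the sketch after __cpuhp_state_add_instance(),
 * not part of the original file: dropping one instance again, e.g. on device
 * removal.  With invoke == true the teardown callback runs for this node on
 * all present CPUs at or above the state before the node is removed from the
 * state's instance list.
 */
#if 0
static void exdrv_remove_one(struct exdrv_device *exdev, enum cpuhp_state state)
{
        __cpuhp_state_remove_instance(state, &exdev->node, true);
}
#endif
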
1824 /**
1825  * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1826  * @state:      The state to remove
1827  * @invoke:     If true, the teardown function is invoked for cpus where
1828  *              cpu state >= @state
1829  *
1830  * The teardown callback is currently not allowed to fail. Think
1831  * about module removal!
1832  */
1833 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1834 {
1835         struct cpuhp_step *sp = cpuhp_get_step(state);
1836         int cpu;
1837
1838         BUG_ON(cpuhp_cb_check(state));
1839
1840         get_online_cpus();
1841         mutex_lock(&cpuhp_state_mutex);
1842
1843         if (sp->multi_instance) {
1844                 WARN(!hlist_empty(&sp->list),
1845                      "Error: Removing state %d which has instances left.\n",
1846                      state);
1847                 goto remove;
1848         }
1849
1850         if (!invoke || !cpuhp_get_teardown_cb(state))
1851                 goto remove;
1852
1853         /*
1854          * Call the teardown callback for each present cpu depending
1855          * on the hotplug state of the cpu. This function is not
1856          * allowed to fail currently!
1857          */
1858         for_each_present_cpu(cpu) {
1859                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1860                 int cpustate = st->state;
1861
1862                 if (cpustate >= state)
1863                         cpuhp_issue_call(cpu, state, false, NULL);
1864         }
1865 remove:
1866         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1867         mutex_unlock(&cpuhp_state_mutex);
1868         put_online_cpus();
1869 }
1870 EXPORT_SYMBOL(__cpuhp_remove_state);
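
/*
 * Illustrative counterpart to the sketch after __cpuhp_setup_state(), not
 * part of the original file: removing the dynamically allocated state again,
 * e.g. on module unload.  With invoke == true the teardown callback runs on
 * every present CPU at or above the state before the callbacks are cleared.
 */
#if 0
static void exsub_exit(void)
{
        __cpuhp_remove_state(exsub_online_state, true);
}
#endif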
1871
1872 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1873 static ssize_t show_cpuhp_state(struct device *dev,
1874                                 struct device_attribute *attr, char *buf)
1875 {
1876         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1877
1878         return sprintf(buf, "%d\n", st->state);
1879 }
1880 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1881
1882 static ssize_t write_cpuhp_target(struct device *dev,
1883                                   struct device_attribute *attr,
1884                                   const char *buf, size_t count)
1885 {
1886         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1887         struct cpuhp_step *sp;
1888         int target, ret;
1889
1890         ret = kstrtoint(buf, 10, &target);
1891         if (ret)
1892                 return ret;
1893
1894 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1895         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1896                 return -EINVAL;
1897 #else
1898         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1899                 return -EINVAL;
1900 #endif
1901
1902         ret = lock_device_hotplug_sysfs();
1903         if (ret)
1904                 return ret;
1905
1906         mutex_lock(&cpuhp_state_mutex);
1907         sp = cpuhp_get_step(target);
1908         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1909         mutex_unlock(&cpuhp_state_mutex);
1910         if (ret)
1911                 goto out;
1912
1913         if (st->state < target)
1914                 ret = do_cpu_up(dev->id, target);
1915         else
1916                 ret = do_cpu_down(dev->id, target);
1917 out:
1918         unlock_device_hotplug();
1919         return ret ? ret : count;
1920 }
1921
1922 static ssize_t show_cpuhp_target(struct device *dev,
1923                                  struct device_attribute *attr, char *buf)
1924 {
1925         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1926
1927         return sprintf(buf, "%d\n", st->target);
1928 }
1929 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1930
1931 static struct attribute *cpuhp_cpu_attrs[] = {
1932         &dev_attr_state.attr,
1933         &dev_attr_target.attr,
1934         NULL
1935 };
1936
1937 static struct attribute_group cpuhp_cpu_attr_group = {
1938         .attrs = cpuhp_cpu_attrs,
1939         .name = "hotplug",
1941 };
1942
1943 static ssize_t show_cpuhp_states(struct device *dev,
1944                                  struct device_attribute *attr, char *buf)
1945 {
1946         ssize_t cur, res = 0;
1947         int i;
1948
1949         mutex_lock(&cpuhp_state_mutex);
1950         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
1951                 struct cpuhp_step *sp = cpuhp_get_step(i);
1952
1953                 if (sp->name) {
1954                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1955                         buf += cur;
1956                         res += cur;
1957                 }
1958         }
1959         mutex_unlock(&cpuhp_state_mutex);
1960         return res;
1961 }
1962 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1963
1964 static struct attribute *cpuhp_cpu_root_attrs[] = {
1965         &dev_attr_states.attr,
1966         NULL
1967 };
1968
1969 static struct attribute_group cpuhp_cpu_root_attr_group = {
1970         .attrs = cpuhp_cpu_root_attrs,
1971         .name = "hotplug",
1973 };
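
/*
 * The two attribute groups above are registered by cpuhp_sysfs_init() below;
 * assuming the standard cpu subsystem layout they show up as
 *
 *   /sys/devices/system/cpu/hotplug/states       (all named states)
 *   /sys/devices/system/cpu/cpuN/hotplug/state   (current state of cpuN)
 *   /sys/devices/system/cpu/cpuN/hotplug/target  (writable target state)
 *
 * Writing a state number to "target" drives the CPU up or down to that state
 * via do_cpu_up()/do_cpu_down() above.
 */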
1974
1975 #ifdef CONFIG_HOTPLUG_SMT
1976
1977 static const char *smt_states[] = {
1978         [CPU_SMT_ENABLED]               = "on",
1979         [CPU_SMT_DISABLED]              = "off",
1980         [CPU_SMT_FORCE_DISABLED]        = "forceoff",
1981         [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
1982 };
1983
1984 static ssize_t
1985 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
1986 {
1987         return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
1988 }
1989
1990 static void cpuhp_offline_cpu_device(unsigned int cpu)
1991 {
1992         struct device *dev = get_cpu_device(cpu);
1993
1994         dev->offline = true;
1995         /* Tell user space about the state change */
1996         kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
1997 }
1998
1999 static void cpuhp_online_cpu_device(unsigned int cpu)
2000 {
2001         struct device *dev = get_cpu_device(cpu);
2002
2003         dev->offline = false;
2004         /* Tell user space about the state change */
2005         kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2006 }
2007
2008 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2009 {
2010         int cpu, ret = 0;
2011
2012         cpu_maps_update_begin();
2013         for_each_online_cpu(cpu) {
2014                 if (topology_is_primary_thread(cpu))
2015                         continue;
2016                 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2017                 if (ret)
2018                         break;
2019                 /*
2020                  * As this needs to hold the cpu maps lock it's impossible
2021                  * to call device_offline(), because that ends up calling
2022                  * cpu_down(), which takes the cpu maps lock. The lock has to
2023                  * be held because this might race against in-kernel abusers
2024                  * of the hotplug machinery (e.g. thermal management).
2025                  *
2026                  * So nothing would update the device's offline state. That
2027                  * would leave the sysfs entry stale and prevent onlining
2028                  * after smt control has been changed to 'off' again. Update
2029                  * it here instead; this is called under the sysfs hotplug
2030                  * lock, so it is serialized against the regular offline usage.
2031                  */
2032                 cpuhp_offline_cpu_device(cpu);
2033         }
2034         if (!ret)
2035                 cpu_smt_control = ctrlval;
2036         cpu_maps_update_done();
2037         return ret;
2038 }
2039
2040 int cpuhp_smt_enable(void)
2041 {
2042         int cpu, ret = 0;
2043
2044         cpu_maps_update_begin();
2045         cpu_smt_control = CPU_SMT_ENABLED;
2046         for_each_present_cpu(cpu) {
2047                 /* Skip online CPUs and CPUs on offline nodes */
2048                 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2049                         continue;
2050                 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2051                 if (ret)
2052                         break;
2053                 /* See comment in cpuhp_smt_disable() */
2054                 cpuhp_online_cpu_device(cpu);
2055         }
2056         cpu_maps_update_done();
2057         return ret;
2058 }
2059
2060 static ssize_t
2061 store_smt_control(struct device *dev, struct device_attribute *attr,
2062                   const char *buf, size_t count)
2063 {
2064         int ctrlval, ret;
2065
2066         if (sysfs_streq(buf, "on"))
2067                 ctrlval = CPU_SMT_ENABLED;
2068         else if (sysfs_streq(buf, "off"))
2069                 ctrlval = CPU_SMT_DISABLED;
2070         else if (sysfs_streq(buf, "forceoff"))
2071                 ctrlval = CPU_SMT_FORCE_DISABLED;
2072         else
2073                 return -EINVAL;
2074
2075         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2076                 return -EPERM;
2077
2078         if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2079                 return -ENODEV;
2080
2081         ret = lock_device_hotplug_sysfs();
2082         if (ret)
2083                 return ret;
2084
2085         if (ctrlval != cpu_smt_control) {
2086                 switch (ctrlval) {
2087                 case CPU_SMT_ENABLED:
2088                         ret = cpuhp_smt_enable();
2089                         break;
2090                 case CPU_SMT_DISABLED:
2091                 case CPU_SMT_FORCE_DISABLED:
2092                         ret = cpuhp_smt_disable(ctrlval);
2093                         break;
2094                 }
2095         }
2096
2097         unlock_device_hotplug();
2098         return ret ? ret : count;
2099 }
2100 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2101
2102 static ssize_t
2103 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2104 {
2105         bool active = topology_max_smt_threads() > 1;
2106
2107         return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
2108 }
2109 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2110
2111 static struct attribute *cpuhp_smt_attrs[] = {
2112         &dev_attr_control.attr,
2113         &dev_attr_active.attr,
2114         NULL
2115 };
2116
2117 static const struct attribute_group cpuhp_smt_attr_group = {
2118         .attrs = cpuhp_smt_attrs,
2119         .name = "smt",
2121 };
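
/*
 * The "smt" group is registered on the cpu subsystem root by
 * cpu_smt_state_init() below, so the two attributes appear as
 * /sys/devices/system/cpu/smt/control and .../smt/active.  "control"
 * reports the current state from smt_states[] and accepts "on", "off"
 * and "forceoff"; once "forceoff" is set, further writes fail with
 * -EPERM.  "active" reports whether more than one SMT thread is in use.
 */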
2122
2123 static int __init cpu_smt_state_init(void)
2124 {
2125         return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2126                                   &cpuhp_smt_attr_group);
2127 }
2128
2129 #else
2130 static inline int cpu_smt_state_init(void) { return 0; }
2131 #endif
2132
2133 static int __init cpuhp_sysfs_init(void)
2134 {
2135         int cpu, ret;
2136
2137         ret = cpu_smt_state_init();
2138         if (ret)
2139                 return ret;
2140
2141         ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2142                                  &cpuhp_cpu_root_attr_group);
2143         if (ret)
2144                 return ret;
2145
2146         for_each_possible_cpu(cpu) {
2147                 struct device *dev = get_cpu_device(cpu);
2148
2149                 if (!dev)
2150                         continue;
2151                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2152                 if (ret)
2153                         return ret;
2154         }
2155         return 0;
2156 }
2157 device_initcall(cpuhp_sysfs_init);
2158 #endif
2159
2160 /*
2161  * cpu_bit_bitmap[] is a special, "compressed" data structure that
2162  * represents, for each CPU number nr, the NR_CPUS-bit value 1 << nr.
2163  *
2164  * It is used by cpumask_of() to get a constant address to a CPU
2165  * mask value that has a single bit set only.
2166  */
2167
2168 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2169 #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
2170 #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2171 #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2172 #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2173
2174 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2175
2176         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
2177         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
2178 #if BITS_PER_LONG > 32
2179         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
2180         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
2181 #endif
2182 };
2183 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
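
/*
 * A sketch of the trick cpumask_of()/get_cpu_mask() (in <linux/cpumask.h>)
 * plays with this table; illustrative only, not a copy of that header:
 */
#if 0
static const struct cpumask *example_cpumask_of(unsigned int cpu)
{
        /* Row 1 + (cpu % BITS_PER_LONG) has exactly that bit set in word 0 */
        const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

        /*
         * Back the pointer up so that the set bit lands in word
         * cpu / BITS_PER_LONG of the returned mask.  The words read before
         * it come from the tail of the previous row, which is all zero -
         * that is why the empty row 0 exists.
         */
        p -= cpu / BITS_PER_LONG;
        return to_cpumask(p);
}
#endif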
2184
2185 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2186 EXPORT_SYMBOL(cpu_all_bits);
2187
2188 #ifdef CONFIG_INIT_ALL_POSSIBLE
2189 struct cpumask __cpu_possible_mask __read_mostly
2190         = {CPU_BITS_ALL};
2191 #else
2192 struct cpumask __cpu_possible_mask __read_mostly;
2193 #endif
2194 EXPORT_SYMBOL(__cpu_possible_mask);
2195
2196 struct cpumask __cpu_online_mask __read_mostly;
2197 EXPORT_SYMBOL(__cpu_online_mask);
2198
2199 struct cpumask __cpu_present_mask __read_mostly;
2200 EXPORT_SYMBOL(__cpu_present_mask);
2201
2202 struct cpumask __cpu_active_mask __read_mostly;
2203 EXPORT_SYMBOL(__cpu_active_mask);
2204
2205 void init_cpu_present(const struct cpumask *src)
2206 {
2207         cpumask_copy(&__cpu_present_mask, src);
2208 }
2209
2210 void init_cpu_possible(const struct cpumask *src)
2211 {
2212         cpumask_copy(&__cpu_possible_mask, src);
2213 }
2214
2215 void init_cpu_online(const struct cpumask *src)
2216 {
2217         cpumask_copy(&__cpu_online_mask, src);
2218 }
2219
2220 /*
2221  * Activate the first processor.
2222  */
2223 void __init boot_cpu_init(void)
2224 {
2225         int cpu = smp_processor_id();
2226
2227         /* Mark the boot cpu "present", "online" etc. for the SMP and UP case */
2228         set_cpu_online(cpu, true);
2229         set_cpu_active(cpu, true);
2230         set_cpu_present(cpu, true);
2231         set_cpu_possible(cpu, true);
2232 }
2233
2234 /*
2235  * Must be called _AFTER_ setting up the per_cpu areas
2236  */
2237 void __init boot_cpu_hotplug_init(void)
2238 {
2239 #ifdef CONFIG_SMP
2240         this_cpu_write(cpuhp_state.booted_once, true);
2241 #endif
2242         this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2243 }
2244
2245 /*
2246  * These are used for a global "mitigations=" cmdline option for toggling
2247  * optional CPU mitigations.
2248  */
2249 enum cpu_mitigations {
2250         CPU_MITIGATIONS_OFF,
2251         CPU_MITIGATIONS_AUTO,
2252         CPU_MITIGATIONS_AUTO_NOSMT,
2253 };
2254
2255 static enum cpu_mitigations cpu_mitigations __ro_after_init =
2256         CPU_MITIGATIONS_AUTO;
2257
2258 static int __init mitigations_parse_cmdline(char *arg)
2259 {
2260         if (!strcmp(arg, "off"))
2261                 cpu_mitigations = CPU_MITIGATIONS_OFF;
2262         else if (!strcmp(arg, "auto"))
2263                 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2264         else if (!strcmp(arg, "auto,nosmt"))
2265                 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2266         else
2267                 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2268                         arg);
2269
2270         return 0;
2271 }
2272 early_param("mitigations", mitigations_parse_cmdline);
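
/*
 * Accepted values, as parsed above:
 *
 *   mitigations=off         - cpu_mitigations_off() returns true
 *   mitigations=auto        - the default (CPU_MITIGATIONS_AUTO)
 *   mitigations=auto,nosmt  - cpu_mitigations_auto_nosmt() returns true
 *
 * Any other value logs the pr_crit() message and leaves the default in place.
 */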
2273
2274 /* mitigations=off */
2275 bool cpu_mitigations_off(void)
2276 {
2277         return cpu_mitigations == CPU_MITIGATIONS_OFF;
2278 }
2279 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2280
2281 /* mitigations=auto,nosmt */
2282 bool cpu_mitigations_auto_nosmt(void)
2283 {
2284         return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2285 }
2286 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);