 * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great
 * deal of code from the sparc and intel versions.
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched/mm.h>
#include <linux/sched/topology.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
#include <asm/hw_irq.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/cputhreads.h>
#include <asm/cputable.h>
#include <asm/vdso_datapage.h>
#include <asm/debug.h>
#include <asm/kexec.h>
#include <asm/asm-prototypes.h>
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#define DBG(fmt...) udbg_printf(fmt)
#ifdef CONFIG_HOTPLUG_CPU
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
struct thread_info *secondary_ti;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
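/*
 * Note on the three masks above (built up in add_cpu_to_masks() below):
 * cpu_sibling_map holds the hardware threads of one core,
 * cpu_l2_cache_map additionally holds CPUs sharing an L2 cache, and
 * cpu_core_map additionally holds all CPUs on the same chip, so for any
 * CPU the sibling mask is a subset of the L2 mask, which is a subset of
 * the core mask.
 */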
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
/* Can't be static due to PowerMac hackery */
volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
 * Returns 1 if the specified cpu should be brought up during boot.
 * Used to inhibit booting threads if they've been disabled or
 * limited on the command line
int smp_generic_cpu_bootable(unsigned int nr)
/* Special case - we inhibit secondary thread startup
 * during boot if the user requests it.
if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
if (smt_enabled_at_boot
&& cpu_thread_in_core(nr) >= smt_enabled_at_boot)
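/*
 * Example: booting with smt-enabled=2 on cores with 8 hardware threads
 * leaves smt_enabled_at_boot == 2, so only threads 0 and 1 of each core
 * pass this check at boot; the remaining threads can still be brought up
 * later via CPU hotplug.
 */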
int smp_generic_kick_cpu(int nr)
if (nr < 0 || nr >= nr_cpu_ids)
 * The processor is currently spinning, waiting for the
 * cpu_start field to become non-zero. After we set cpu_start,
 * the processor will continue on to secondary_start.
if (!paca_ptrs[nr]->cpu_start) {
paca_ptrs[nr]->cpu_start = 1;
#ifdef CONFIG_HOTPLUG_CPU
 * OK, it's not there, so it might be soft-unplugged; let's
 * try to bring it back.
generic_set_cpu_up(nr);
smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_PPC64 */
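/*
 * For context: the secondary side of this kick (generic_secondary_smp_init
 * in head_64.S) spins on its paca cpu_start field, conceptually:
 *
 *	while (!paca_ptrs[nr]->cpu_start)
 *		cpu_relax();
 *	// ...continue into start_secondary()
 *
 * so setting cpu_start = 1 above is what releases a held secondary; a
 * soft-unplugged CPU instead needs the CPU_UP_PREPARE state and an IPI.
 */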
static irqreturn_t call_function_action(int irq, void *data)
generic_smp_call_function_interrupt();
static irqreturn_t reschedule_action(int irq, void *data)
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static irqreturn_t tick_broadcast_ipi_action(int irq, void *data)
timer_broadcast_interrupt();
#ifdef CONFIG_NMI_IPI
static irqreturn_t nmi_ipi_action(int irq, void *data)
smp_handle_nmi_ipi(get_irq_regs());
static irq_handler_t smp_ipi_action[] = {
[PPC_MSG_CALL_FUNCTION] = call_function_action,
[PPC_MSG_RESCHEDULE] = reschedule_action,
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
[PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action,
#ifdef CONFIG_NMI_IPI
[PPC_MSG_NMI_IPI] = nmi_ipi_action,
 * The NMI IPI is a fallback and not truly non-maskable. It is simpler
 * than going through the call function infrastructure, and strongly
 * serialized, so it is more appropriate for debugging.
const char *smp_ipi_name[] = {
[PPC_MSG_CALL_FUNCTION] = "ipi call function",
[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#ifdef CONFIG_NMI_IPI
[PPC_MSG_NMI_IPI] = "nmi ipi",
/* optional function to request ipi, for controllers with >= 4 ipis */
int smp_request_message_ipi(int virq, int msg)
if (msg < 0 || msg > PPC_MSG_NMI_IPI)
#ifndef CONFIG_NMI_IPI
if (msg == PPC_MSG_NMI_IPI)
err = request_irq(virq, smp_ipi_action[msg],
IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
smp_ipi_name[msg], NULL);
WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
virq, smp_ipi_name[msg], err);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
long messages; /* current messages */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
void smp_muxed_ipi_set_message(int cpu, int msg)
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
 * Order previous accesses before accesses in the IPI handler.
void smp_muxed_ipi_message_pass(int cpu, int msg)
smp_muxed_ipi_set_message(cpu, msg);
 * cause_ipi functions are required to include a full barrier
 * before doing whatever causes the IPI.
smp_ops->cause_ipi(cpu);
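/*
 * The ordering contract, spelled out: the sender stores its message byte,
 * executes a full barrier (here or inside cause_ipi), then raises the
 * interrupt; the receiver orders the interrupt/ack with mb() in
 * smp_ipi_demux() and collects all pending messages with xchg(), so it
 * cannot observe the IPI without also observing the message that caused it.
 */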
#ifdef __BIG_ENDIAN__
#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#define IPI_MESSAGE(A) (1uL << (8 * (A)))
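/*
 * Layout illustration: message type A occupies its own byte of the
 * per-cpu messages word, which lets smp_muxed_ipi_set_message() set a
 * flag with a single byte store while smp_ipi_demux_relaxed() grabs all
 * pending messages with one xchg() of the whole long. For instance, on
 * 64-bit little-endian PPC_MSG_CALL_FUNCTION (0) lands in bit 0 and
 * PPC_MSG_RESCHEDULE (1) in bit 8; the big-endian variant mirrors this
 * so each message still gets a byte of its own.
 */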
irqreturn_t smp_ipi_demux(void)
mb();	/* order any irq clear */
return smp_ipi_demux_relaxed();
/* sync-free variant. Callers should ensure synchronization */
irqreturn_t smp_ipi_demux_relaxed(void)
struct cpu_messages *info;
info = this_cpu_ptr(&ipi_message);
all = xchg(&info->messages, 0);
#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
 * Must check for PPC_MSG_RM_HOST_ACTION messages
 * before PPC_MSG_CALL_FUNCTION messages because when
 * a VM is destroyed, we call kick_all_cpus_sync()
 * to ensure that any pending PPC_MSG_RM_HOST_ACTION
 * messages have completed before we free any VCPUs.
if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
kvmppc_xics_ipi_action();
if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
generic_smp_call_function_interrupt();
if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST))
timer_broadcast_interrupt();
#ifdef CONFIG_NMI_IPI
if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI))
nmi_ipi_action(0, NULL);
} while (info->messages);
#endif /* CONFIG_PPC_SMP_MUXED_IPI */
static inline void do_message_pass(int cpu, int msg)
if (smp_ops->message_pass)
smp_ops->message_pass(cpu, msg);
#ifdef CONFIG_PPC_SMP_MUXED_IPI
smp_muxed_ipi_message_pass(cpu, msg);
void smp_send_reschedule(int cpu)
do_message_pass(cpu, PPC_MSG_RESCHEDULE);
EXPORT_SYMBOL_GPL(smp_send_reschedule);
void arch_send_call_function_single_ipi(int cpu)
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
for_each_cpu(cpu, mask)
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
#ifdef CONFIG_NMI_IPI
 * NMI IPIs may not be recoverable, so should not be used as an ongoing
 * part of a running system. They can be used for crash, debug, halt/reboot, etc.
 * The IPI call waits with interrupts disabled until all targets enter the
 * NMI handler, then returns. Subsequent IPIs can be issued before targets
 * have returned from their handlers, so there is no guarantee about
 * concurrency or re-entrancy.
 * A new NMI can be issued before all targets exit the handler.
 * The IPI call may time out without all targets entering the NMI handler.
 * In that case, there is some logic to recover (and ignore subsequent
 * NMI interrupts that may eventually be raised), but the platform interrupt
 * handler may not be able to distinguish this from other exception causes,
 * which may cause a crash.
static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0);
static struct cpumask nmi_ipi_pending_mask;
static bool nmi_ipi_busy = false;
static void (*nmi_ipi_function)(struct pt_regs *) = NULL;
static void nmi_ipi_lock_start(unsigned long *flags)
raw_local_irq_save(*flags);
while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) {
raw_local_irq_restore(*flags);
spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
raw_local_irq_save(*flags);
static void nmi_ipi_lock(void)
while (atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1)
spin_until_cond(atomic_read(&__nmi_ipi_lock) == 0);
static void nmi_ipi_unlock(void)
WARN_ON(atomic_read(&__nmi_ipi_lock) != 1);
atomic_set(&__nmi_ipi_lock, 0);
static void nmi_ipi_unlock_end(unsigned long *flags)
raw_local_irq_restore(*flags);
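/*
 * These helpers form a minimal test-and-set lock on __nmi_ipi_lock:
 * contenders loop on atomic_cmpxchg(0 -> 1) and the owner releases it by
 * storing 0. The _start/_end variants also hard-disable/restore
 * interrupts with the raw_ primitives, briefly re-enabling them while
 * spinning on contention; a regular spinlock is avoided here, presumably
 * because this path can run in real NMI context.
 */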
 * Platform NMI handler calls this to ack
int smp_handle_nmi_ipi(struct pt_regs *regs)
void (*fn)(struct pt_regs *) = NULL;
int me = raw_smp_processor_id();
 * Unexpected NMIs are possible here because the platform interrupt
 * handler may not be able to distinguish NMI IPIs from other types of
 * NMIs, or because the caller may have timed out.
nmi_ipi_lock_start(&flags);
if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) {
cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
fn = READ_ONCE(nmi_ipi_function);
nmi_ipi_unlock_end(&flags);
static void do_smp_send_nmi_ipi(int cpu, bool safe)
if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu))
do_message_pass(cpu, PPC_MSG_NMI_IPI);
for_each_online_cpu(c) {
if (c == raw_smp_processor_id())
do_message_pass(c, PPC_MSG_NMI_IPI);
 * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS.
 * - fn is the target callback function.
 * - delay_us > 0 is the delay before giving up waiting for targets to
 *   begin executing the handler, == 0 specifies indefinite delay.
int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us, bool safe)
int me = raw_smp_processor_id();
BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS);
if (unlikely(!smp_ops))
nmi_ipi_lock_start(&flags);
while (nmi_ipi_busy) {
nmi_ipi_unlock_end(&flags);
spin_until_cond(!nmi_ipi_busy);
nmi_ipi_lock_start(&flags);
nmi_ipi_function = fn;
WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask));
cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask);
cpumask_clear_cpu(me, &nmi_ipi_pending_mask);
cpumask_set_cpu(cpu, &nmi_ipi_pending_mask);
/* Interrupts remain hard disabled */
do_smp_send_nmi_ipi(cpu, safe);
/* nmi_ipi_busy is set here, so unlock/lock is okay */
while (!cpumask_empty(&nmi_ipi_pending_mask)) {
if (!cpumask_empty(&nmi_ipi_pending_mask)) {
/* Timeout waiting for CPUs to call smp_handle_nmi_ipi */
cpumask_clear(&nmi_ipi_pending_mask);
nmi_ipi_function = NULL;
nmi_ipi_busy = false;
nmi_ipi_unlock_end(&flags);
int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
return __smp_send_nmi_ipi(cpu, fn, delay_us, false);
int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us)
return __smp_send_nmi_ipi(cpu, fn, delay_us, true);
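/*
 * Typical usage, as in smp_send_debugger_break() below: pass a callback
 * that inspects the interrupted register state plus a timeout in
 * microseconds, e.g. (callback name is illustrative only):
 *
 *	static void my_nmi_cb(struct pt_regs *regs)
 *	{
 *		pr_err("CPU%d: NIP %lx\n", smp_processor_id(), regs->nip);
 *	}
 *	...
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, my_nmi_cb, 1000000);
 *
 * The "safe" variant differs only in never using the platform's
 * cause_nmi_ipi hook, always falling back to the ordinary IPI path.
 */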
#endif /* CONFIG_NMI_IPI */
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
for_each_cpu(cpu, mask)
do_message_pass(cpu, PPC_MSG_TICK_BROADCAST);
#ifdef CONFIG_DEBUGGER
void debugger_ipi_callback(struct pt_regs *regs)
void smp_send_debugger_break(void)
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
#ifdef CONFIG_KEXEC_CORE
void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000);
if (kdump_in_progress() && crash_wake_offline) {
for_each_present_cpu(cpu) {
 * crash_ipi_callback will wait for
 * all cpus, including offline CPUs.
 * We don't care about nmi_ipi_function.
 * Offline cpus will jump straight into
 * crash_ipi_callback, so we can skip the
 * entire NMI dance and waiting for
 * cpus to clear pending mask, etc.
do_smp_send_nmi_ipi(cpu, false);
#ifdef CONFIG_NMI_IPI
static void crash_stop_this_cpu(struct pt_regs *regs)
static void crash_stop_this_cpu(void *dummy)
 * Just busy wait here and avoid marking the CPU as offline to ensure
 * register data is captured appropriately.
void crash_smp_send_stop(void)
static bool stopped = false;
#ifdef CONFIG_NMI_IPI
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
smp_call_function(crash_stop_this_cpu, NULL, 0);
#endif /* CONFIG_NMI_IPI */
#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
 * IRQs are already hard disabled by smp_handle_nmi_ipi().
set_cpu_online(smp_processor_id(), false);
void smp_send_stop(void)
smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
#else /* CONFIG_NMI_IPI */
static void stop_this_cpu(void *dummy)
 * Offlining CPUs in stop_this_cpu can result in scheduler warnings
 * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants
 * to know other CPUs are offline before it breaks locks to flush
 * printk buffers, in case we panic()ed while holding the lock.
set_cpu_online(smp_processor_id(), false);
void smp_send_stop(void)
static bool stopped = false;
 * Prevent waiting on csd lock from a previous smp_send_stop.
 * This is racy, but in general callers try to do the right
 * thing and only fire off one smp_send_stop (e.g., see
smp_call_function(stop_this_cpu, NULL, 0);
#endif /* CONFIG_NMI_IPI */
struct thread_info *current_set[NR_CPUS];
static void smp_store_cpu_info(int id)
per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR);
#ifdef CONFIG_PPC_FSL_BOOK3E
per_cpu(next_tlbcam_idx, id)
= (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
 * Relationships between CPUs are maintained in a set of per-cpu cpumasks,
 * so rather than just passing around the cpumask we pass around a function
 * that returns that cpumask for the given CPU.
static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int))
cpumask_set_cpu(i, get_cpumask(j));
cpumask_set_cpu(j, get_cpumask(i));
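/*
 * For example, set_cpus_related(2, 3, cpu_sibling_mask) marks CPU 2 in
 * CPU 3's sibling mask and CPU 3 in CPU 2's, keeping the relation
 * symmetric; set_cpus_unrelated() below clears both directions the same
 * way when a CPU goes offline.
 */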
#ifdef CONFIG_HOTPLUG_CPU
static void set_cpus_unrelated(int i, int j,
struct cpumask *(*get_cpumask)(int))
cpumask_clear_cpu(i, get_cpumask(j));
cpumask_clear_cpu(j, get_cpumask(i));
void __init smp_prepare_cpus(unsigned int max_cpus)
DBG("smp_prepare_cpus\n");
 * setup_cpu may need to be called on the boot cpu. We haven't
 * spun any cpus up, but let's be paranoid.
BUG_ON(boot_cpuid != smp_processor_id());
smp_store_cpu_info(boot_cpuid);
cpu_callin_map[boot_cpuid] = 1;
for_each_possible_cpu(cpu) {
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
 * numa_node_id() works after this.
if (cpu_present(cpu)) {
set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]);
set_cpu_numa_mem(cpu,
local_memory_node(numa_cpu_lookup_table[cpu]));
/* Init the cpumasks so the boot CPU is related to itself */
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
if (smp_ops && smp_ops->probe)
void smp_prepare_boot_cpu(void)
BUG_ON(smp_processor_id() != boot_cpuid);
paca_ptrs[boot_cpuid]->__current = current;
set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
current_set[boot_cpuid] = task_thread_info(current);
#ifdef CONFIG_HOTPLUG_CPU
int generic_cpu_disable(void)
unsigned int cpu = smp_processor_id();
if (cpu == boot_cpuid)
set_cpu_online(cpu, false);
vdso_data->processorCount--;
/* Update affinity of all IRQs previously aimed at this CPU */
irq_migrate_all_off_this_cpu();
 * Depending on the details of the interrupt controller, it's possible
 * that one of the interrupts we just migrated away from this CPU is
 * actually already pending on this CPU. If we leave it in that state
 * the interrupt will never be EOI'ed, and will never fire again. So
 * temporarily enable interrupts here, to allow any pending interrupt to
 * be received (and EOI'ed), before we take this CPU offline.
void generic_cpu_die(unsigned int cpu)
for (i = 0; i < 100; i++) {
if (is_cpu_dead(cpu))
printk(KERN_ERR "CPU%d didn't die...\n", cpu);
void generic_set_cpu_dead(unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_DEAD;
 * The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
 * cpu_state stays CPU_DEAD after generic_set_cpu_dead() and the wait loop
 * in generic_cpu_die() returns immediately instead of actually waiting.
void generic_set_cpu_up(unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
int generic_check_cpu_restart(unsigned int cpu)
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
int is_cpu_dead(unsigned int cpu)
return per_cpu(cpu_state, cpu) == CPU_DEAD;
static bool secondaries_inhibited(void)
return kvm_hv_mode_active();
#else /* HOTPLUG_CPU */
#define secondaries_inhibited() 0
static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
struct thread_info *ti = task_thread_info(idle);
paca_ptrs[cpu]->__current = idle;
paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
secondary_ti = current_set[cpu] = ti;
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 * Don't allow secondary threads to come online if inhibited
if (threads_per_core > 1 && secondaries_inhibited() &&
cpu_thread_in_subcore(cpu))
if (smp_ops == NULL ||
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
cpu_idle_thread_init(cpu, tidle);
 * The platform might need to allocate resources prior to bringing
if (smp_ops->prepare_cpu) {
rc = smp_ops->prepare_cpu(cpu);
/* Make sure callin-map entry is 0 (can be left over from a CPU
cpu_callin_map[cpu] = 0;
/* The information for processor bringup must
 * be written out to main store before we release
DBG("smp: kicking cpu %d\n", cpu);
rc = smp_ops->kick_cpu(cpu);
pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
 * Wait to see if the cpu made a callin (is actually up).
 * Use this value that I found through experimentation.
if (system_state < SYSTEM_RUNNING)
for (c = 50000; c && !cpu_callin_map[cpu]; c--)
#ifdef CONFIG_HOTPLUG_CPU
 * CPUs can take much longer to come up in the
 * hotplug case. Wait five seconds.
for (c = 5000; c && !cpu_callin_map[cpu]; c--)
if (!cpu_callin_map[cpu]) {
printk(KERN_ERR "Processor %u is stuck.\n", cpu);
DBG("Processor %u found.\n", cpu);
if (smp_ops->give_timebase)
smp_ops->give_timebase();
/* Wait until cpu puts itself in the online & active maps */
spin_until_cond(cpu_online(cpu));
/* Return the value of the reg property corresponding to the given
int cpu_to_core_id(int cpu)
struct device_node *np;
np = of_get_cpu_node(cpu, NULL);
reg = of_get_property(np, "reg", NULL);
id = be32_to_cpup(reg);
EXPORT_SYMBOL_GPL(cpu_to_core_id);
/* Helper routines for cpu to core mapping */
int cpu_core_index_of_thread(int cpu)
return cpu >> threads_shift;
EXPORT_SYMBOL_GPL(cpu_core_index_of_thread);
int cpu_first_thread_of_core(int core)
return core << threads_shift;
EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
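/*
 * Worked example (numbers illustrative): with 8 threads per core,
 * threads_shift is 3, so cpu_core_index_of_thread(13) == 1 (CPU 13 is a
 * thread of the second core) and cpu_first_thread_of_core(2) == 16 (the
 * first hardware thread of the third core).
 */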
/* Must be called when no change can occur to cpu_present_mask,
 * i.e. during cpu online or offline.
static struct device_node *cpu_to_l2cache(int cpu)
struct device_node *np;
struct device_node *cache;
if (!cpu_present(cpu))
np = of_get_cpu_node(cpu, NULL);
cache = of_find_next_cache_node(np);
static bool update_mask_by_l2(int cpu, struct cpumask *(*mask_fn)(int))
struct device_node *l2_cache, *np;
l2_cache = cpu_to_l2cache(cpu);
for_each_cpu(i, cpu_online_mask) {
 * When updating the masks, the current CPU itself has not yet been
 * marked online, but we still need to update its cache masks.
np = cpu_to_l2cache(i);
set_cpus_related(cpu, i, mask_fn);
of_node_put(l2_cache);
#ifdef CONFIG_HOTPLUG_CPU
static void remove_cpu_from_masks(int cpu)
/* NB: cpu_core_mask is a superset of the others */
for_each_cpu(i, cpu_core_mask(cpu)) {
set_cpus_unrelated(cpu, i, cpu_core_mask);
set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
set_cpus_unrelated(cpu, i, cpu_sibling_mask);
static void add_cpu_to_masks(int cpu)
int first_thread = cpu_first_thread_sibling(cpu);
int chipid = cpu_to_chip_id(cpu);
 * This CPU will not be in the online mask yet so we need to manually
 * add it to its own thread sibling mask.
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
for (i = first_thread; i < first_thread + threads_per_core; i++)
set_cpus_related(i, cpu, cpu_sibling_mask);
 * Copy the thread sibling mask into the cache sibling mask
 * and mark any CPUs that share an L2 with this CPU.
for_each_cpu(i, cpu_sibling_mask(cpu))
set_cpus_related(cpu, i, cpu_l2_cache_mask);
update_mask_by_l2(cpu, cpu_l2_cache_mask);
 * Copy the cache sibling mask into the core sibling mask and mark
 * any CPUs on the same chip as this CPU.
for_each_cpu(i, cpu_l2_cache_mask(cpu))
set_cpus_related(cpu, i, cpu_core_mask);
for_each_cpu(i, cpu_online_mask)
if (cpu_to_chip_id(i) == chipid)
set_cpus_related(cpu, i, cpu_core_mask);
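/*
 * The net effect is a nested hierarchy; e.g. on a POWER9 where core
 * pairs share an L2, cpu_sibling_mask(cpu) holds the threads of one
 * core, cpu_l2_cache_mask(cpu) those plus the threads of the paired
 * core, and cpu_core_mask(cpu) every online CPU with the same chip id.
 * remove_cpu_from_masks() relies on that nesting when it walks only
 * cpu_core_mask() to undo all three relations.
 */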
static bool shared_caches;
/* Activate a secondary processor. */
void start_secondary(void *unused)
unsigned int cpu = smp_processor_id();
current->active_mm = &init_mm;
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
if (smp_ops->take_timebase)
smp_ops->take_timebase();
secondary_cpu_time_init();
if (system_state == SYSTEM_RUNNING)
vdso_data->processorCount++;
set_numa_node(numa_cpu_lookup_table[cpu]);
set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu]));
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
 * Check for any shared caches. Note that this must be done on a
 * per-core basis because one core in the pair might be disabled.
if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
shared_caches = true;
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
/* We can enable ftrace for secondary cpus now */
this_cpu_enable_ftrace();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
#ifdef CONFIG_SCHED_SMT
/* cpumask of CPUs with asymmetric SMT dependency */
static int powerpc_smt_flags(void)
int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
flags |= SD_ASYM_PACKING;
static struct sched_domain_topology_level powerpc_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
 * P9 has a slightly odd architecture where pairs of cores share an L2 cache.
 * This topology makes it *much* cheaper to migrate tasks between adjacent cores
 * since the migrated task remains cache hot. We want to take advantage of this
 * at the scheduler level so an extra topology level is required.
static int powerpc_shared_cache_flags(void)
return SD_SHARE_PKG_RESOURCES;
 * We can't just pass cpu_l2_cache_mask() directly because it returns a
 * non-const pointer and the compiler barfs on that.
static const struct cpumask *shared_cache_mask(int cpu)
return cpu_l2_cache_mask(cpu);
static struct sched_domain_topology_level power9_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
{ shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) },
{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
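/*
 * With power9_topology selected, each CPU's sched domains nest as
 * SMT (threads of one core) -> CACHE (the core pair sharing an L2, via
 * shared_cache_mask()) -> DIE (cpu_cpu_mask()); the default
 * powerpc_topology simply omits the CACHE level. The extra level lets
 * the scheduler prefer keeping a migrating task within its CACHE domain,
 * where it stays cache hot, before spilling across the die.
 */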
void __init smp_cpus_done(unsigned int max_cpus)
 * We are running pinned to the boot CPU, see rest_init().
if (smp_ops && smp_ops->setup_cpu)
smp_ops->setup_cpu(boot_cpuid);
if (smp_ops && smp_ops->bringup_done)
smp_ops->bringup_done();
 * On a shared LPAR, associativity needs to be requested.
 * Hence, get numa topology before dumping cpu topology
shared_proc_topology_init();
dump_numa_cpu_topology();
 * If any CPU detects that it's sharing a cache with another CPU then
 * use the deeper topology that is aware of this sharing.
if (shared_caches) {
pr_info("Using shared cache scheduler topology\n");
set_sched_topology(power9_topology);
pr_info("Using standard scheduler topology\n");
set_sched_topology(powerpc_topology);
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
int cpu = smp_processor_id();
if (!smp_ops->cpu_disable)
this_cpu_disable_ftrace();
err = smp_ops->cpu_disable();
/* Update sibling maps */
remove_cpu_from_masks(cpu);
void __cpu_die(unsigned int cpu)
if (smp_ops->cpu_die)
smp_ops->cpu_die(cpu);
 * Disable on the down path. This will be re-enabled by
 * start_secondary() via start_secondary_resume() below
this_cpu_disable_ftrace();
/* If we return, we re-enter start_secondary */
start_secondary_resume();