/*
 * SMP initialisation and IPI support
 * Based on arch/arm/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/irq_work.h>
#include <linux/kexec.h>

#include <asm/alternative.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/daifflags.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/smp_plat.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>

#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>

DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;
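
/* IPI message types; the names must line up with the ipi_types[] strings below. */
enum ipi_msg_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_CPU_CRASH_STOP,
	IPI_TIMER,
	IPI_IRQ_WORK,
	IPI_WAKEUP
};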

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_kill(unsigned int cpu);
#else
static inline int op_cpu_kill(unsigned int cpu)
{
	return -ENOSYS;
}
#endif

/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	if (cpu_ops[cpu]->cpu_boot)
		return cpu_ops[cpu]->cpu_boot(cpu);

	return -EOPNOTSUPP;
}
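
/*
 * cpu_running is completed by the incoming CPU at the end of
 * secondary_start_kernel(), once it has marked itself online; __cpu_up()
 * below waits on it (with a timeout) before inspecting the boot status.
 */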
static DECLARE_COMPLETION(cpu_running);
bool va52mismatch __ro_after_init;

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;
	long status;

	/*
	 * We need to tell the secondary core where to find its stack and the
	 * page tables.
	 */
	secondary_data.task = idle;
	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
	update_cpu_boot_status(CPU_MMU_OFF);
	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		/*
		 * CPU was successfully started, wait for it to come online or
		 * time out.
		 */
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);

			if (IS_ENABLED(CONFIG_ARM64_52BIT_VA) && va52mismatch)
				pr_crit("CPU%u: does not support 52-bit VAs\n", cpu);

			ret = -EIO;
		}
	} else {
		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
		return ret;
	}

	secondary_data.task = NULL;
	secondary_data.stack = NULL;
	status = READ_ONCE(secondary_data.status);
	if (ret && status) {

		if (status == CPU_MMU_OFF)
			status = READ_ONCE(__early_cpu_boot_status);

		switch (status) {
		default:
			pr_err("CPU%u: failed in unknown state : 0x%lx\n",
			       cpu, status);
			break;
		case CPU_KILL_ME:
			if (!op_cpu_kill(cpu)) {
				pr_crit("CPU%u: died during early boot\n", cpu);
				break;
			}
			/* Fall through */
			pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
		case CPU_STUCK_IN_KERNEL:
			pr_crit("CPU%u: is stuck in kernel\n", cpu);
			cpus_stuck_in_kernel++;
			break;
		case CPU_PANIC_KERNEL:
			panic("CPU%u detected unsupported configuration\n", cpu);
		}
	}

	return ret;
}
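
/*
 * Note on the status protocol above: the secondary updates
 * secondary_data.status (or, before the MMU is on, __early_cpu_boot_status)
 * from the early boot path with the MMU and caches potentially off, which is
 * why __cpu_up() cleans secondary_data to the PoC with __flush_dcache_area()
 * and re-reads the status with READ_ONCE() after a failed bringup.
 */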

/*
 * This is the secondary CPU boot entry. We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage notrace void secondary_start_kernel(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	check_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	store_cpu_topology(cpu);
	numa_add_cpu(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor 0x%010lx [0x%08x]\n",
		cpu, (unsigned long)mpidr, read_cpuid_id());
	update_cpu_boot_status(CPU_BOOT_SUCCESS);
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_daif_restore(DAIF_PROCCTX);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
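
/*
 * The cpu_ops[] hooks used above and below come from the enable method
 * named in the DT/ACPI tables ("psci" or "spin-table"). A hotplug-capable
 * method fills in struct cpu_operations roughly as below; this is a sketch
 * with hypothetical foo_* helpers, see cpu_psci_ops for the real thing:
 *
 *	static const struct cpu_operations foo_ops = {
 *		.name		= "foo",
 *		.cpu_init	= foo_cpu_init,
 *		.cpu_prepare	= foo_cpu_prepare,
 *		.cpu_boot	= foo_cpu_boot,		(e.g. PSCI CPU_ON)
 *	#ifdef CONFIG_HOTPLUG_CPU
 *		.cpu_disable	= foo_cpu_disable,
 *		.cpu_die	= foo_cpu_die,
 *		.cpu_kill	= foo_cpu_kill,
 *	#endif
 *	};
 */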

#ifdef CONFIG_HOTPLUG_CPU
static int op_cpu_disable(unsigned int cpu)
{
	/*
	 * If we don't have a cpu_die method, abort before we reach the point
	 * of no return. CPU0 may not have a cpu_ops, so test for it.
	 */
	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
		return -EOPNOTSUPP;

	/*
	 * We may need to abort a hot unplug for some other mechanism-specific
	 * reason.
	 */
	if (cpu_ops[cpu]->cpu_disable)
		return cpu_ops[cpu]->cpu_disable(cpu);

	return 0;
}

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	int ret;

	ret = op_cpu_disable(cpu);
	if (ret)
		return ret;

	remove_cpu_topology(cpu);
	numa_remove_cpu(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	irq_migrate_all_off_this_cpu();

	return 0;
}

static int op_cpu_kill(unsigned int cpu)
{
	/*
	 * If we have no means of synchronising with the dying CPU, then assume
	 * that it is really dead. We can only wait for an arbitrary length of
	 * time and hope that it's dead, so let's skip the wait and just hope.
	 */
	if (!cpu_ops[cpu]->cpu_kill)
		return 0;

	return cpu_ops[cpu]->cpu_kill(cpu);
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	int err;

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		return;
	}
	pr_notice("CPU%u: shutdown\n", cpu);

	/*
	 * Now that the dying CPU is beyond the point of no return w.r.t.
	 * in-kernel synchronisation, try to get the firmware to help us to
	 * verify that it has really left the kernel before we consider
	 * clobbering anything it might still be using.
	 */
	err = op_cpu_kill(cpu);
	if (err)
		pr_warn("CPU%d may not have shut down cleanly: %d\n",
			cpu, err);
}
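
/*
 * cpu_wait_death()/cpu_report_death() are the generic helpers from
 * kernel/smpboot.c: the dying CPU reports in from cpu_die() below, and the
 * requesting CPU waits up to five seconds for that report before asking the
 * firmware, via op_cpu_kill(), to confirm the CPU has really left the kernel.
 */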

/*
 * Called from the idle thread for the CPU which has been shutdown.
 */
void cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	idle_task_exit();

	local_daif_mask();

	/* Tell __cpu_die() that this CPU is now safe to dispose of */
	(void)cpu_report_death();

	/*
	 * Actually shutdown the CPU. This must never fail. The specific hotplug
	 * mechanism must perform all required cache maintenance to ensure that
	 * no dirty lines are lost in the process of shutting down the CPU.
	 */
	cpu_ops[cpu]->cpu_die(cpu);

	BUG();
}
#endif

/*
 * Kill the calling secondary CPU, early in bringup before it is turned
 * online.
 */
void cpu_die_early(void)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: will not boot\n", cpu);

	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

#ifdef CONFIG_HOTPLUG_CPU
	update_cpu_boot_status(CPU_KILL_ME);
	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif
	update_cpu_boot_status(CPU_STUCK_IN_KERNEL);

	cpu_park_loop();
}
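
/*
 * If the CPU can be neither killed nor parked out of the kernel, __cpu_up()
 * observes CPU_STUCK_IN_KERNEL and bumps cpus_stuck_in_kernel, which
 * cpus_are_stuck_in_kernel() at the bottom of this file reports to callers
 * (e.g. kexec) that must not overwrite kernel text while a CPU may still be
 * executing it.
 */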

static void __init hyp_mode_check(void)
{
	if (is_hyp_mode_available())
		pr_info("CPU: All CPU(s) started at EL2\n");
	else if (is_hyp_mode_mismatched())
		WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
			   "CPU: CPUs started in inconsistent modes");
	else
		pr_info("CPU: All CPU(s) started at EL1\n");
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
	setup_cpu_features();
	hyp_mode_check();
	apply_alternatives_all();
	mark_linear_text_alias_ro();
}

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code.
	 */
	jump_label_init();
	cpuinfo_store_boot_cpu();
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)
{
	const __be32 *cell;
	u64 hwid;

	/*
	 * A cpu node with missing "reg" property is
	 * considered invalid to build a cpu_logical_map
	 * entry.
	 */
	cell = of_get_property(dn, "reg", NULL);
	if (!cell) {
		pr_err("%pOF: missing reg property\n", dn);
		return INVALID_HWID;
	}

	hwid = of_read_number(cell, of_n_addr_cells(dn));
	/*
	 * Non affinity bits must be set to 0 in the DT
	 */
	if (hwid & ~MPIDR_HWID_BITMASK) {
		pr_err("%pOF: invalid reg property\n", dn);
		return INVALID_HWID;
	}
	return hwid;
}
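
/*
 * For reference, the kind of DT cpu node parsed above looks roughly like
 * this (a sketch; the reg value carries the MPIDR affinity fields):
 *
 *	cpu@100 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x100>;
 *		enable-method = "psci";
 *	};
 */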

/*
 * Duplicate MPIDRs are a recipe for disaster. Scan all initialized
 * entries and check for duplicates. If any is found just ignore the
 * cpu. cpu_logical_map was initialized to INVALID_HWID to avoid
 * matching valid MPIDR values.
 */
static bool __init is_mpidr_duplicate(unsigned int cpu, u64 hwid)
{
	unsigned int i;

	for (i = 1; (i < cpu) && (i < NR_CPUS); i++)
		if (cpu_logical_map(i) == hwid)
			return true;
	return false;
}

/*
 * Initialize cpu operations for a logical cpu and
 * set it in the possible mask on success
 */
static int __init smp_cpu_setup(int cpu)
{
	if (cpu_read_ops(cpu))
		return -ENODEV;

	if (cpu_ops[cpu]->cpu_init(cpu))
		return -ENODEV;

	set_cpu_possible(cpu, true);

	return 0;
}

static bool bootcpu_valid __initdata;
static unsigned int cpu_count = 1;

#ifdef CONFIG_ACPI
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS];

struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu)
{
	return &cpu_madt_gicc[cpu];
}
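
/*
 * cpu_madt_gicc[] caches each CPU's MADT GICC entry as it is parsed, so
 * later users (e.g. the GIC driver and the ACPI parking protocol code) can
 * look up per-CPU interface data without walking the MADT again.
 */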

/*
 * acpi_map_gic_cpu_interface - parse processor MADT entry
 *
 * Carry out sanity checks on MADT processor entry and initialize
 * cpu_logical_map on success
 */
static void __init
acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
{
	u64 hwid = processor->arm_mpidr;

	if (!(processor->flags & ACPI_MADT_ENABLED)) {
		pr_debug("skipping disabled CPU entry with 0x%llx MPIDR\n", hwid);
		return;
	}

	if (hwid & ~MPIDR_HWID_BITMASK || hwid == INVALID_HWID) {
		pr_err("skipping CPU entry with invalid MPIDR 0x%llx\n", hwid);
		return;
	}

	if (is_mpidr_duplicate(cpu_count, hwid)) {
		pr_err("duplicate CPU MPIDR 0x%llx in MADT\n", hwid);
		return;
	}

	/* Check if GICC structure of boot CPU is available in the MADT */
	if (cpu_logical_map(0) == hwid) {
		if (bootcpu_valid) {
			pr_err("duplicate boot CPU MPIDR: 0x%llx in MADT\n",
			       hwid);
			return;
		}
		bootcpu_valid = true;
		cpu_madt_gicc[0] = *processor;
		return;
	}

	if (cpu_count >= NR_CPUS)
		return;

	/* map the logical cpu id to cpu MPIDR */
	cpu_logical_map(cpu_count) = hwid;

	cpu_madt_gicc[cpu_count] = *processor;

	/*
	 * Set-up the ACPI parking protocol cpu entries
	 * while initializing the cpu_logical_map to
	 * avoid parsing MADT entries multiple times for
	 * nothing (ie a valid cpu_logical_map entry should
	 * contain a valid parking protocol data set to
	 * initialize the cpu if the parking protocol is
	 * the only available enable method).
	 */
	acpi_set_mailbox_entry(cpu_count, processor);

	cpu_count++;
}

static int __init
acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;

	processor = (struct acpi_madt_generic_interrupt *)header;
	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	acpi_table_print_madt_entry(header);

	acpi_map_gic_cpu_interface(processor);

	return 0;
}

static void __init acpi_parse_and_init_cpus(void)
{
	int i;

	/*
	 * do a walk of MADT to determine how many CPUs
	 * we have including disabled CPUs, and get information
	 * we need for SMP init.
	 */
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
			      acpi_parse_gic_cpu_interface, 0);

	/*
	 * In ACPI, SMP and CPU NUMA information is provided in separate
	 * static tables, namely the MADT and the SRAT.
	 *
	 * Thus, it is simpler to first create the cpu logical map through
	 * an MADT walk and then map the logical cpus to their node ids
	 * as separate steps.
	 */
	acpi_map_cpus_to_nodes();

	for (i = 0; i < nr_cpu_ids; i++)
		early_map_cpu_to_node(i, acpi_numa_get_nid(i));
}
#else
#define acpi_parse_and_init_cpus(...)	do { } while (0)
#endif

/*
 * Enumerate the possible CPU set from the device tree and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
static void __init of_parse_and_init_cpus(void)
{
	struct device_node *dn;

	for_each_node_by_type(dn, "cpu") {
		u64 hwid = of_get_cpu_mpidr(dn);

		if (hwid == INVALID_HWID)
			goto next;

		if (is_mpidr_duplicate(cpu_count, hwid)) {
			pr_err("%pOF: duplicate cpu reg properties in the DT\n",
				dn);
			goto next;
		}

		/*
		 * The numbering scheme requires that the boot CPU
		 * must be assigned logical id 0. Record it so that
		 * the logical map built from DT is validated and can
		 * be used.
		 */
		if (hwid == cpu_logical_map(0)) {
			if (bootcpu_valid) {
				pr_err("%pOF: duplicate boot cpu reg property in DT\n",
					dn);
				goto next;
			}

			bootcpu_valid = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));

			/*
			 * cpu_logical_map has already been
			 * initialized and the boot cpu doesn't need
			 * the enable-method so continue without
			 * incrementing cpu.
			 */
			continue;
		}

		if (cpu_count >= NR_CPUS)
			goto next;

		pr_debug("cpu logical map 0x%llx\n", hwid);
		cpu_logical_map(cpu_count) = hwid;

		early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
next:
		cpu_count++;
	}
}

/*
 * Enumerate the possible CPU set from the device tree or ACPI and build the
 * cpu logical map array containing MPIDR values related to logical
 * cpus. Assumes that cpu_logical_map(0) has already been initialized.
 */
void __init smp_init_cpus(void)
{
	int i;

	if (acpi_disabled)
		of_parse_and_init_cpus();
	else
		acpi_parse_and_init_cpus();

	if (cpu_count > nr_cpu_ids)
		pr_warn("Number of cores (%d) exceeds configured maximum of %u - clipping\n",
			cpu_count, nr_cpu_ids);

	if (!bootcpu_valid) {
		pr_err("missing boot CPU MPIDR, not enabling secondaries\n");
		return;
	}

	/*
	 * We need to set the cpu_logical_map entries before enabling
	 * the cpus so that cpu processor description entries (DT cpu nodes
	 * and ACPI MADT entries) can be retrieved by matching the cpu hwid
	 * with entries in cpu_logical_map while initializing the cpus.
	 * If the cpu set-up fails, invalidate the cpu_logical_map entry.
	 */
	for (i = 1; i < nr_cpu_ids; i++) {
		if (cpu_logical_map(i) != INVALID_HWID) {
			if (smp_cpu_setup(i))
				cpu_logical_map(i) = INVALID_HWID;
		}
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int err;
	unsigned int cpu;
	unsigned int this_cpu;

	init_cpu_topology();

	this_cpu = smp_processor_id();
	store_cpu_topology(this_cpu);
	numa_store_cpu_info(this_cpu);
	numa_add_cpu(this_cpu);

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the
	 * secondaries from the bootloader.
	 */
	for_each_possible_cpu(cpu) {

		per_cpu(cpu_number, cpu) = cpu;

		if (cpu == smp_processor_id())
			continue;

		if (!cpu_ops[cpu])
			continue;

		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;

		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}

void (*__smp_cross_call)(const struct cpumask *, unsigned int);

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	__smp_cross_call = fn;
}
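
/*
 * The interrupt controller driver installs the actual SGI-raising hook here,
 * e.g. the GIC driver calls set_smp_cross_call(gic_raise_softirq) at probe
 * time, so all of the IPI senders below funnel into that one callback.
 */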

static const char *ipi_types[NR_IPI] __tracepoint_string = {
#define S(x,s)	[x] = s
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
	S(IPI_CPU_STOP, "CPU stop interrupts"),
	S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_IRQ_WORK, "IRQ work interrupts"),
	S(IPI_WAKEUP, "CPU wake-up interrupts"),
};
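
/*
 * These strings are what show up per-IPI in /proc/interrupts (via
 * show_ipi_list() below) and in the ipi_raise/ipi_entry/ipi_exit
 * tracepoints.
 */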

static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
	trace_ipi_raise(target, ipi_types[ipinr]);
	__smp_cross_call(target, ipinr);
}

void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < NR_IPI; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[i]));
		seq_printf(p, " %s\n", ipi_types[i]);
	}
}

u64 smp_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = 0;
	int i;

	for (i = 0; i < NR_IPI; i++)
		sum += __get_irq_stat(cpu, ipi_irqs[i]);

	return sum;
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
}

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_WAKEUP);
}
#endif

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	if (__smp_cross_call)
		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
}
#endif

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	set_cpu_online(cpu, false);

	local_daif_mask();
	sdei_mask_local_cpu();

	while (1)
		cpu_park_loop();
}

#ifdef CONFIG_KEXEC_CORE
static atomic_t waiting_for_crash_ipi = ATOMIC_INIT(0);
#endif

static void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_KEXEC_CORE
	crash_save_cpu(regs, cpu);

	atomic_dec(&waiting_for_crash_ipi);

	local_irq_disable();
	sdei_mask_local_cpu();

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
#endif

	/* just in case */
	cpu_park_loop();
#endif
}

/*
 * Main handler for inter-processor interrupts
 */
void handle_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct pt_regs *old_regs = set_irq_regs(regs);

	if ((unsigned)ipinr < NR_IPI) {
		trace_ipi_entry_rcuidle(ipi_types[ipinr]);
		__inc_irq_stat(cpu, ipi_irqs[ipinr]);
	}

	switch (ipinr) {
	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
		break;

	case IPI_CPU_STOP:
		irq_enter();
		ipi_cpu_stop(cpu);
		irq_exit();
		break;

	case IPI_CPU_CRASH_STOP:
		if (IS_ENABLED(CONFIG_KEXEC_CORE)) {
			irq_enter();
			ipi_cpu_crash_stop(cpu, regs);

			unreachable();
		}
		break;

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
	case IPI_TIMER:
		irq_enter();
		tick_receive_broadcast();
		irq_exit();
		break;
#endif

#ifdef CONFIG_IRQ_WORK
	case IPI_IRQ_WORK:
		irq_enter();
		irq_work_run();
		irq_exit();
		break;
#endif

#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
	case IPI_WAKEUP:
		WARN_ONCE(!acpi_parking_protocol_valid(cpu),
			  "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
			  cpu);
		break;
#endif

	default:
		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
		break;
	}

	if ((unsigned)ipinr < NR_IPI)
		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_TIMER);
}
#endif

/*
 * The number of CPUs online, not counting this CPU (which may not be
 * fully online and so not counted in num_online_cpus()).
 */
static inline unsigned int num_other_online_cpus(void)
{
	unsigned int this_cpu_online = cpu_online(smp_processor_id());

	return num_online_cpus() - this_cpu_online;
}

void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_other_online_cpus()) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		smp_cross_call(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_other_online_cpus() && timeout--)
		udelay(1);

	if (num_other_online_cpus())
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(cpu_online_mask));

	sdei_mask_local_cpu();
}

#ifdef CONFIG_KEXEC_CORE
void crash_smp_send_stop(void)
{
	static int cpus_stopped;
	cpumask_t mask;
	unsigned long timeout;

	/*
	 * This function can be called twice in panic path, but obviously
	 * we execute this only once.
	 */
	if (cpus_stopped)
		return;

	cpus_stopped = 1;

	/*
	 * If this cpu is the only one alive at this point in time, online or
	 * not, there are no stop messages to be sent around, so just back out.
	 */
	if (num_other_online_cpus() == 0) {
		sdei_mask_local_cpu();
		return;
	}

	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);

	atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());

	pr_crit("SMP: stopping secondary CPUs\n");
	smp_cross_call(&mask, IPI_CPU_CRASH_STOP);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && timeout--)
		udelay(1);

	if (atomic_read(&waiting_for_crash_ipi) > 0)
		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
			   cpumask_pr_args(&mask));

	sdei_mask_local_cpu();
}

bool smp_crash_stop_failed(void)
{
	return (atomic_read(&waiting_for_crash_ipi) > 0);
}
#endif

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

static bool have_cpu_die(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int any_cpu = raw_smp_processor_id();

	if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
		return true;
#endif
	return false;
}

bool cpus_are_stuck_in_kernel(void)
{
	bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());

	return !!cpus_stuck_in_kernel || smp_spin_tables;
}