// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
#include <linux/arch_topology.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/alternative.h>

static DECLARE_COMPLETION(cpu_running);
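
/*
 * Boot-CPU-only preparation: set up CPU topology bookkeeping and, when
 * errata support is configured, apply the boot-stage alternative patches
 * before any secondary hart runs kernel code.
 */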
void __init smp_prepare_boot_cpu(void)
{
	init_cpu_topology();
#ifdef CONFIG_RISCV_ERRATA_ALTERNATIVE
	apply_boot_alternatives();
#endif
}
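
/*
 * Record NUMA information for the boot CPU and, unless booting with
 * "nosmp", mark each successfully prepared secondary CPU as present.
 */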
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpuid;
	int ret;
	unsigned int curr_cpuid;

	curr_cpuid = smp_processor_id();
	numa_store_cpu_info(curr_cpuid);
	numa_add_cpu(curr_cpuid);

	/* This covers non-smp usecase mandated by "nosmp" option */
	if (max_cpus == 0)
		return;
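
	/*
	 * Give every other possible CPU a chance to prepare itself via its
	 * cpu_ops hook; only CPUs that prepare successfully become present.
	 */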
	for_each_possible_cpu(cpuid) {
		if (cpuid == curr_cpuid)
			continue;
		if (cpu_ops[cpuid]->cpu_prepare) {
			ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
			if (ret)
				continue;
		}
		set_cpu_present(cpuid, true);
		numa_store_cpu_info(cpuid);
	}
}
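
/*
 * Called from setup_arch(): walk the devicetree cpu nodes, map each hart id
 * to a logical CPU id (the boot hart is always CPU 0) and mark every
 * discovered hart as a possible CPU.
 */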
void __init setup_smp(void)
{
	struct device_node *dn;
	int hart;
	bool found_boot_cpu = false;
	int cpuid = 1;

	cpu_set_ops(0);

	for_each_of_cpu_node(dn) {
		hart = riscv_of_processor_hartid(dn);
		if (hart < 0)
			continue;

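		/*
		 * The hart that booted the kernel is always logical CPU 0;
		 * any other hart gets the next free cpuid.
		 */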
		if (hart == cpuid_to_hartid_map(0)) {
			BUG_ON(found_boot_cpu);
			found_boot_cpu = true;
			early_map_cpu_to_node(0, of_node_to_nid(dn));
			continue;
		}
		if (cpuid >= NR_CPUS) {
			pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
				cpuid, hart);
			continue;
		}

		cpuid_to_hartid_map(cpuid) = hart;
		early_map_cpu_to_node(cpuid, of_node_to_nid(dn));
		cpuid++;
	}

	BUG_ON(!found_boot_cpu);

	if (cpuid > nr_cpu_ids)
		pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
			cpuid, nr_cpu_ids);

	for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
		if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
			cpu_set_ops(cpuid);
			set_cpu_possible(cpuid, true);
		}
	}
}
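
/*
 * Ask the hart's registered cpu_ops method to start executing at the
 * secondary entry point, with @tidle as its idle task.
 */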
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
	if (cpu_ops[cpu]->cpu_start)
		return cpu_ops[cpu]->cpu_start(cpu, tidle);

	return -EOPNOTSUPP;
}
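
/*
 * Generic CPU-hotplug entry point for bringing one secondary CPU online:
 * start the hart and wait for it to report in from smp_callin().
 */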
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int ret = 0;
	tidle->thread_info.cpu = cpu;

	ret = start_secondary_cpu(cpu, tidle);
	if (!ret) {
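		/*
		 * The secondary signals cpu_running from smp_callin(); give
		 * it up to one second before checking the online mask.
		 */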
		wait_for_completion_timeout(&cpu_running,
					    msecs_to_jiffies(1000));

		if (!cpu_online(cpu)) {
			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
		}
	} else {
		pr_crit("CPU%u: failed to start\n", cpu);
	}

	return ret;
}
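
/* All secondary CPUs are up; nothing further to do on RISC-V. */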
void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * C entry point for a secondary processor.
 */
asmlinkage __visible void smp_callin(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int curr_cpuid = smp_processor_id();

	riscv_clear_ipi();

	/* All kernel threads share the same mm context. */
	mmgrab(mm);
	current->active_mm = mm;

	notify_cpu_starting(curr_cpuid);
	numa_add_cpu(curr_cpuid);
	update_siblings_masks(curr_cpuid);
	set_cpu_online(curr_cpuid, 1);

	/*
	 * Remote TLB flushes are ignored while the CPU is offline, so emit
	 * a local TLB flush right now just in case.
	 */
	local_flush_tlb_all();
	complete(&cpu_running);
	/*
	 * Disable preemption before enabling interrupts, so we don't try to
	 * schedule a CPU that hasn't actually started yet.
	 */
	local_irq_enable();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}