// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
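
/*
 * Cross-call message types. Each one occupies a single bit in the
 * per-CPU ipi_data[].bits word, so a hart can accumulate several
 * pending messages and drain them in one pass of handle_IPI().
 */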
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_IRQ_WORK,
	IPI_TIMER,
	IPI_MAX
};

unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};
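
/*
 * The boot CPU is always logical CPU 0; record its hartid here so that
 * cpuid_to_hartid_map() works before the other harts are enumerated.
 */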
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single bit ipi messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

int riscv_hartid_to_cpuid(int hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	return -ENOENT;
}

void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	cpumask_clear(out);
	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
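
/*
 * IPI_CPU_STOP handler: mark this CPU offline and park it in a
 * wait-for-interrupt loop; it never returns.
 */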
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

static const struct riscv_ipi_ops *ipi_ops __ro_after_init;

void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
	ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
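
/*
 * Acknowledge the IPI that woke this hart: let the registered driver
 * clear its own state first, then clear the supervisor software
 * interrupt pending bit in the CSR.
 */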
void riscv_clear_ipi(void)
{
	if (ipi_ops && ipi_ops->ipi_clear)
		ipi_ops->ipi_clear();

	csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);
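
/*
 * Senders first publish the message type in the target's ipi_data bits
 * (with memory barriers around the atomic set_bit()), and only then ask
 * the IPI driver to interrupt the target harts.
 */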
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpu;

	smp_mb__before_atomic();
	for_each_cpu(cpu, mask)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(mask);
	else
		pr_warn("SMP: IPI inject method not available\n");
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	if (ipi_ops && ipi_ops->ipi_inject)
		ipi_ops->ipi_inject(cpumask_of(cpu));
	else
		pr_warn("SMP: IPI inject method not available\n");
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
	send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif
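
/*
 * IPI entry point. Atomically swap out the pending-message word and
 * dispatch each set bit; loop until no new messages have arrived, so
 * one interrupt can drain several coalesced IPIs.
 */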
void handle_IPI(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	irq_enter();

	riscv_clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			goto done;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		if (ops & (1 << IPI_IRQ_WORK)) {
			stats[IPI_IRQ_WORK]++;
			irq_work_run();
		}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
		if (ops & (1 << IPI_TIMER)) {
			stats[IPI_TIMER]++;
			tick_receive_broadcast();
		}
#endif
		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}

done:
	irq_exit();
	set_irq_regs(old_regs);
}
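
/* Labels printed by show_ipi_stats(), indexed by ipi_message_type. */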
static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
	[IPI_IRQ_WORK]		= "IRQ work interrupts",
	[IPI_TIMER]		= "Timer broadcast interrupts",
};
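
/* Print one row per IPI type with the per-CPU delivery counts from ipi_data. */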
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_TIMER);
}
#endif
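
/*
 * Stop all other CPUs (e.g. for reboot or panic): send IPI_CPU_STOP to
 * every online CPU but this one, then poll for up to a second until
 * they have all marked themselves offline.
 */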
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}
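
/* Kick a remote CPU so it runs through its scheduler IPI path. */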
void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);