GNU Linux-libre 5.15.137-gnu
arch/x86/kernel/apic/ipi.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/io_apic.h>

#include "local.h"

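/*
 * Static key that gates the use of IPI shorthands. It is flipped by
 * apic_smt_update() below once broadcasting is known to be safe.
 */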
DEFINE_STATIC_KEY_FALSE(apic_use_ipi_shorthand);

#ifdef CONFIG_SMP
static int apic_ipi_shorthand_off __ro_after_init;

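/*
 * Parse the "no_ipi_broadcast=" command line option; a non-zero value
 * disables IPI shorthand broadcasts.
 */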
static __init int apic_ipi_shorthand(char *str)
{
        get_option(&str, &apic_ipi_shorthand_off);
        return 1;
}
__setup("no_ipi_broadcast=", apic_ipi_shorthand);

static int __init print_ipi_mode(void)
{
        pr_info("IPI shorthand broadcast: %s\n",
                apic_ipi_shorthand_off ? "disabled" : "enabled");
        return 0;
}
late_initcall(print_ipi_mode);

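/*
 * Re-evaluate whether IPI shorthands may be used. Called when the set of
 * usable CPUs changes (e.g. from the SMT update paths).
 */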
void apic_smt_update(void)
{
        /*
         * Do not switch to broadcast mode if:
         * - Disabled on the command line
         * - Only a single CPU is online
         * - Not all present CPUs have been at least booted once
         *
         * The latter is important as the local APIC might be in some
         * random state and a broadcast might cause havoc. That's
         * especially true for NMI broadcasting.
         */
        if (apic_ipi_shorthand_off || num_online_cpus() == 1 ||
            !cpumask_equal(cpu_present_mask, &cpus_booted_once_mask)) {
                static_branch_disable(&apic_use_ipi_shorthand);
        } else {
                static_branch_enable(&apic_use_ipi_shorthand);
        }
}

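/*
 * Send @vector to all online CPUs except the calling one, using the
 * all-but-self shorthand when it is safe and a mask based IPI otherwise.
 */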
void apic_send_IPI_allbutself(unsigned int vector)
{
        if (num_online_cpus() < 2)
                return;

        if (static_branch_likely(&apic_use_ipi_shorthand))
                apic->send_IPI_allbutself(vector);
        else
                apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
}

/*
 * Send a 'reschedule' IPI to another CPU. It goes straight through and
 * wastes no time serializing anything. Worst case is that we lose a
 * reschedule ...
 */
void native_smp_send_reschedule(int cpu)
{
        if (unlikely(cpu_is_offline(cpu))) {
                WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
                return;
        }
        apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}

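/* Kick a single remote CPU to process its pending smp_call_function work. */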
void native_send_call_func_single_ipi(int cpu)
{
        apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

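/*
 * Send the smp_call_function IPI to all CPUs in @mask. If the mask covers
 * all online CPUs (modulo the sender), the cheaper all/all-but-self
 * shorthands are used instead of a mask based IPI.
 */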
void native_send_call_func_ipi(const struct cpumask *mask)
{
        if (static_branch_likely(&apic_use_ipi_shorthand)) {
                unsigned int cpu = smp_processor_id();

                if (!cpumask_or_equal(mask, cpumask_of(cpu), cpu_online_mask))
                        goto sendmask;

                if (cpumask_test_cpu(cpu, mask))
                        apic->send_IPI_all(CALL_FUNCTION_VECTOR);
                else if (num_online_cpus() > 1)
                        apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
                return;
        }

sendmask:
        apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
}

#endif /* CONFIG_SMP */

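/* Place the destination APIC ID in the high byte of the ICR2 value. */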
static inline int __prepare_ICR2(unsigned int mask)
{
        return SET_APIC_DEST_FIELD(mask);
}

static inline void __xapic_wait_icr_idle(void)
{
        while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY)
                cpu_relax();
}

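/*
 * Send an IPI using an ICR shorthand (self, all, or all-but-self). No
 * destination needs to be programmed; ICR2 is left untouched.
 */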
void __default_send_IPI_shortcut(unsigned int shortcut, int vector)
{
        /*
         * Subtle. In the case of the 'never do double writes' workaround
         * we have to lock out interrupts to be safe.  As we don't care
         * about the value read, we use an atomic rmw access to avoid
         * costly cli/sti.  Otherwise we use an even cheaper single atomic
         * write to the APIC.
         */
        unsigned int cfg;

        /*
         * Wait for the ICR to become idle.
         */
        if (unlikely(vector == NMI_VECTOR))
                safe_apic_wait_icr_idle();
        else
                __xapic_wait_icr_idle();

        /*
         * No need to touch the target chip field. Also the destination
         * mode is ignored when a shorthand is used.
         */
        cfg = __prepare_ICR(shortcut, vector, 0);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        native_apic_mem_write(APIC_ICR, cfg);
}

/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
{
        unsigned long cfg;

        /*
         * Wait for the ICR to become idle.
         */
        if (unlikely(vector == NMI_VECTOR))
                safe_apic_wait_icr_idle();
        else
                __xapic_wait_icr_idle();

        /*
         * Prepare the target chip field.
         */
        cfg = __prepare_ICR2(mask);
        native_apic_mem_write(APIC_ICR2, cfg);

        /*
         * Program the ICR.
         */
        cfg = __prepare_ICR(0, vector, dest);

        /*
         * Send the IPI. The write to APIC_ICR fires this off.
         */
        native_apic_mem_write(APIC_ICR, cfg);
}

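/*
 * Send @vector to a single CPU in physical destination mode, with
 * interrupts disabled around the ICR programming.
 */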
void default_send_IPI_single_phys(int cpu, int vector)
{
        unsigned long flags;

        local_irq_save(flags);
        __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
                                      vector, APIC_DEST_PHYSICAL);
        local_irq_restore(flags);
}

void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
{
        unsigned long query_cpu;
        unsigned long flags;

        /*
         * Hack. The clustered APIC addressing mode doesn't allow us to send
         * to an arbitrary mask, so I do a unicast to each CPU instead.
         * - mbligh
         */
        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
                                query_cpu), vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
                                                 int vector)
{
        unsigned int this_cpu = smp_processor_id();
        unsigned int query_cpu;
        unsigned long flags;

        /* See Hack comment above */

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                if (query_cpu == this_cpu)
                        continue;
                __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
                                 query_cpu), vector, APIC_DEST_PHYSICAL);
        }
        local_irq_restore(flags);
}

/*
 * Helper function for APICs which insist on cpumasks
 */
void default_send_IPI_single(int cpu, int vector)
{
        apic->send_IPI_mask(cpumask_of(cpu), vector);
}

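/* Default shorthand-based IPI primitives for APIC drivers that allow them. */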
void default_send_IPI_allbutself(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
}

void default_send_IPI_all(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector);
}

void default_send_IPI_self(int vector)
{
        __default_send_IPI_shortcut(APIC_DEST_SELF, vector);
}

#ifdef CONFIG_X86_32

void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
                                                 int vector)
{
        unsigned long flags;
        unsigned int query_cpu;

        /*
         * Hack. The clustered APIC addressing mode doesn't allow us to send
         * to an arbitrary mask, so I do a unicast to each CPU instead. This
         * should be modified to do 1 message per cluster ID - mbligh
         */

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask)
                __default_send_IPI_dest_field(
                        early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
                        vector, APIC_DEST_LOGICAL);
        local_irq_restore(flags);
}

void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
                                                 int vector)
{
        unsigned long flags;
        unsigned int query_cpu;
        unsigned int this_cpu = smp_processor_id();

        /* See Hack comment above */

        local_irq_save(flags);
        for_each_cpu(query_cpu, mask) {
                if (query_cpu == this_cpu)
                        continue;
                __default_send_IPI_dest_field(
                        early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
                        vector, APIC_DEST_LOGICAL);
        }
        local_irq_restore(flags);
}

/*
 * This is only used on smaller machines, where all CPUs fit in the first
 * word of the cpumask and can be addressed with a single flat logical
 * destination write.
 */
void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
{
        unsigned long mask = cpumask_bits(cpumask)[0];
        unsigned long flags;

        if (!mask)
                return;

        local_irq_save(flags);
        WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
        __default_send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
        local_irq_restore(flags);
}

/* must come after the send_IPI functions above for inlining */
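/*
 * Linear scan of the per-CPU APIC ID table; returns the matching CPU
 * number or -1 if no possible CPU has @apic_id.
 */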
static int convert_apicid_to_cpu(int apic_id)
{
        int i;

        for_each_possible_cpu(i) {
                if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
                        return i;
        }
        return -1;
}

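/*
 * Best-effort variant of smp_processor_id() that derives the CPU number
 * from the hardware APIC ID, intended for contexts (e.g. crash paths)
 * where the usual per-CPU data may not be trustworthy. Falls back to 0.
 */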
int safe_smp_processor_id(void)
{
        int apicid, cpuid;

        if (!boot_cpu_has(X86_FEATURE_APIC))
                return 0;

        apicid = hard_smp_processor_id();
        if (apicid == BAD_APICID)
                return 0;

        cpuid = convert_apicid_to_cpu(apicid);

        return cpuid >= 0 ? cpuid : 0;
}
#endif