// SPDX-License-Identifier: GPL-2.0
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/dmar.h>
#include <linux/irq.h>
#include <linux/cpu.h>

#include <asm/smp.h>
#include <asm/x2apic.h>
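
/*
 * Per-cpu state: the CPU's logical APIC ID (cached from APIC_LDR), the
 * set of online CPUs sharing its x2APIC cluster, and a scratch cpumask
 * used while batching IPIs cluster by cluster.
 */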
static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);

static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	return x2apic_enabled();
}
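
/*
 * The logical x2APIC ID holds the cluster number in bits 31:16 and a
 * per-CPU bit in bits 15:0, so a cluster spans at most 16 CPUs.
 */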
static inline u32 x2apic_cluster(int cpu)
{
	return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
}
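
/* Send an IPI to a single CPU, addressed by its logical APIC ID. */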
static void x2apic_send_IPI(int cpu, int vector)
{
	u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	__x2apic_send_IPI_dest(dest, vector, APIC_DEST_LOGICAL);
}
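
/*
 * Deliver @vector to every CPU in @mask with one IPI per x2APIC
 * cluster: the logical IDs of the targeted CPUs in each cluster are
 * OR'ed into a single logical destination. @apic_dest selects whether
 * the sending CPU itself is included.
 */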
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	struct cpumask *cpus_in_cluster_ptr;
	struct cpumask *ipi_mask_ptr;
	unsigned int cpu, this_cpu;
	unsigned long flags;
	u32 dest;

	/* x2apic MSRs are special and need a special fence: */
	weak_wrmsr_fence();
	local_irq_save(flags);

	this_cpu = smp_processor_id();

	/*
	 * We are about to modify the mask, so we need our own copy and
	 * must be sure it is manipulated with interrupts disabled.
	 */
	ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
	cpumask_copy(ipi_mask_ptr, mask);

	/* The idea is to send one IPI per cluster. */
	for_each_cpu(cpu, ipi_mask_ptr) {
		unsigned long i;

		cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
		dest = 0;

		/* Collect the cpus of this cluster that are in the mask. */
		for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
			if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
				dest |= per_cpu(x86_cpu_to_logical_apicid, i);
		}

		if (!dest)
			continue;

		__x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
		/* Discard cluster siblings so they get no second IPI. */
		cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
	}

	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}

static void
x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}

static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
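
/*
 * Compute the logical destination for an interrupt aimed at @mask: the
 * OR of the logical IDs of all mask members sharing a cluster with the
 * first CPU in the mask. The CPUs actually covered are recorded in the
 * irq's effective affinity mask.
 */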
static int
x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
			  unsigned int *apicid)
{
	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
	unsigned int cpu;
	u32 dest;
	u16 cluster;

	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		return -EINVAL;
	dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
	cluster = x2apic_cluster(cpu);

	cpumask_clear(effmsk);
	for_each_cpu(cpu, mask) {
		if (cluster != x2apic_cluster(cpu))
			continue;
		dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
		cpumask_set_cpu(cpu, effmsk);
	}
	*apicid = dest;
	return 0;
}
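
/*
 * Runs on the local CPU during APIC setup: cache the hardware-assigned
 * logical APIC ID (APIC_LDR is read-only in x2APIC mode) and link this
 * CPU into the sibling mask of every online CPU in the same cluster.
 */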
static void init_x2apic_ldr(void)
{
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);

	cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
		cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
}

/*
 * At CPU state changes, update the x2apic cluster sibling info.
 */
static int x2apic_prepare_cpu(unsigned int cpu)
{
	if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
		free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
		return -ENOMEM;
	}
	return 0;
}
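
/*
 * Hotplug teardown: drop the dead CPU from its siblings' cluster masks
 * and release its per-cpu cpumask allocations.
 */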
static int x2apic_dead_cpu(unsigned int this_cpu)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
			continue;
		cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
		cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
	}
	free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
	free_cpumask_var(per_cpu(ipi_mask, this_cpu));
	return 0;
}
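
/*
 * Driver probe, run on the boot CPU: register the hotplug callbacks so
 * each CPU has its cpumasks allocated before coming online, and seed
 * the boot CPU's own cluster mask.
 */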
static int x2apic_cluster_probe(void)
{
	int cpu = smp_processor_id();
	int ret;

	if (!x2apic_mode)
		return 0;

	ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
				x2apic_prepare_cpu, x2apic_dead_cpu);
	if (ret < 0) {
		pr_err("Failed to register X2APIC_PREPARE\n");
		return 0;
	}
	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
	return 1;
}

static const struct cpumask *x2apic_cluster_target_cpus(void)
{
	return cpu_all_mask;
}

/*
 * Each x2apic cluster is an allocation domain.
 */
static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
					     const struct cpumask *mask)
{
	/*
	 * To minimize vector pressure, the default case of boot, device
	 * bringup, etc. will use a single cpu for the interrupt destination.
	 *
	 * On explicit migration requests coming from irqbalance etc.,
	 * interrupts will be routed to the x2apic cluster (cluster-id
	 * derived from the first cpu in the mask) members specified
	 * in the mask.
	 */
	if (mask == x2apic_cluster_target_cpus())
		cpumask_copy(retmask, cpumask_of(cpu));
	else
		cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}
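
/*
 * Logical destination mode with lowest-priority delivery; all APIC
 * register accesses go through the x2APIC MSR interface.
 */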
static struct apic apic_x2apic_cluster __ro_after_init = {

	.name				= "cluster x2apic",
	.probe				= x2apic_cluster_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_LowestPrio,
	.irq_dest_mode			= 1, /* logical */

	.target_cpus			= x2apic_cluster_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= APIC_DEST_LOGICAL,
	.check_apicid_used		= NULL,

	.vector_allocation_domain	= cluster_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.phys_pkg_id			= x2apic_phys_pkg_id,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,

	.cpu_mask_to_apicid		= x2apic_cpu_mask_to_apicid,

	.send_IPI			= x2apic_send_IPI,
	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};

apic_driver(apic_x2apic_cluster);