// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/* Reschedule call back. */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}
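
/* Tear down the per-CPU IPI/VIRQ interrupts set up by xen_smp_intr_init(). */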
void xen_smp_intr_free(unsigned int cpu)
{
        kfree(per_cpu(xen_resched_irq, cpu).name);
        per_cpu(xen_resched_irq, cpu).name = NULL;
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
        }
        kfree(per_cpu(xen_callfunc_irq, cpu).name);
        per_cpu(xen_callfunc_irq, cpu).name = NULL;
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
                per_cpu(xen_callfunc_irq, cpu).irq = -1;
        }
        kfree(per_cpu(xen_debug_irq, cpu).name);
        per_cpu(xen_debug_irq, cpu).name = NULL;
        if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
                per_cpu(xen_debug_irq, cpu).irq = -1;
        }
        kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
        per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
        }
}
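
/*
 * Bind the per-CPU reschedule, call-function and call-function-single IPIs,
 * plus VIRQ_DEBUG when FIFO event channels are not in use.
 */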
int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        if (!resched_name)
                goto fail_mem;
        per_cpu(xen_resched_irq, cpu).name = resched_name;
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu).irq = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        if (!callfunc_name)
                goto fail_mem;
        per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu,
                                    xen_call_function_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu).irq = rc;

        if (!xen_fifo_events) {
                debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
                if (!debug_name)
                        goto fail_mem;
                per_cpu(xen_debug_irq, cpu).name = debug_name;
                rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
                                             xen_debug_interrupt,
                                             IRQF_PERCPU | IRQF_NOBALANCING,
                                             debug_name, NULL);
                if (rc < 0)
                        goto fail;
                per_cpu(xen_debug_irq, cpu).irq = rc;
        }

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        if (!callfunc_name)
                goto fail_mem;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;

        return 0;

 fail_mem:
        rc = -ENOMEM;
 fail:
        xen_smp_intr_free(cpu);
        return rc;
}
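
/*
 * Without vcpu_info placement, CPUs whose Xen vcpu id is beyond
 * MAX_VIRT_CPUS cannot be supported and are taken offline again.
 */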
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
        int cpu, rc, count = 0;

        if (xen_hvm_domain())
                native_smp_cpus_done(max_cpus);
        else
                calculate_max_logical_packages();

        if (xen_have_vcpu_info_placement)
                return;

        for_each_online_cpu(cpu) {
                if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
                        continue;

                rc = remove_cpu(cpu);
                if (rc == 0) {
                        /* Reset vcpu_info so this cpu cannot be onlined again. */
                        xen_vcpu_info_reset(cpu);
                        count++;
                } else {
                        pr_warn("%s: failed to bring CPU %d down, error %d\n",
                                __func__, cpu, rc);
                }
        }
        WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}

void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}
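
/* Deliver @vector to every online CPU in @mask. */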
static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned int cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}
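
/* Translate a native x86 IPI vector into the corresponding Xen IPI vector. */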
static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
#ifdef CONFIG_X86_64
        case NMI_VECTOR:
        case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
                xen_vector = XEN_NMI_VECTOR;
                break;
#endif
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                        vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}
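
/* Send @vector to all online CPUs in @mask except the calling CPU. */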
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned int cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}
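
/* IPI handlers backing the generic SMP cross-call machinery. */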
static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);

        return IRQ_HANDLED;
}