1 // SPDX-License-Identifier: GPL-2.0-only
5 * Xen models interrupts with abstract event channels. Because each
6 * domain gets 1024 event channels, but NR_IRQS is not that large, we
7 * must dynamically map irqs<->event channels. The event channels
8 * interface with the rest of the kernel by defining a xen interrupt
9 * chip. When an event is received, it is mapped to an irq and sent
10 * through the normal interrupt processing path.
12 * There are four kinds of events which can be mapped to an event channel:
15 * 1. Inter-domain notifications. This includes all the virtual
16 * device events, since they're driven by front-ends in another domain
18 * 2. VIRQs, typically used for timers. These are per-cpu events.
19 * 3. IPIs.
20 * 4. PIRQs - Hardware interrupts.
22 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
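 *
 * A rough sketch of the delivery path implemented below: Xen marks a
 * port pending and raises the upcall, xen_evtchn_do_upcall() scans the
 * pending ports, handle_irq_for_port() maps each port back to its irq
 * via get_evtchn_to_irq(), and the event is then fed into
 * generic_handle_irq() like any other interrupt.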
25 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
27 #include <linux/linkage.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30 #include <linux/moduleparam.h>
31 #include <linux/string.h>
32 #include <linux/memblock.h>
33 #include <linux/slab.h>
34 #include <linux/irqnr.h>
35 #include <linux/pci.h>
36 #include <linux/rcupdate.h>
37 #include <linux/spinlock.h>
38 #include <linux/cpuhotplug.h>
39 #include <linux/atomic.h>
40 #include <linux/ktime.h>
44 #include <asm/ptrace.h>
45 #include <asm/idtentry.h>
47 #include <asm/io_apic.h>
48 #include <asm/i8259.h>
49 #include <asm/xen/cpuid.h>
50 #include <asm/xen/pci.h>
52 #include <asm/sync_bitops.h>
53 #include <asm/xen/hypercall.h>
54 #include <asm/xen/hypervisor.h>
59 #include <xen/xen-ops.h>
60 #include <xen/events.h>
61 #include <xen/interface/xen.h>
62 #include <xen/interface/event_channel.h>
63 #include <xen/interface/hvm/hvm_op.h>
64 #include <xen/interface/hvm/params.h>
65 #include <xen/interface/physdev.h>
66 #include <xen/interface/sched.h>
67 #include <xen/interface/vcpu.h>
68 #include <xen/xenbus.h>
69 #include <asm/hw_irq.h>
71 #include "events_internal.h"
73 #undef MODULE_PARAM_PREFIX
74 #define MODULE_PARAM_PREFIX "xen."
76 /* Interrupt types. */
86 * Packed IRQ information:
87 * type - enum xen_irq_type
88 * event channel - irq->event channel mapping
89 * cpu - cpu this event channel is bound to
90 * index - type-specific information:
91 * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
92 * guest, or GSI (real passthrough IRQ) of the device.
98 struct list_head list;
99 struct list_head eoi_list;
100 struct rcu_work rwork;
104 short type; /* type: IRQT_* */
105 u8 mask_reason; /* Why is event channel masked */
106 #define EVT_MASK_REASON_EXPLICIT 0x01
107 #define EVT_MASK_REASON_TEMPORARY 0x02
108 #define EVT_MASK_REASON_EOI_PENDING 0x04
109 u8 is_active; /* Is event just being handled? */
111 evtchn_port_t evtchn; /* event channel */
112 unsigned short cpu; /* cpu bound */
113 unsigned short eoi_cpu; /* EOI must happen on this cpu-1 */
114 unsigned int irq_epoch; /* If eoi_cpu valid: irq_epoch of event */
115 u64 eoi_time; /* Time in jiffies when to EOI. */
124 unsigned char vector;
128 struct xenbus_device *interdomain;
132 #define PIRQ_NEEDS_EOI (1 << 0)
133 #define PIRQ_SHAREABLE (1 << 1)
134 #define PIRQ_MSI_GROUP (1 << 2)
136 static uint __read_mostly event_loop_timeout = 2;
137 module_param(event_loop_timeout, uint, 0644);
139 static uint __read_mostly event_eoi_delay = 10;
140 module_param(event_eoi_delay, uint, 0644);
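/*
 * Both knobs are runtime tunables. With MODULE_PARAM_PREFIX set to "xen."
 * above, they can be given on the kernel command line, e.g.
 * xen.event_loop_timeout=4 xen.event_eoi_delay=10 (values illustrative),
 * or changed later through /sys/module/xen/parameters/.
 */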
142 const struct evtchn_ops *evtchn_ops;
145 * This lock protects updates to the following mapping and reference-count
146 * arrays. The lock does not need to be acquired to read the mapping tables.
148 static DEFINE_MUTEX(irq_mapping_update_lock);
153 * Lock order (outermost first): irq_mapping_update_lock, then the
155 * IRQ-desc lock, then percpu eoi_list_lock, then irq_info->lock.
159 static LIST_HEAD(xen_irq_list_head);
161 /* IRQ <-> VIRQ mapping. */
162 static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
164 /* IRQ <-> IPI mapping */
165 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
167 /* Event channel distribution data */
168 static atomic_t channels_on_cpu[NR_CPUS];
170 static int **evtchn_to_irq;
172 static unsigned long *pirq_eoi_map;
174 static bool (*pirq_needs_eoi)(unsigned irq);
176 #define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
177 #define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
178 #define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
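/*
 * Worked example, assuming 4 KiB pages and 4-byte ints (so
 * EVTCHN_PER_ROW == 1024): event channel 2500 lands in row 2500 / 1024 == 2,
 * column 2500 % 1024 == 452, i.e. evtchn_to_irq[2][452]. Rows are
 * page-sized arrays allocated on demand by set_evtchn_to_irq() below.
 */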
180 /* Xen will never allocate port zero for any purpose. */
181 #define VALID_EVTCHN(chn) ((chn) != 0)
183 static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];
185 static struct irq_chip xen_dynamic_chip;
186 static struct irq_chip xen_lateeoi_chip;
187 static struct irq_chip xen_percpu_chip;
188 static struct irq_chip xen_pirq_chip;
189 static void enable_dynirq(struct irq_data *data);
190 static void disable_dynirq(struct irq_data *data);
192 static DEFINE_PER_CPU(unsigned int, irq_epoch);
194 static void clear_evtchn_to_irq_row(int *evtchn_row)
198 for (col = 0; col < EVTCHN_PER_ROW; col++)
199 WRITE_ONCE(evtchn_row[col], -1);
202 static void clear_evtchn_to_irq_all(void)
206 for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
207 if (evtchn_to_irq[row] == NULL)
209 clear_evtchn_to_irq_row(evtchn_to_irq[row]);
213 static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
219 if (evtchn >= xen_evtchn_max_channels())
222 row = EVTCHN_ROW(evtchn);
223 col = EVTCHN_COL(evtchn);
225 if (evtchn_to_irq[row] == NULL) {
226 /* Unallocated irq entries return -1 anyway */
230 evtchn_row = (int *) __get_free_pages(GFP_KERNEL, 0);
231 if (evtchn_row == NULL)
234 clear_evtchn_to_irq_row(evtchn_row);
237 * We've prepared an empty row for the mapping. If a different
238 * thread was faster inserting it, we can drop ours.
240 if (cmpxchg(&evtchn_to_irq[row], NULL, evtchn_row) != NULL)
241 free_page((unsigned long) evtchn_row);
244 WRITE_ONCE(evtchn_to_irq[row][col], irq);
248 int get_evtchn_to_irq(evtchn_port_t evtchn)
250 if (evtchn >= xen_evtchn_max_channels())
252 if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
254 return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
257 /* Get info for IRQ */
258 static struct irq_info *info_for_irq(unsigned irq)
260 if (irq < nr_legacy_irqs())
261 return legacy_info_ptrs[irq];
263 return irq_get_chip_data(irq);
266 static void set_info_for_irq(unsigned int irq, struct irq_info *info)
268 if (irq < nr_legacy_irqs())
269 legacy_info_ptrs[irq] = info;
271 irq_set_chip_data(irq, info);
274 /* Per CPU channel accounting */
275 static void channels_on_cpu_dec(struct irq_info *info)
277 if (!info->is_accounted)
280 info->is_accounted = 0;
282 if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
285 WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1 , 0));
288 static void channels_on_cpu_inc(struct irq_info *info)
290 if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
293 if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
297 info->is_accounted = 1;
300 static void delayed_free_irq(struct work_struct *work)
302 struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
304 unsigned int irq = info->irq;
306 /* Remove the info pointer only now, with no potential users left. */
307 set_info_for_irq(irq, NULL);
311 /* Legacy IRQ descriptors are managed by the arch. */
312 if (irq >= nr_legacy_irqs())
316 /* Constructors for packed IRQ information. */
317 static int xen_irq_info_common_setup(struct irq_info *info,
319 enum xen_irq_type type,
320 evtchn_port_t evtchn,
325 BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
329 info->evtchn = evtchn;
331 info->mask_reason = EVT_MASK_REASON_EXPLICIT;
332 raw_spin_lock_init(&info->lock);
334 ret = set_evtchn_to_irq(evtchn, irq);
338 irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
340 return xen_evtchn_port_setup(evtchn);
343 static int xen_irq_info_evtchn_setup(unsigned irq,
344 evtchn_port_t evtchn,
345 struct xenbus_device *dev)
347 struct irq_info *info = info_for_irq(irq);
350 ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
351 info->u.interdomain = dev;
353 atomic_inc(&dev->event_channels);
358 static int xen_irq_info_ipi_setup(unsigned cpu,
360 evtchn_port_t evtchn,
363 struct irq_info *info = info_for_irq(irq);
367 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
369 return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
372 static int xen_irq_info_virq_setup(unsigned cpu,
374 evtchn_port_t evtchn,
377 struct irq_info *info = info_for_irq(irq);
381 per_cpu(virq_to_irq, cpu)[virq] = irq;
383 return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
386 static int xen_irq_info_pirq_setup(unsigned irq,
387 evtchn_port_t evtchn,
393 struct irq_info *info = info_for_irq(irq);
395 info->u.pirq.pirq = pirq;
396 info->u.pirq.gsi = gsi;
397 info->u.pirq.domid = domid;
398 info->u.pirq.flags = flags;
400 return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
403 static void xen_irq_info_cleanup(struct irq_info *info)
405 set_evtchn_to_irq(info->evtchn, -1);
406 xen_evtchn_port_remove(info->evtchn, info->cpu);
408 channels_on_cpu_dec(info);
412 * Accessors for packed IRQ information.
414 evtchn_port_t evtchn_from_irq(unsigned irq)
416 const struct irq_info *info = NULL;
418 if (likely(irq < nr_irqs))
419 info = info_for_irq(irq);
426 unsigned int irq_from_evtchn(evtchn_port_t evtchn)
428 return get_evtchn_to_irq(evtchn);
430 EXPORT_SYMBOL_GPL(irq_from_evtchn);
432 int irq_from_virq(unsigned int cpu, unsigned int virq)
434 return per_cpu(virq_to_irq, cpu)[virq];
437 static enum ipi_vector ipi_from_irq(unsigned irq)
439 struct irq_info *info = info_for_irq(irq);
441 BUG_ON(info == NULL);
442 BUG_ON(info->type != IRQT_IPI);
447 static unsigned virq_from_irq(unsigned irq)
449 struct irq_info *info = info_for_irq(irq);
451 BUG_ON(info == NULL);
452 BUG_ON(info->type != IRQT_VIRQ);
457 static unsigned pirq_from_irq(unsigned irq)
459 struct irq_info *info = info_for_irq(irq);
461 BUG_ON(info == NULL);
462 BUG_ON(info->type != IRQT_PIRQ);
464 return info->u.pirq.pirq;
467 static enum xen_irq_type type_from_irq(unsigned irq)
469 return info_for_irq(irq)->type;
472 static unsigned cpu_from_irq(unsigned irq)
474 return info_for_irq(irq)->cpu;
477 unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
479 int irq = get_evtchn_to_irq(evtchn);
483 ret = cpu_from_irq(irq);
488 static void do_mask(struct irq_info *info, u8 reason)
492 raw_spin_lock_irqsave(&info->lock, flags);
494 if (!info->mask_reason)
495 mask_evtchn(info->evtchn);
497 info->mask_reason |= reason;
499 raw_spin_unlock_irqrestore(&info->lock, flags);
502 static void do_unmask(struct irq_info *info, u8 reason)
506 raw_spin_lock_irqsave(&info->lock, flags);
508 info->mask_reason &= ~reason;
510 if (!info->mask_reason)
511 unmask_evtchn(info->evtchn);
513 raw_spin_unlock_irqrestore(&info->lock, flags);
517 static bool pirq_check_eoi_map(unsigned irq)
519 return test_bit(pirq_from_irq(irq), pirq_eoi_map);
523 static bool pirq_needs_eoi_flag(unsigned irq)
525 struct irq_info *info = info_for_irq(irq);
526 BUG_ON(info->type != IRQT_PIRQ);
528 return info->u.pirq.flags & PIRQ_NEEDS_EOI;
531 static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
534 int irq = get_evtchn_to_irq(evtchn);
535 struct irq_info *info = info_for_irq(irq);
539 if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
540 struct irq_data *data = irq_get_irq_data(irq);
542 irq_data_update_affinity(data, cpumask_of(cpu));
543 irq_data_update_effective_affinity(data, cpumask_of(cpu));
546 xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
548 channels_on_cpu_dec(info);
550 channels_on_cpu_inc(info);
554 * notify_remote_via_irq - send event to remote end of event channel via irq
555 * @irq: irq of event channel to send event to
557 * Unlike notify_remote_via_evtchn(), this is safe to use across
558 * save/restore. Notifications on a broken connection are silently dropped.
561 void notify_remote_via_irq(int irq)
563 evtchn_port_t evtchn = evtchn_from_irq(irq);
565 if (VALID_EVTCHN(evtchn))
566 notify_remote_via_evtchn(evtchn);
568 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
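/*
 * Typical producer-side pattern (hypothetical driver code, not from this
 * file): a frontend pushes requests onto its shared ring and then calls
 * notify_remote_via_irq(info->irq) to kick the backend, where info->irq
 * was obtained from bind_evtchn_to_irqhandler().
 */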
570 struct lateeoi_work {
571 struct delayed_work delayed;
572 spinlock_t eoi_list_lock;
573 struct list_head eoi_list;
576 static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);
578 static void lateeoi_list_del(struct irq_info *info)
580 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
583 spin_lock_irqsave(&eoi->eoi_list_lock, flags);
584 list_del_init(&info->eoi_list);
585 spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
588 static void lateeoi_list_add(struct irq_info *info)
590 struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
591 struct irq_info *elem;
592 u64 now = get_jiffies_64();
596 if (now < info->eoi_time)
597 delay = info->eoi_time - now;
601 spin_lock_irqsave(&eoi->eoi_list_lock, flags);
603 elem = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
605 if (!elem || info->eoi_time < elem->eoi_time) {
606 list_add(&info->eoi_list, &eoi->eoi_list);
607 mod_delayed_work_on(info->eoi_cpu, system_wq,
608 &eoi->delayed, delay);
610 list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
611 if (elem->eoi_time <= info->eoi_time)
614 list_add(&info->eoi_list, &elem->eoi_list);
617 spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
620 static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
622 evtchn_port_t evtchn;
624 unsigned int delay = 0;
626 evtchn = info->evtchn;
627 if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
631 struct xenbus_device *dev = info->u.interdomain;
632 unsigned int threshold = 1;
634 if (dev && dev->spurious_threshold)
635 threshold = dev->spurious_threshold;
637 if ((1 << info->spurious_cnt) < (HZ << 2)) {
638 if (info->spurious_cnt != 0xFF)
639 info->spurious_cnt++;
641 if (info->spurious_cnt > threshold) {
642 delay = 1 << (info->spurious_cnt - 1 - threshold);
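/*
 * Exponential backoff example with the default threshold of 1: the second
 * consecutive spurious event is delayed by 1 jiffy, the third by 2, the
 * fourth by 4, and so on. spurious_cnt itself stops growing once
 * 1 << spurious_cnt reaches 4 * HZ (checked above), which bounds the delay.
 */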
646 info->eoi_cpu = smp_processor_id();
647 info->eoi_time = get_jiffies_64() + delay;
649 atomic_add(delay, &dev->jiffies_eoi_delayed);
652 atomic_inc(&dev->spurious_events);
654 info->spurious_cnt = 0;
658 if (info->eoi_time &&
659 (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
660 lateeoi_list_add(info);
666 /* is_active hasn't been reset yet, do it now. */
667 smp_store_release(&info->is_active, 0);
668 do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
671 static void xen_irq_lateeoi_worker(struct work_struct *work)
673 struct lateeoi_work *eoi;
674 struct irq_info *info;
675 u64 now = get_jiffies_64();
678 eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
683 spin_lock_irqsave(&eoi->eoi_list_lock, flags);
685 info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
691 if (now < info->eoi_time) {
692 mod_delayed_work_on(info->eoi_cpu, system_wq,
694 info->eoi_time - now);
698 list_del_init(&info->eoi_list);
700 spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
704 xen_irq_lateeoi_locked(info, false);
707 spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
712 static void xen_cpu_init_eoi(unsigned int cpu)
714 struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);
716 INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
717 spin_lock_init(&eoi->eoi_list_lock);
718 INIT_LIST_HEAD(&eoi->eoi_list);
721 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
723 struct irq_info *info;
727 info = info_for_irq(irq);
730 xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
734 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
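/*
 * Hypothetical backend usage of the lateeoi model: bind the event with
 * bind_interdomain_evtchn_to_irqhandler_lateeoi(), do the real work, and
 * only afterwards call xen_irq_lateeoi(irq, 0), or
 * xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS) if the event carried no
 * work, so a misbehaving remote end cannot monopolize a CPU with an
 * event storm.
 */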
736 static void xen_irq_init(unsigned irq)
738 struct irq_info *info;
740 info = kzalloc(sizeof(*info), GFP_KERNEL);
742 panic("Unable to allocate metadata for IRQ%d\n", irq);
744 info->type = IRQT_UNBOUND;
746 INIT_RCU_WORK(&info->rwork, delayed_free_irq);
748 set_info_for_irq(irq, info);
750 * Interrupt affinity setting can be immediate. No point
751 * in delaying it until an interrupt is handled.
753 irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
755 INIT_LIST_HEAD(&info->eoi_list);
756 list_add_tail(&info->list, &xen_irq_list_head);
759 static int __must_check xen_allocate_irqs_dynamic(int nvec)
761 int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
764 for (i = 0; i < nvec; i++)
765 xen_irq_init(irq + i);
771 static inline int __must_check xen_allocate_irq_dynamic(void)
774 return xen_allocate_irqs_dynamic(1);
777 static int __must_check xen_allocate_irq_gsi(unsigned gsi)
782 * A PV guest has no concept of a GSI (since it has no ACPI
783 * nor access to/knowledge of the physical APICs). Therefore
784 * all IRQs are dynamically allocated from the entire IRQ space.
787 if (xen_pv_domain() && !xen_initial_domain())
788 return xen_allocate_irq_dynamic();
790 /* Legacy IRQ descriptors are already allocated by the arch. */
791 if (gsi < nr_legacy_irqs())
794 irq = irq_alloc_desc_at(gsi, -1);
801 static void xen_free_irq(unsigned irq)
803 struct irq_info *info = info_for_irq(irq);
808 if (!list_empty(&info->eoi_list))
809 lateeoi_list_del(info);
811 list_del(&info->list);
813 WARN_ON(info->refcnt > 0);
815 queue_rcu_work(system_wq, &info->rwork);
818 static void xen_evtchn_close(evtchn_port_t port)
820 struct evtchn_close close;
823 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
827 /* Not called for lateeoi events. */
828 static void event_handler_exit(struct irq_info *info)
830 smp_store_release(&info->is_active, 0);
831 clear_evtchn(info->evtchn);
834 static void pirq_query_unmask(int irq)
836 struct physdev_irq_status_query irq_status;
837 struct irq_info *info = info_for_irq(irq);
839 BUG_ON(info->type != IRQT_PIRQ);
841 irq_status.irq = pirq_from_irq(irq);
842 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
843 irq_status.flags = 0;
845 info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
846 if (irq_status.flags & XENIRQSTAT_needs_eoi)
847 info->u.pirq.flags |= PIRQ_NEEDS_EOI;
850 static void eoi_pirq(struct irq_data *data)
852 struct irq_info *info = info_for_irq(data->irq);
853 evtchn_port_t evtchn = info ? info->evtchn : 0;
854 struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
857 if (!VALID_EVTCHN(evtchn))
860 event_handler_exit(info);
862 if (pirq_needs_eoi(data->irq)) {
863 rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
868 static void mask_ack_pirq(struct irq_data *data)
870 disable_dynirq(data);
874 static unsigned int __startup_pirq(unsigned int irq)
876 struct evtchn_bind_pirq bind_pirq;
877 struct irq_info *info = info_for_irq(irq);
878 evtchn_port_t evtchn = evtchn_from_irq(irq);
881 BUG_ON(info->type != IRQT_PIRQ);
883 if (VALID_EVTCHN(evtchn))
886 bind_pirq.pirq = pirq_from_irq(irq);
887 /* NB. We are happy to share unless we are probing. */
888 bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
889 BIND_PIRQ__WILL_SHARE : 0;
890 rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
892 pr_warn("Failed to obtain physical IRQ %d\n", irq);
895 evtchn = bind_pirq.port;
897 pirq_query_unmask(irq);
899 rc = set_evtchn_to_irq(evtchn, irq);
903 info->evtchn = evtchn;
904 bind_evtchn_to_cpu(evtchn, 0, false);
906 rc = xen_evtchn_port_setup(evtchn);
911 do_unmask(info, EVT_MASK_REASON_EXPLICIT);
913 eoi_pirq(irq_get_irq_data(irq));
918 pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
919 xen_evtchn_close(evtchn);
923 static unsigned int startup_pirq(struct irq_data *data)
925 return __startup_pirq(data->irq);
928 static void shutdown_pirq(struct irq_data *data)
930 unsigned int irq = data->irq;
931 struct irq_info *info = info_for_irq(irq);
932 evtchn_port_t evtchn = evtchn_from_irq(irq);
934 BUG_ON(info->type != IRQT_PIRQ);
936 if (!VALID_EVTCHN(evtchn))
939 do_mask(info, EVT_MASK_REASON_EXPLICIT);
940 xen_irq_info_cleanup(info);
941 xen_evtchn_close(evtchn);
944 static void enable_pirq(struct irq_data *data)
949 static void disable_pirq(struct irq_data *data)
951 disable_dynirq(data);
954 int xen_irq_from_gsi(unsigned gsi)
956 struct irq_info *info;
958 list_for_each_entry(info, &xen_irq_list_head, list) {
959 if (info->type != IRQT_PIRQ)
962 if (info->u.pirq.gsi == gsi)
968 EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
970 static void __unbind_from_irq(unsigned int irq)
972 evtchn_port_t evtchn = evtchn_from_irq(irq);
973 struct irq_info *info = info_for_irq(irq);
975 if (info->refcnt > 0) {
977 if (info->refcnt != 0)
981 if (VALID_EVTCHN(evtchn)) {
982 unsigned int cpu = cpu_from_irq(irq);
983 struct xenbus_device *dev;
985 switch (type_from_irq(irq)) {
987 per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
990 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
993 dev = info->u.interdomain;
995 atomic_dec(&dev->event_channels);
1001 xen_irq_info_cleanup(info);
1002 xen_evtchn_close(evtchn);
1009 * Do not make any assumptions regarding the relationship between the
1010 * IRQ number returned here and the Xen pirq argument.
1012 * Note: We don't assign an event channel until the irq actually started
1013 * up. Return an existing irq if we've already got one for the gsi.
1015 * Shareable implies level triggered, not shareable implies edge triggered.
1018 int xen_bind_pirq_gsi_to_irq(unsigned gsi,
1019 unsigned pirq, int shareable, char *name)
1022 struct physdev_irq irq_op;
1025 mutex_lock(&irq_mapping_update_lock);
1027 irq = xen_irq_from_gsi(gsi);
1029 pr_info("%s: returning irq %d for gsi %u\n",
1030 __func__, irq, gsi);
1034 irq = xen_allocate_irq_gsi(gsi);
1041 /* Only the privileged domain can do this. For non-priv, the pcifront
1042 * driver provides a PCI bus that does the call to do exactly
1043 * this in the priv domain. */
1044 if (xen_initial_domain() &&
1045 HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
1051 ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
1052 shareable ? PIRQ_SHAREABLE : 0);
1054 __unbind_from_irq(irq);
1059 pirq_query_unmask(irq);
1060 /* We try to use the handler with the appropriate semantic for the
1061 * type of interrupt: if the interrupt is an edge triggered
1062 * interrupt we use handle_edge_irq.
1064 * On the other hand if the interrupt is level triggered we use
1065 * handle_fasteoi_irq like the native code does for this kind of interrupt.
1068 * Depending on the Xen version, pirq_needs_eoi might return true
1069 * not only for level triggered interrupts but for edge triggered
1070 * interrupts too. In any case Xen always honors the eoi mechanism,
1071 * not injecting any more pirqs of the same kind if the first one
1072 * hasn't received an eoi yet. Therefore using the fasteoi handler
1073 * is the right choice either way.
1076 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
1077 handle_fasteoi_irq, name);
1079 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
1080 handle_edge_irq, name);
1083 mutex_unlock(&irq_mapping_update_lock);
1088 #ifdef CONFIG_PCI_MSI
1089 int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
1092 struct physdev_get_free_pirq op_get_free_pirq;
1094 op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
1095 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
1097 WARN_ONCE(rc == -ENOSYS,
1098 "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");
1100 return rc ? -1 : op_get_free_pirq.pirq;
1103 int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
1104 int pirq, int nvec, const char *name, domid_t domid)
1108 mutex_lock(&irq_mapping_update_lock);
1110 irq = xen_allocate_irqs_dynamic(nvec);
1114 for (i = 0; i < nvec; i++) {
1115 irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
1117 ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
1118 i == 0 ? 0 : PIRQ_MSI_GROUP);
1123 ret = irq_set_msi_desc(irq, msidesc);
1127 mutex_unlock(&irq_mapping_update_lock);
1131 __unbind_from_irq(irq + nvec);
1132 mutex_unlock(&irq_mapping_update_lock);
1137 int xen_destroy_irq(int irq)
1139 struct physdev_unmap_pirq unmap_irq;
1140 struct irq_info *info = info_for_irq(irq);
1143 mutex_lock(&irq_mapping_update_lock);
1146 * If trying to remove a vector in a MSI group different
1147 * than the first one skip the PIRQ unmap unless this vector
1148 * is the first one in the group.
1150 if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
1151 unmap_irq.pirq = info->u.pirq.pirq;
1152 unmap_irq.domid = info->u.pirq.domid;
1153 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
1154 /* If another domain quits without making the pci_disable_msix
1155 * call, the Xen hypervisor takes care of freeing the PIRQs
1156 * (free_domain_pirqs).
1158 if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
1159 pr_info("domain %d does not have %d anymore\n",
1160 info->u.pirq.domid, info->u.pirq.pirq);
1162 pr_warn("unmap irq failed %d\n", rc);
1170 mutex_unlock(&irq_mapping_update_lock);
1174 int xen_irq_from_pirq(unsigned pirq)
1178 struct irq_info *info;
1180 mutex_lock(&irq_mapping_update_lock);
1182 list_for_each_entry(info, &xen_irq_list_head, list) {
1183 if (info->type != IRQT_PIRQ)
1186 if (info->u.pirq.pirq == pirq)
1191 mutex_unlock(&irq_mapping_update_lock);
1197 int xen_pirq_from_irq(unsigned irq)
1199 return pirq_from_irq(irq);
1201 EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
1203 static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
1204 struct xenbus_device *dev)
1209 if (evtchn >= xen_evtchn_max_channels())
1212 mutex_lock(&irq_mapping_update_lock);
1214 irq = get_evtchn_to_irq(evtchn);
1217 irq = xen_allocate_irq_dynamic();
1221 irq_set_chip_and_handler_name(irq, chip,
1222 handle_edge_irq, "event");
1224 ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
1226 __unbind_from_irq(irq);
1231 * New interdomain events are initially bound to vCPU0. This
1232 * is required to setup the event channel in the first
1233 * place and also important for UP guests because the
1234 * affinity setting is not invoked on them so nothing would bind the channel.
1237 bind_evtchn_to_cpu(evtchn, 0, false);
1239 struct irq_info *info = info_for_irq(irq);
1240 WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
1244 mutex_unlock(&irq_mapping_update_lock);
1249 int bind_evtchn_to_irq(evtchn_port_t evtchn)
1251 return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
1253 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
1255 int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
1257 return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
1259 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
1261 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
1263 struct evtchn_bind_ipi bind_ipi;
1264 evtchn_port_t evtchn;
1267 mutex_lock(&irq_mapping_update_lock);
1269 irq = per_cpu(ipi_to_irq, cpu)[ipi];
1272 irq = xen_allocate_irq_dynamic();
1276 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
1277 handle_percpu_irq, "ipi");
1279 bind_ipi.vcpu = xen_vcpu_nr(cpu);
1280 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1283 evtchn = bind_ipi.port;
1285 ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
1287 __unbind_from_irq(irq);
1292 * Force the affinity mask to the target CPU so proc shows
1293 * the correct target.
1295 bind_evtchn_to_cpu(evtchn, cpu, true);
1297 struct irq_info *info = info_for_irq(irq);
1298 WARN_ON(info == NULL || info->type != IRQT_IPI);
1302 mutex_unlock(&irq_mapping_update_lock);
1306 static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
1307 evtchn_port_t remote_port,
1308 struct irq_chip *chip)
1310 struct evtchn_bind_interdomain bind_interdomain;
1313 bind_interdomain.remote_dom = dev->otherend_id;
1314 bind_interdomain.remote_port = remote_port;
1316 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
1319 return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
1323 int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
1324 evtchn_port_t remote_port)
1326 return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
1329 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
1331 static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
1333 struct evtchn_status status;
1337 memset(&status, 0, sizeof(status));
1338 for (port = 0; port < xen_evtchn_max_channels(); port++) {
1339 status.dom = DOMID_SELF;
1341 rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
1344 if (status.status != EVTCHNSTAT_virq)
1346 if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
1355 * xen_evtchn_nr_channels - number of usable event channel ports
1357 * This may be less than the maximum supported by the current
1358 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum supported ABI limit.
1361 unsigned xen_evtchn_nr_channels(void)
1363 return evtchn_ops->nr_channels();
1365 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
1367 int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
1369 struct evtchn_bind_virq bind_virq;
1370 evtchn_port_t evtchn = 0;
1373 mutex_lock(&irq_mapping_update_lock);
1375 irq = per_cpu(virq_to_irq, cpu)[virq];
1378 irq = xen_allocate_irq_dynamic();
1383 irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
1384 handle_percpu_irq, "virq");
1386 irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
1387 handle_edge_irq, "virq");
1389 bind_virq.virq = virq;
1390 bind_virq.vcpu = xen_vcpu_nr(cpu);
1391 ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1394 evtchn = bind_virq.port;
1397 ret = find_virq(virq, cpu, &evtchn);
1401 ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
1403 __unbind_from_irq(irq);
1409 * Force the affinity mask for percpu interrupts so proc
1410 * shows the correct target.
1412 bind_evtchn_to_cpu(evtchn, cpu, percpu);
1414 struct irq_info *info = info_for_irq(irq);
1415 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
1419 mutex_unlock(&irq_mapping_update_lock);
1424 static void unbind_from_irq(unsigned int irq)
1426 mutex_lock(&irq_mapping_update_lock);
1427 __unbind_from_irq(irq);
1428 mutex_unlock(&irq_mapping_update_lock);
1431 static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
1432 irq_handler_t handler,
1433 unsigned long irqflags,
1434 const char *devname, void *dev_id,
1435 struct irq_chip *chip)
1439 irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
1442 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1444 unbind_from_irq(irq);
1451 int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
1452 irq_handler_t handler,
1453 unsigned long irqflags,
1454 const char *devname, void *dev_id)
1456 return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
1460 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
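/*
 * Minimal usage sketch (all names below are illustrative, not part of
 * this file):
 *
 *	static irqreturn_t blkfront_intr(int irq, void *dev_id)
 *	{
 *		// consume ring responses here
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = bind_evtchn_to_irqhandler(evtchn, blkfront_intr, 0,
 *					"blkif", info);
 *	if (err < 0)
 *		return err;
 *	info->irq = err;	// on success the return value is the irq
 */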
1462 int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
1463 irq_handler_t handler,
1464 unsigned long irqflags,
1465 const char *devname, void *dev_id)
1467 return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
1471 EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);
1473 static int bind_interdomain_evtchn_to_irqhandler_chip(
1474 struct xenbus_device *dev, evtchn_port_t remote_port,
1475 irq_handler_t handler, unsigned long irqflags,
1476 const char *devname, void *dev_id, struct irq_chip *chip)
1480 irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
1484 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1486 unbind_from_irq(irq);
1493 int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
1494 evtchn_port_t remote_port,
1495 irq_handler_t handler,
1496 unsigned long irqflags,
1497 const char *devname,
1500 return bind_interdomain_evtchn_to_irqhandler_chip(dev,
1501 remote_port, handler, irqflags, devname,
1502 dev_id, &xen_lateeoi_chip);
1504 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
1506 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
1507 irq_handler_t handler,
1508 unsigned long irqflags, const char *devname, void *dev_id)
1512 irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
1515 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1517 unbind_from_irq(irq);
1523 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
1525 int bind_ipi_to_irqhandler(enum ipi_vector ipi,
1527 irq_handler_t handler,
1528 unsigned long irqflags,
1529 const char *devname,
1534 irq = bind_ipi_to_irq(ipi, cpu);
1538 irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
1539 retval = request_irq(irq, handler, irqflags, devname, dev_id);
1541 unbind_from_irq(irq);
1548 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1550 struct irq_info *info = info_for_irq(irq);
1554 free_irq(irq, dev_id);
1555 unbind_from_irq(irq);
1557 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1560 * xen_set_irq_priority() - set an event channel priority.
1561 * @irq: irq bound to an event channel.
1562 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
1564 int xen_set_irq_priority(unsigned irq, unsigned priority)
1566 struct evtchn_set_priority set_priority;
1568 set_priority.port = evtchn_from_irq(irq);
1569 set_priority.priority = priority;
1571 return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
1574 EXPORT_SYMBOL_GPL(xen_set_irq_priority);
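/*
 * Note: per-channel priorities are only honoured by the FIFO-based event
 * channel ABI; with the 2-level ABI the EVTCHNOP_set_priority hypercall
 * is expected to fail. A typical caller is the timer code, which may use
 * xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX) for its VIRQ.
 */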
1576 int evtchn_make_refcounted(evtchn_port_t evtchn)
1578 int irq = get_evtchn_to_irq(evtchn);
1579 struct irq_info *info;
1584 info = info_for_irq(irq);
1589 WARN_ON(info->refcnt != -1);
1595 EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
1597 int evtchn_get(evtchn_port_t evtchn)
1600 struct irq_info *info;
1603 if (evtchn >= xen_evtchn_max_channels())
1606 mutex_lock(&irq_mapping_update_lock);
1608 irq = get_evtchn_to_irq(evtchn);
1612 info = info_for_irq(irq);
1618 if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
1624 mutex_unlock(&irq_mapping_update_lock);
1628 EXPORT_SYMBOL_GPL(evtchn_get);
1630 void evtchn_put(evtchn_port_t evtchn)
1632 int irq = get_evtchn_to_irq(evtchn);
1633 if (WARN_ON(irq == -1))
1635 unbind_from_irq(irq);
1637 EXPORT_SYMBOL_GPL(evtchn_put);
1639 void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
1644 if (unlikely(vector == XEN_NMI_VECTOR)) {
1645 int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
1648 printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
1652 irq = per_cpu(ipi_to_irq, cpu)[vector];
1654 notify_remote_via_irq(irq);
1657 struct evtchn_loop_ctrl {
1663 void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
1666 struct irq_info *info;
1667 struct xenbus_device *dev;
1669 irq = get_evtchn_to_irq(port);
1674 * Check for timeout every 256 events.
1675 * We are setting the timeout value only after the first 256
1676 * events in order to not hurt the common case of few loop
1677 * iterations. The 256 is basically an arbitrary value.
1679 * In case we are hitting the timeout we need to defer all further
1680 * EOIs in order to ensure we leave the event handling loop sooner
1681 * rather than later.
1683 if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
1684 ktime_t kt = ktime_get();
1686 if (!ctrl->timeout) {
1687 kt = ktime_add_ms(kt,
1688 jiffies_to_msecs(event_loop_timeout));
1690 } else if (kt > ctrl->timeout) {
1691 ctrl->defer_eoi = true;
1695 info = info_for_irq(irq);
1696 if (xchg_acquire(&info->is_active, 1))
1699 dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
1701 atomic_inc(&dev->events);
1703 if (ctrl->defer_eoi) {
1704 info->eoi_cpu = smp_processor_id();
1705 info->irq_epoch = __this_cpu_read(irq_epoch);
1706 info->eoi_time = get_jiffies_64() + event_eoi_delay;
1709 generic_handle_irq(irq);
1712 int xen_evtchn_do_upcall(void)
1714 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
1715 int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
1716 int cpu = smp_processor_id();
1717 struct evtchn_loop_ctrl ctrl = { 0 };
1720 * When closing an event channel the associated IRQ must not be freed
1721 * until all cpus have left the event handling loop. This is ensured
1722 * by taking the rcu_read_lock() while handling events, as freeing of
1723 * the IRQ is handled via queue_rcu_work() _after_ closing the event channel.
1729 vcpu_info->evtchn_upcall_pending = 0;
1731 xen_evtchn_handle_events(cpu, &ctrl);
1733 BUG_ON(!irqs_disabled());
1735 virt_rmb(); /* Hypervisor can set upcall pending. */
1737 } while (vcpu_info->evtchn_upcall_pending);
1742 * Increment irq_epoch only now to defer EOIs only for
1743 * xen_irq_lateeoi() invocations occurring from inside the loop
1746 __this_cpu_inc(irq_epoch);
1750 EXPORT_SYMBOL_GPL(xen_evtchn_do_upcall);
1752 /* Rebind a new event channel to an existing irq. */
1753 void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
1755 struct irq_info *info = info_for_irq(irq);
1760 /* Make sure the irq is masked, since the new event channel
1761 will also be masked. */
1764 mutex_lock(&irq_mapping_update_lock);
1766 /* After resume the irq<->evtchn mappings are all cleared out */
1767 BUG_ON(get_evtchn_to_irq(evtchn) != -1);
1768 /* Expect irq to have been bound before,
1769 so there should be a proper type */
1770 BUG_ON(info->type == IRQT_UNBOUND);
1772 (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);
1774 mutex_unlock(&irq_mapping_update_lock);
1776 bind_evtchn_to_cpu(evtchn, info->cpu, false);
1778 /* Unmask the event channel. */
1782 /* Rebind an evtchn so that it gets delivered to a specific cpu */
1783 static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
1785 struct evtchn_bind_vcpu bind_vcpu;
1786 evtchn_port_t evtchn = info ? info->evtchn : 0;
1788 if (!VALID_EVTCHN(evtchn))
1791 if (!xen_support_evtchn_rebind())
1794 /* Send future instances of this interrupt to the target vcpu. */
1795 bind_vcpu.port = evtchn;
1796 bind_vcpu.vcpu = xen_vcpu_nr(tcpu);
1799 * Mask the event while changing the VCPU binding to prevent
1800 * it being delivered on an unexpected VCPU.
1802 do_mask(info, EVT_MASK_REASON_TEMPORARY);
1805 * If this fails, it usually just indicates that we're dealing with a
1806 * virq or IPI channel, which don't actually need to be rebound. Ignore
1807 * it, but don't do the xenlinux-level rebind in that case.
1809 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
1810 bind_evtchn_to_cpu(evtchn, tcpu, false);
1812 do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1818 * Find the CPU within @dest mask which has the least number of channels
1819 * assigned. This is not precise as the per cpu counts can be modified concurrently.
1822 static unsigned int select_target_cpu(const struct cpumask *dest)
1824 unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;
1826 for_each_cpu_and(cpu, dest, cpu_online_mask) {
1827 unsigned int curch = atomic_read(&channels_on_cpu[cpu]);
1829 if (curch < minch) {
1836 * Catch the unlikely case that dest contains no online CPUs. Can't recurse.
1839 if (best_cpu == UINT_MAX)
1840 return select_target_cpu(cpu_online_mask);
1845 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
1848 unsigned int tcpu = select_target_cpu(dest);
1851 ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
1853 irq_data_update_effective_affinity(data, cpumask_of(tcpu));
1858 static void enable_dynirq(struct irq_data *data)
1860 struct irq_info *info = info_for_irq(data->irq);
1861 evtchn_port_t evtchn = info ? info->evtchn : 0;
1863 if (VALID_EVTCHN(evtchn))
1864 do_unmask(info, EVT_MASK_REASON_EXPLICIT);
1867 static void disable_dynirq(struct irq_data *data)
1869 struct irq_info *info = info_for_irq(data->irq);
1870 evtchn_port_t evtchn = info ? info->evtchn : 0;
1872 if (VALID_EVTCHN(evtchn))
1873 do_mask(info, EVT_MASK_REASON_EXPLICIT);
1876 static void ack_dynirq(struct irq_data *data)
1878 struct irq_info *info = info_for_irq(data->irq);
1879 evtchn_port_t evtchn = info ? info->evtchn : 0;
1881 if (VALID_EVTCHN(evtchn))
1882 event_handler_exit(info);
1885 static void mask_ack_dynirq(struct irq_data *data)
1887 disable_dynirq(data);
1891 static void lateeoi_ack_dynirq(struct irq_data *data)
1893 struct irq_info *info = info_for_irq(data->irq);
1894 evtchn_port_t evtchn = info ? info->evtchn : 0;
1896 if (VALID_EVTCHN(evtchn)) {
1897 do_mask(info, EVT_MASK_REASON_EOI_PENDING);
1899 * Don't call event_handler_exit().
1900 * Need to keep is_active non-zero in order to ignore re-raised
1901 * events after cpu affinity changes while a lateeoi is pending.
1903 clear_evtchn(evtchn);
1907 static void lateeoi_mask_ack_dynirq(struct irq_data *data)
1909 struct irq_info *info = info_for_irq(data->irq);
1910 evtchn_port_t evtchn = info ? info->evtchn : 0;
1912 if (VALID_EVTCHN(evtchn)) {
1913 do_mask(info, EVT_MASK_REASON_EXPLICIT);
1914 event_handler_exit(info);
1918 static int retrigger_dynirq(struct irq_data *data)
1920 struct irq_info *info = info_for_irq(data->irq);
1921 evtchn_port_t evtchn = info ? info->evtchn : 0;
1923 if (!VALID_EVTCHN(evtchn))
1926 do_mask(info, EVT_MASK_REASON_TEMPORARY);
1928 do_unmask(info, EVT_MASK_REASON_TEMPORARY);
1933 static void restore_pirqs(void)
1935 int pirq, rc, irq, gsi;
1936 struct physdev_map_pirq map_irq;
1937 struct irq_info *info;
1939 list_for_each_entry(info, &xen_irq_list_head, list) {
1940 if (info->type != IRQT_PIRQ)
1943 pirq = info->u.pirq.pirq;
1944 gsi = info->u.pirq.gsi;
1947 /* save/restore of PT devices doesn't work, so at this point the
1948 * only devices present are GSI based emulated devices */
1952 map_irq.domid = DOMID_SELF;
1953 map_irq.type = MAP_PIRQ_TYPE_GSI;
1954 map_irq.index = gsi;
1955 map_irq.pirq = pirq;
1957 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1959 pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1960 gsi, irq, pirq, rc);
1965 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1967 __startup_pirq(irq);
1971 static void restore_cpu_virqs(unsigned int cpu)
1973 struct evtchn_bind_virq bind_virq;
1974 evtchn_port_t evtchn;
1977 for (virq = 0; virq < NR_VIRQS; virq++) {
1978 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1981 BUG_ON(virq_from_irq(irq) != virq);
1983 /* Get a new binding from Xen. */
1984 bind_virq.virq = virq;
1985 bind_virq.vcpu = xen_vcpu_nr(cpu);
1986 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1989 evtchn = bind_virq.port;
1991 /* Record the new mapping. */
1992 (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
1993 /* The affinity mask is still valid */
1994 bind_evtchn_to_cpu(evtchn, cpu, false);
1998 static void restore_cpu_ipis(unsigned int cpu)
2000 struct evtchn_bind_ipi bind_ipi;
2001 evtchn_port_t evtchn;
2004 for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
2005 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
2008 BUG_ON(ipi_from_irq(irq) != ipi);
2010 /* Get a new binding from Xen. */
2011 bind_ipi.vcpu = xen_vcpu_nr(cpu);
2012 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
2015 evtchn = bind_ipi.port;
2017 /* Record the new mapping. */
2018 (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
2019 /* The affinity mask is still valid */
2020 bind_evtchn_to_cpu(evtchn, cpu, false);
2024 /* Clear an irq's pending state, in preparation for polling on it */
2025 void xen_clear_irq_pending(int irq)
2027 struct irq_info *info = info_for_irq(irq);
2028 evtchn_port_t evtchn = info ? info->evtchn : 0;
2030 if (VALID_EVTCHN(evtchn))
2031 event_handler_exit(info);
2033 EXPORT_SYMBOL(xen_clear_irq_pending);
2034 void xen_set_irq_pending(int irq)
2036 evtchn_port_t evtchn = evtchn_from_irq(irq);
2038 if (VALID_EVTCHN(evtchn))
2042 bool xen_test_irq_pending(int irq)
2044 evtchn_port_t evtchn = evtchn_from_irq(irq);
2047 if (VALID_EVTCHN(evtchn))
2048 ret = test_evtchn(evtchn);
2053 /* Poll waiting for an irq to become pending with timeout. In the usual case,
2054 * the irq will be disabled so it won't deliver an interrupt. */
2055 void xen_poll_irq_timeout(int irq, u64 timeout)
2057 evtchn_port_t evtchn = evtchn_from_irq(irq);
2059 if (VALID_EVTCHN(evtchn)) {
2060 struct sched_poll poll;
2063 poll.timeout = timeout;
2064 set_xen_guest_handle(poll.ports, &evtchn);
2066 if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
2070 EXPORT_SYMBOL(xen_poll_irq_timeout);
2071 /* Poll waiting for an irq to become pending. In the usual case, the
2072 * irq will be disabled so it won't deliver an interrupt. */
2073 void xen_poll_irq(int irq)
2075 xen_poll_irq_timeout(irq, 0 /* no timeout */);
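/*
 * xen_poll_irq() is the building block used outside this file by e.g. the
 * Xen PV spinlock slow path: a waiting vCPU blocks in SCHEDOP_poll on its
 * per-CPU "lock kicker" event channel until the lock holder sends an event.
 */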
2078 /* Check whether the IRQ line is shared with other guests. */
2079 int xen_test_irq_shared(int irq)
2081 struct irq_info *info = info_for_irq(irq);
2082 struct physdev_irq_status_query irq_status;
2087 irq_status.irq = info->u.pirq.pirq;
2089 if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
2091 return !(irq_status.flags & XENIRQSTAT_shared);
2093 EXPORT_SYMBOL_GPL(xen_test_irq_shared);
2095 void xen_irq_resume(void)
2098 struct irq_info *info;
2100 /* New event-channel space is not 'live' yet. */
2101 xen_evtchn_resume();
2103 /* No IRQ <-> event-channel mappings. */
2104 list_for_each_entry(info, &xen_irq_list_head, list) {
2105 /* Zap event-channel binding */
2107 /* Adjust accounting */
2108 channels_on_cpu_dec(info);
2111 clear_evtchn_to_irq_all();
2113 for_each_possible_cpu(cpu) {
2114 restore_cpu_virqs(cpu);
2115 restore_cpu_ipis(cpu);
2121 static struct irq_chip xen_dynamic_chip __read_mostly = {
2124 .irq_disable = disable_dynirq,
2125 .irq_mask = disable_dynirq,
2126 .irq_unmask = enable_dynirq,
2128 .irq_ack = ack_dynirq,
2129 .irq_mask_ack = mask_ack_dynirq,
2131 .irq_set_affinity = set_affinity_irq,
2132 .irq_retrigger = retrigger_dynirq,
2135 static struct irq_chip xen_lateeoi_chip __read_mostly = {
2136 /* The chip name needs to contain "xen-dyn" for irqbalance to work. */
2137 .name = "xen-dyn-lateeoi",
2139 .irq_disable = disable_dynirq,
2140 .irq_mask = disable_dynirq,
2141 .irq_unmask = enable_dynirq,
2143 .irq_ack = lateeoi_ack_dynirq,
2144 .irq_mask_ack = lateeoi_mask_ack_dynirq,
2146 .irq_set_affinity = set_affinity_irq,
2147 .irq_retrigger = retrigger_dynirq,
2150 static struct irq_chip xen_pirq_chip __read_mostly = {
2153 .irq_startup = startup_pirq,
2154 .irq_shutdown = shutdown_pirq,
2155 .irq_enable = enable_pirq,
2156 .irq_disable = disable_pirq,
2158 .irq_mask = disable_dynirq,
2159 .irq_unmask = enable_dynirq,
2161 .irq_ack = eoi_pirq,
2162 .irq_eoi = eoi_pirq,
2163 .irq_mask_ack = mask_ack_pirq,
2165 .irq_set_affinity = set_affinity_irq,
2167 .irq_retrigger = retrigger_dynirq,
2170 static struct irq_chip xen_percpu_chip __read_mostly = {
2171 .name = "xen-percpu",
2173 .irq_disable = disable_dynirq,
2174 .irq_mask = disable_dynirq,
2175 .irq_unmask = enable_dynirq,
2177 .irq_ack = ack_dynirq,
2181 #ifdef CONFIG_XEN_PVHVM
2182 /* Vector callbacks are better than PCI interrupts to receive event
2183 * channel notifications because we can receive vector callbacks on any
2184 * vcpu and we don't need PCI support or APIC interactions. */
2185 void xen_setup_callback_vector(void)
2187 uint64_t callback_via;
2189 if (xen_have_vector_callback) {
2190 callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
2191 if (xen_set_callback_via(callback_via)) {
2192 pr_err("Request for Xen HVM callback vector failed\n");
2193 xen_have_vector_callback = false;
2199 * Setup per-vCPU vector-type callbacks. If this setup is unavailable,
2200 * fall back to the global vector-type callback.
2202 static __init void xen_init_setup_upcall_vector(void)
2204 if (!xen_have_vector_callback)
2207 if ((cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_UPCALL_VECTOR) &&
2208 !xen_set_upcall_vector(0))
2209 xen_percpu_upcall = true;
2210 else if (xen_feature(XENFEAT_hvm_callback_vector))
2211 xen_setup_callback_vector();
2213 xen_have_vector_callback = false;
2216 int xen_set_upcall_vector(unsigned int cpu)
2219 xen_hvm_evtchn_upcall_vector_t op = {
2220 .vector = HYPERVISOR_CALLBACK_VECTOR,
2221 .vcpu = per_cpu(xen_vcpu_id, cpu),
2224 rc = HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &op);
2228 /* Trick toolstack to think we are enlightened. */
2230 rc = xen_set_callback_via(1);
2235 static __init void xen_alloc_callback_vector(void)
2237 if (!xen_have_vector_callback)
2240 pr_info("Xen HVM callback vector for event delivery is enabled\n");
2241 alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
2244 void xen_setup_callback_vector(void) {}
2245 static inline void xen_init_setup_upcall_vector(void) {}
2246 int xen_set_upcall_vector(unsigned int cpu) { return 0; }
2247 static inline void xen_alloc_callback_vector(void) {}
2248 #endif /* CONFIG_XEN_PVHVM */
2249 #endif /* CONFIG_X86 */
2251 bool xen_fifo_events = true;
2252 module_param_named(fifo_events, xen_fifo_events, bool, 0);
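/* Booting with xen.fifo_events=0 forces the older 2-level event channel ABI. */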
2254 static int xen_evtchn_cpu_prepare(unsigned int cpu)
2258 xen_cpu_init_eoi(cpu);
2260 if (evtchn_ops->percpu_init)
2261 ret = evtchn_ops->percpu_init(cpu);
2266 static int xen_evtchn_cpu_dead(unsigned int cpu)
2270 if (evtchn_ops->percpu_deinit)
2271 ret = evtchn_ops->percpu_deinit(cpu);
2276 void __init xen_init_IRQ(void)
2279 evtchn_port_t evtchn;
2281 if (xen_fifo_events)
2282 ret = xen_evtchn_fifo_init();
2284 xen_evtchn_2l_init();
2285 xen_fifo_events = false;
2288 xen_cpu_init_eoi(smp_processor_id());
2290 cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
2291 "xen/evtchn:prepare",
2292 xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);
2294 evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
2295 sizeof(*evtchn_to_irq), GFP_KERNEL);
2296 BUG_ON(!evtchn_to_irq);
2298 /* No event channels are 'live' right now. */
2299 for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
2300 mask_evtchn(evtchn);
2302 pirq_needs_eoi = pirq_needs_eoi_flag;
2305 if (xen_pv_domain()) {
2306 if (xen_initial_domain())
2307 pci_xen_initial_domain();
2309 xen_init_setup_upcall_vector();
2310 xen_alloc_callback_vector();
2313 if (xen_hvm_domain()) {
2315 /* pci_xen_hvm_init must be called after native_init_IRQ so that
2316 * __acpi_register_gsi can point at the right function */
2320 struct physdev_pirq_eoi_gmfn eoi_gmfn;
2322 pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
2323 eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
2324 rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
2326 free_page((unsigned long) pirq_eoi_map);
2327 pirq_eoi_map = NULL;
2329 pirq_needs_eoi = pirq_check_eoi_map;