// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC related interfaces to support IOAPIC, MSI, etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

#include <asm/trace/irq_vectors.h>
struct apic_chip_data {
	struct irq_cfg		hw_irq_cfg;
	unsigned int		vector;
	unsigned int		prev_vector;
	unsigned int		cpu;
	unsigned int		prev_cpu;
	unsigned int		irq;
	struct hlist_node	clist;
	unsigned int		move_in_progress	: 1,
				is_managed		: 1,
				can_reserve		: 1,
				has_reserved		: 1;
};
struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_searchmask;
static struct irq_chip lapic_controller;
static struct irq_matrix *vector_matrix;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct hlist_head, cleanup_list);
#endif
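
/*
 * vector_matrix is the per-CPU vector allocator; vector_lock serializes
 * all vector allocation, affinity changes and cleanup against CPU
 * hotplug. cleanup_list holds, per CPU, the apic_chip_data of
 * interrupts whose old vector still has to be released after a move.
 */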
void lock_vector_lock(void)
{
	/*
	 * Used so that the set of online CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}
static struct apic_chip_data *apic_chip_data(struct irq_data *irqd)
{
	if (!irqd)
		return NULL;

	while (irqd->parent_data)
		irqd = irqd->parent_data;

	return irqd->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	return apicd ? &apicd->hw_irq_cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *apicd;

	apicd = kzalloc_node(sizeof(*apicd), GFP_KERNEL, node);
	if (apicd)
		INIT_HLIST_NODE(&apicd->clist);
	return apicd;
}

static void free_apic_chip_data(struct apic_chip_data *apicd)
{
	kfree(apicd);
}
static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector,
				unsigned int cpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	lockdep_assert_held(&vector_lock);

	apicd->hw_irq_cfg.vector = vector;
	apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu);
	irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
	trace_vector_config(irqd->irq, vector, cpu,
			    apicd->hw_irq_cfg.dest_apicid);
}
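
/*
 * Install @newvec/@newcpu as the new target of @irqd and deal with a
 * possibly still installed previous vector: if the old target CPU is
 * online, the old vector cannot be released immediately because the
 * device might still raise an interrupt on it, so it is parked in
 * prev_vector/prev_cpu for the cleanup vector machinery below.
 */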
static void apic_update_vector(struct irq_data *irqd, unsigned int newvec,
			       unsigned int newcpu)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	struct irq_desc *desc = irq_data_to_desc(irqd);
	bool managed = irqd_affinity_is_managed(irqd);

	lockdep_assert_held(&vector_lock);

	trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector,
			    apicd->cpu);

	/*
	 * If there is no vector associated or if the associated vector is
	 * the shutdown vector, which is associated to make PCI/MSI
	 * shutdown mode work, then there is nothing to release. Clear out
	 * prev_vector for this and the offlined target case.
	 */
	apicd->prev_vector = 0;
	if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR)
		goto setnew;
	/*
	 * If the target CPU of the previous vector is online, then mark
	 * the vector as move in progress and store it for cleanup when the
	 * first interrupt on the new vector arrives. If the target CPU is
	 * offline then the regular release mechanism via the cleanup
	 * vector is not possible and the vector can be immediately freed
	 * in the underlying matrix allocator.
	 */
	if (cpu_online(apicd->cpu)) {
		apicd->move_in_progress = true;
		apicd->prev_vector = apicd->vector;
		apicd->prev_cpu = apicd->cpu;
	} else {
		irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector,
				managed);
	}

setnew:
	apicd->vector = newvec;
	apicd->cpu = newcpu;
	BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec]));
	per_cpu(vector_irq, newcpu)[newvec] = desc;
}
static void vector_assign_managed_shutdown(struct irq_data *irqd)
{
	unsigned int cpu = cpumask_first(cpu_online_mask);

	apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu);
}
static int reserve_managed_vector(struct irq_data *irqd)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apicd->is_managed = true;
	ret = irq_matrix_reserve_managed(vector_matrix, affmsk);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	trace_vector_reserve_managed(irqd->irq, ret);
	return ret;
}
static void reserve_irq_vector_locked(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	irq_matrix_reserve(vector_matrix);
	apicd->can_reserve = true;
	apicd->has_reserved = true;
	irqd_set_can_reserve(irqd);
	trace_vector_reserve(irqd->irq, 0);
	vector_assign_managed_shutdown(irqd);
}

static int reserve_irq_vector(struct irq_data *irqd)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	reserve_irq_vector_locked(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return 0;
}
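
/*
 * Allocate a vector for @irqd from the matrix, restricted to the CPUs
 * in @dest. Returns 0 on success, -EBUSY while a previous move is
 * still pending cleanup, or a negative error when the vector space in
 * @dest is exhausted.
 */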
static int
assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool resvd = apicd->has_reserved;
	unsigned int cpu = apicd->cpu;
	int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	/*
	 * If the current target CPU is online and in the new requested
	 * affinity mask, there is no point in moving the interrupt from
	 * one CPU to another.
	 */
	if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
		return 0;

	/*
	 * Careful here. @apicd might either have move_in_progress set or
	 * be enqueued for cleanup. Assigning a new vector would either
	 * leave a stale vector on some CPU around or in case of a pending
	 * cleanup corrupt the hlist.
	 */
	if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
		return -EBUSY;

	vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
	trace_vector_alloc(irqd->irq, vector, resvd, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);

	return 0;
}
static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	ret = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
static int assign_irq_vector_any_locked(struct irq_data *irqd)
{
	/* Get the affinity mask - either irq_default_affinity or (user) set */
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	int node = irq_data_get_node(irqd);

	if (node != NUMA_NO_NODE) {
		/* Try the intersection of @affmsk and the node mask */
		cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
		if (!assign_vector_locked(irqd, vector_searchmask))
			return 0;
	}

	/* Try the full affinity mask */
	cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
	if (!assign_vector_locked(irqd, vector_searchmask))
		return 0;

	if (node != NUMA_NO_NODE) {
		/* Try the node mask */
		if (!assign_vector_locked(irqd, cpumask_of_node(node)))
			return 0;
	}

	/* Try the full online mask */
	return assign_vector_locked(irqd, cpu_online_mask);
}
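
/*
 * Allocation policy at irq domain allocation time: managed interrupts
 * get a reservation guaranteed on all CPUs of their affinity mask, an
 * explicitly provided mask gets a real vector immediately, and
 * everything else gets only a global reservation which is replaced by
 * a real vector at activation time.
 */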
static int
assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info)
{
	if (irqd_affinity_is_managed(irqd))
		return reserve_managed_vector(irqd);
	if (info->mask)
		return assign_irq_vector(irqd, info->mask);
	/*
	 * Make only a global reservation with no guarantee. A real vector
	 * is associated at activation time.
	 */
	return reserve_irq_vector(irqd);
}
static int
assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
{
	const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int vector, cpu;

	cpumask_and(vector_searchmask, dest, affmsk);

	/* set_affinity might call here for nothing */
	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
		return 0;
	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
					  &cpu);
	trace_vector_alloc_managed(irqd->irq, vector, vector);
	if (vector < 0)
		return vector;
	apic_update_vector(irqd, vector, cpu);
	apic_update_irq_cfg(irqd, vector, cpu);
	return 0;
}
static void clear_irq_vector(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	bool managed = irqd_affinity_is_managed(irqd);
	unsigned int vector = apicd->vector;

	lockdep_assert_held(&vector_lock);

	if (!vector)
		return;

	trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector,
			   apicd->prev_cpu);

	per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->cpu, vector, managed);
	apicd->vector = 0;

	/* Clean up a pending move, if any */
	vector = apicd->prev_vector;
	if (!vector)
		return;

	per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN;
	irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, managed);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
	hlist_del_init(&apicd->clist);
}
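
/*
 * irqdomain deactivate callback: release the vector, but keep either a
 * reservation or the managed shutdown vector in place so that a
 * subsequent activation can succeed again.
 */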
static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	trace_vector_deactivate(irqd->irq, apicd->is_managed,
				apicd->can_reserve, false);

	/* Regular fixed assigned interrupt */
	if (!apicd->is_managed && !apicd->can_reserve)
		return;
	/* If the interrupt has a global reservation, nothing to do */
	if (apicd->has_reserved)
		return;

	raw_spin_lock_irqsave(&vector_lock, flags);
	clear_irq_vector(irqd);
	if (apicd->can_reserve)
		reserve_irq_vector_locked(irqd);
	else
		vector_assign_managed_shutdown(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}
static int activate_reserved(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	int ret;

	ret = assign_irq_vector_any_locked(irqd);
	if (!ret) {
		apicd->has_reserved = false;
		/*
		 * Core might have disabled reservation mode after
		 * allocating the irq descriptor. Ideally this should
		 * happen before allocation time, but that would require
		 * completely convoluted ways of transporting that
		 * information.
		 */
		if (!irqd_can_reserve(irqd))
			apicd->can_reserve = false;
	}

	/*
	 * Check to ensure that the effective affinity mask is a subset
	 * of the user supplied affinity mask, and warn the user if it
	 * is not.
	 */
	if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
			    irq_data_get_affinity_mask(irqd))) {
		pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
			irqd->irq);
	}

	return ret;
}
static int activate_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	int ret;

	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
		/* Something in the core code broke! Survive gracefully */
		pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
		return -EINVAL;
	}

	ret = assign_managed_vector(irqd, vector_searchmask);
	/*
	 * This should not happen. The vector reservation got buggered.
	 * Handle it gracefully.
	 */
	if (WARN_ON_ONCE(ret < 0)) {
		pr_err("Managed startup irq %u, no vector available\n",
		       irqd->irq);
	}
	return ret;
}
static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd,
			       bool reserve)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;
	int ret = 0;

	trace_vector_activate(irqd->irq, apicd->is_managed,
			      apicd->can_reserve, reserve);

	raw_spin_lock_irqsave(&vector_lock, flags);
	if (!apicd->can_reserve && !apicd->is_managed)
		assign_irq_vector_any_locked(irqd);
	else if (reserve || irqd_is_managed_and_shutdown(irqd))
		vector_assign_managed_shutdown(irqd);
	else if (apicd->is_managed)
		ret = activate_managed(irqd);
	else if (apicd->has_reserved)
		ret = activate_reserved(irqd);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
static void vector_free_reserved_and_managed(struct irq_data *irqd)
{
	const struct cpumask *dest = irq_data_get_affinity_mask(irqd);
	struct apic_chip_data *apicd = apic_chip_data(irqd);

	trace_vector_teardown(irqd->irq, apicd->is_managed,
			      apicd->has_reserved);

	if (apicd->has_reserved)
		irq_matrix_remove_reserved(vector_matrix);
	if (apicd->is_managed)
		irq_matrix_remove_managed(vector_matrix, dest);
}
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irqd && irqd->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(irqd);
			vector_free_reserved_and_managed(irqd);
			apicd = irqd->chip_data;
			irq_domain_reset_irq_data(irqd);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apicd);
		}
	}
}
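
/*
 * Legacy PIC interrupts have fixed vectors, ISA_IRQ_VECTOR(irq).
 * Returns true when the caller should reallocate the vector through
 * the regular policy and false when the preassigned vector must be
 * kept, which is the case for an already activated interrupt such as
 * the timer.
 */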
static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd,
				    struct apic_chip_data *apicd)
{
	unsigned long flags;
	bool realloc = false;

	apicd->vector = ISA_IRQ_VECTOR(virq);
	apicd->cpu = 0;

	raw_spin_lock_irqsave(&vector_lock, flags);
	/*
	 * If the interrupt is activated, then it must stay at this vector
	 * position. That's usually the timer interrupt (0).
	 */
	if (irqd_is_activated(irqd)) {
		trace_vector_setup(virq, true, 0);
		apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu);
	} else {
		/* Release the vector */
		apicd->can_reserve = true;
		irqd_set_can_reserve(irqd);
		clear_irq_vector(irqd);
		realloc = true;
	}
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return realloc;
}
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irqd = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irqd);
		node = irq_data_get_node(irqd);
		WARN_ON_ONCE(irqd->chip_data);
		apicd = alloc_apic_chip_data(node);
		if (!apicd) {
			err = -ENOMEM;
			goto error;
		}

		apicd->irq = virq + i;
		irqd->chip = &lapic_controller;
		irqd->chip_data = apicd;
		irqd->hwirq = virq + i;
		irqd_set_single_target(irqd);

		/* Don't invoke affinity setter on deactivated interrupts */
		irqd_set_affinity_on_activate(irqd);

		/*
		 * Legacy vectors are already assigned when the IOAPIC
		 * takes them over. They stay on the same vector. This is
		 * required for check_timer() to work correctly as it might
		 * switch back to legacy mode. Only update the hardware
		 * config.
		 */
		if (info->flags & X86_IRQ_ALLOC_LEGACY) {
			if (!vector_configure_legacy(virq + i, irqd, apicd))
				continue;
		}

		err = assign_irq_vector_policy(irqd, info);
		trace_vector_setup(virq + i, false, err);
		if (err) {
			irqd->chip_data = NULL;
			free_apic_chip_data(apicd);
			goto error;
		}
	}
	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
				  struct irq_data *irqd, int ind)
{
	struct apic_chip_data apicd;
	unsigned long flags;
	int irq;

	if (!irqd) {
		irq_matrix_debug_show(m, vector_matrix, ind);
		return;
	}

	irq = irqd->irq;

	if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) {
		seq_printf(m, "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq));
		seq_printf(m, "%*sTarget: Legacy PIC all CPUs\n", ind, "");
		return;
	}

	if (!irqd->chip_data) {
		seq_printf(m, "%*sVector: Not assigned\n", ind, "");
		return;
	}

	raw_spin_lock_irqsave(&vector_lock, flags);
	memcpy(&apicd, irqd->chip_data, sizeof(apicd));
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
	seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
	if (apicd.prev_vector) {
		seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
		seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
	}
	seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
	seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
	seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
	seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
	seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
}
#endif
static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc		= x86_vector_alloc_irqs,
	.free		= x86_vector_free_irqs,
	.activate	= x86_vector_activate,
	.deactivate	= x86_vector_deactivate,
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
	.debug_show	= x86_vector_debug_show,
#endif
};
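
/*
 * x86_vector_domain is the root of the hierarchical irqdomains on x86:
 * IOAPIC, MSI and irq remapping domains are created as children, so
 * walking irq_data->parent_data from a child ends up here (see
 * apic_chip_data() above). A child domain allocation is handed down
 * roughly like this (illustrative sketch, not a verbatim call chain):
 *
 *	irq_domain_alloc_irqs(child_domain, ...)
 *	  -> child_domain->ops->alloc()
 *	    -> irq_domain_alloc_irqs_parent()
 *	      -> x86_vector_alloc_irqs()
 */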
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI)
	/*
	 * Room for MSI and HT dynamic IRQs
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if the PIC is present at this point, so we need to
	 * do probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}
void lapic_assign_legacy_vector(unsigned int irq, bool replace)
{
	/*
	 * Use assign system here so it won't get accounted as allocated
	 * and movable in the cpu hotplug check and it prevents managed
	 * irq reservation from touching it.
	 */
	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
}
void __init lapic_update_legacy_vectors(void)
{
	unsigned int i;

	if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
		return;

	/*
	 * If the IO/APIC is disabled via config, kernel command line or
	 * lack of enumeration then all legacy interrupts are routed
	 * through the PIC. Make sure that they are marked as legacy
	 * vectors. PIC_CASCADE_IR has already been marked in
	 * lapic_assign_system_vectors().
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			lapic_assign_legacy_vector(i, true);
	}
}
void __init lapic_assign_system_vectors(void)
{
	unsigned int i, vector = 0;

	for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
		irq_matrix_assign_system(vector_matrix, vector, false);

	if (nr_legacy_irqs() > 1)
		lapic_assign_legacy_vector(PIC_CASCADE_IR, false);

	/* System vectors are reserved, bring the matrix online */
	irq_matrix_online(vector_matrix);

	/* Mark the preallocated legacy interrupts */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		if (i != PIC_CASCADE_IR)
			irq_matrix_assign(vector_matrix, ISA_IRQ_VECTOR(i));
	}
}
int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));

	/*
	 * Allocate the vector matrix allocator data structure and limit the
	 * search area.
	 */
	vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR,
					 FIRST_SYSTEM_VECTOR);
	BUG_ON(!vector_matrix);

	return arch_early_ioapic_init();
}
#ifdef CONFIG_SMP

static struct irq_desc *__setup_vector_irq(int vector)
{
	int isairq = vector - ISA_IRQ_VECTOR(0);

	/* Check whether the irq is in the legacy space */
	if (isairq < 0 || isairq >= nr_legacy_irqs())
		return VECTOR_UNUSED;
	/* Check whether the irq is handled by the IOAPIC */
	if (test_bit(isairq, &io_apic_irqs))
		return VECTOR_UNUSED;
	return irq_to_desc(isairq);
}
/* Online the local APIC infrastructure and initialize the vectors */
void lapic_online(void)
{
	unsigned int vector;

	lockdep_assert_held(&vector_lock);

	/* Online the vector matrix array for this CPU */
	irq_matrix_online(vector_matrix);

	/*
	 * The interrupt affinity logic never targets interrupts to offline
	 * CPUs. The exception are the legacy PIC interrupts. In general
	 * they are only targeted to CPU0, but depending on the platform
	 * they can be distributed to any online CPU in hardware. The
	 * kernel has no influence on that. So all active legacy vectors
	 * must be installed on all CPUs. All non legacy interrupts can be
	 * rerouted when the CPU goes offline.
	 */
	for (vector = 0; vector < NR_VECTORS; vector++)
		this_cpu_write(vector_irq[vector], __setup_vector_irq(vector));
}
void lapic_offline(void)
{
	lock_vector_lock();
	irq_matrix_offline(vector_matrix);
	unlock_vector_lock();
}
static int apic_set_affinity(struct irq_data *irqd,
			     const struct cpumask *dest, bool force)
{
	int err;

	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
		return -EIO;

	raw_spin_lock(&vector_lock);
	cpumask_and(vector_searchmask, dest, cpu_online_mask);
	if (irqd_affinity_is_managed(irqd))
		err = assign_managed_vector(irqd, vector_searchmask);
	else
		err = assign_vector_locked(irqd, vector_searchmask);
	raw_spin_unlock(&vector_lock);
	return err ? err : IRQ_SET_MASK_OK;
}

#else
# define apic_set_affinity	NULL
#endif
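
/*
 * Retrigger an interrupt by sending an IPI with the currently assigned
 * vector to the current target CPU.
 */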
static int apic_retrigger_irq(struct irq_data *irqd)
{
	struct apic_chip_data *apicd = apic_chip_data(irqd);
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	apic->send_IPI(apicd->cpu, apicd->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
void apic_ack_irq(struct irq_data *irqd)
{
	irq_move_irq(irqd);
	ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
	irq_complete_move(irqd_cfg(irqd));
	apic_ack_irq(irqd);
}

static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

#ifdef CONFIG_SMP
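
/*
 * Cleanup machinery for interrupt moves: once the first interrupt has
 * arrived on the new vector, the old vector is released either from
 * the cleanup IPI handler below or from irq_force_complete_move()
 * during CPU unplug.
 */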
static void free_moved_vector(struct apic_chip_data *apicd)
{
	unsigned int vector = apicd->prev_vector;
	unsigned int cpu = apicd->prev_cpu;
	bool managed = apicd->is_managed;

	/*
	 * This should never happen. Managed interrupts are not
	 * migrated except on CPU down, which does not involve the
	 * cleanup vector. But try to keep the accounting correct
	 * anyway.
	 */
	WARN_ON_ONCE(managed);

	trace_vector_free_moved(apicd->irq, cpu, vector, managed);
	irq_matrix_free(vector_matrix, cpu, vector, managed);
	per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	hlist_del_init(&apicd->clist);
	apicd->prev_vector = 0;
	apicd->move_in_progress = 0;
}
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
	struct apic_chip_data *apicd;
	struct hlist_node *tmp;

	entering_ack_irq();
	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
		unsigned int irr, vector = apicd->prev_vector;

		/*
		 * Paranoia: Check if the vector that needs to be cleaned
		 * up is registered in the APIC's IRR. If so, then this is
		 * not the best time to clean it up. Clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to this CPU. IRQ_MOVE_CLEANUP_VECTOR is the lowest
		 * priority external vector, so on return from this
		 * interrupt the device interrupt will happen first.
		 */
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1U << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			continue;
		}
		free_moved_vector(apicd);
	}

	raw_spin_unlock(&vector_lock);
	exiting_irq();
}
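
/*
 * Queue @apicd on the cleanup list of its previous target CPU and kick
 * that CPU with IRQ_MOVE_CLEANUP_VECTOR. If the previous CPU went
 * offline in the meantime, its vectors were already released by the
 * hotplug code, so only prev_vector needs to be reset here.
 */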
static void __send_cleanup_vector(struct apic_chip_data *apicd)
{
	unsigned int cpu;

	raw_spin_lock(&vector_lock);
	apicd->move_in_progress = 0;
	cpu = apicd->prev_cpu;
	if (cpu_online(cpu)) {
		hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu));
		apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		apicd->prev_vector = 0;
	}
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (apicd->move_in_progress)
		__send_cleanup_vector(apicd);
}
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	struct apic_chip_data *apicd;

	apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg);
	if (likely(!apicd->move_in_progress))
		return;

	if (vector == apicd->vector && apicd->cpu == smp_processor_id())
		__send_cleanup_vector(apicd);
}

void irq_complete_move(struct irq_cfg *cfg)
{
	/* The interrupt entry code stores the inverted vector in orig_ax */
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct apic_chip_data *apicd;
	struct irq_data *irqd;
	unsigned int vector;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqd = irq_domain_get_irq_data(x86_vector_domain,
				       irq_desc_get_irq(desc));
	if (!irqd)
		return;

	raw_spin_lock(&vector_lock);
	apicd = apic_chip_data(irqd);
	if (!apicd)
		goto unlock;

	/*
	 * If prev_vector is empty, no action required.
	 */
	vector = apicd->prev_vector;
	if (!vector)
		goto unlock;

	/*
	 * This is tricky. If the cleanup of the old vector has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 *
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (apicd->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target CPU cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of CPU hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * CPUs rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target CPU because all CPUs
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target CPU disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * without refactoring the whole fixup_irq() business
		 * completely. We print at least the irq number and the old
		 * vector number, so we have the necessary information when a
		 * problem in that area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqd->irq, vector);
	}
	free_moved_vector(apicd);
unlock:
	raw_spin_unlock(&vector_lock);
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Note, this is not accurate accounting, but at least good enough to
 * prevent that the actual interrupt move will run out of vectors.
 */
int lapic_can_unplug_cpu(void)
{
	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
	int ret = 0;

	raw_spin_lock(&vector_lock);
	tomove = irq_matrix_allocated(vector_matrix);
	avl = irq_matrix_available(vector_matrix, true);
	if (avl < tomove) {
		pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n",
			cpu, tomove, avl);
		ret = -ENOSPC;
		goto out;
	}
	rsvd = irq_matrix_reserved(vector_matrix);
	if (avl < rsvd) {
		pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n",
			rsvd, avl);
	}
out:
	raw_spin_unlock(&vector_lock);
	return ret;
}
#endif /* HOTPLUG_CPU */
#endif /* SMP */
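
/*
 * Diagnostic dumps of the local APIC(s) and the legacy PIC. Controlled
 * by apic_verbosity ("apic=debug"/"apic=verbose") and the show_lapic=
 * command line option, and run late in boot via print_ICs().
 */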
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i * 0x10));

	pr_cont("\n");
}
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* Switch both PICs to ISR via OCW3, read it, then back to IRR */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* Don't print out if the APIC is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);