/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

struct apic_chip_data {
	struct irq_cfg		cfg;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	u8			move_in_progress : 1;
};

struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif

void lock_vector_lock(void)
{
	/*
	 * Used to ensure that the set of online CPUs does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;

	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);

	return data ? &data->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}

static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *data;

	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
	if (!data)
		return NULL;
	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return data;
out_domain:
	free_cpumask_var(data->domain);
out_data:
	kfree(data);
	return NULL;
}

static void free_apic_chip_data(unsigned int virq, struct apic_chip_data *data)
{
#ifdef CONFIG_X86_IO_APIC
	if (virq < nr_legacy_irqs())
		legacy_irq_data[virq] = NULL;
#endif
	if (data) {
		free_cpumask_var(data->domain);
		free_cpumask_var(data->old_domain);
		kfree(data);
	}
}

static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
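	/*
	 * For example: vector 0x31 belongs to interrupt level 0x31 >> 4 == 3.
	 * The search below steps by 16 vectors (one full level) per probe,
	 * and current_offset rotates the starting position within a level
	 * between allocations, so consecutive IRQs spread across levels.
	 */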
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, vector;

	/*
	 * If there is still a move in progress or the previous move has not
	 * been cleaned up completely, tell the caller to come back later.
	 */
	if (d->move_in_progress ||
	    cpumask_intersects(d->old_domain, cpu_online_mask))
		return -EBUSY;

	/* Only try and allocate irqs on cpus that are present */
	cpumask_clear(d->old_domain);
	cpumask_clear(searched_cpumask);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;

		/* Get the possible target cpus for @mask/@cpu from the apic */
		apic->vector_allocation_domain(cpu, vector_cpumask, mask);

		/*
		 * Clear the offline cpus from @vector_cpumask for searching
		 * and verify whether the result overlaps with @mask. If true,
		 * then the call to apic->cpu_mask_to_apicid_and() below will
		 * succeed as well. If not, there is no point in trying to find
		 * a vector in this mask.
		 */
		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
		if (!cpumask_intersects(vector_searchmask, mask))
			goto next_cpu;

		if (cpumask_subset(vector_cpumask, d->domain)) {
			if (cpumask_equal(vector_cpumask, d->domain))
				goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
			vector = d->cfg.vector;
			goto update;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* If the search wrapped around, try the next cpu */
		if (unlikely(current_vector == vector))
			goto next_cpu;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu(new_cpu, vector_searchmask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Schedule the old vector for cleanup on all cpus */
		if (d->cfg.vector)
			cpumask_copy(d->old_domain, d->domain);
		for_each_cpu(new_cpu, vector_searchmask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		goto update;

next_cpu:
		/*
		 * We exclude the current @vector_cpumask from the requested
		 * @mask and try again with the next online cpu in the
		 * result. We cannot modify @mask, so we use @vector_cpumask
		 * as a temporary buffer here as it will be reassigned when
		 * calling apic->vector_allocation_domain() above.
		 */
		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
		continue;
	}
	return -ENOSPC;

update:
	/*
	 * Exclude offline cpus from the cleanup mask and set the
	 * move_in_progress flag when the result is not empty.
	 */
	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
	d->move_in_progress = !cpumask_empty(d->old_domain);
	d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
	d->cfg.vector = vector;
	cpumask_copy(d->domain, vector_cpumask);
success:
	/*
	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
	 * as we already established that mask & d->domain & cpu_online_mask
	 * is not empty.
	 */
	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
					    &d->cfg.dest_apicid));
	return 0;
}

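/*
 * __assign_irq_vector() relies on vector_lock being held by the caller;
 * assign_irq_vector() below is the wrapper that takes the lock.
 */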
static int assign_irq_vector(int irq, struct apic_chip_data *data,
			     const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, data, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *data,
				    struct irq_alloc_info *info)
{
	if (info && info->mask)
		return assign_irq_vector(irq, data, info->mask);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
		return 0;
	return assign_irq_vector(irq, data, apic->target_cpus());
}

static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	struct irq_desc *desc;
	int cpu, vector;

	if (!data->cfg.vector)
		return;

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	/*
	 * If move is in progress or the old_domain mask is not empty,
	 * i.e. the cleanup IPI has not been processed yet, we need to remove
	 * the old references to desc from all cpus vector tables.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain))
		return;

	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
		}
	}
	data->move_in_progress = 0;
}

void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}

void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}

static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apic_data;
	struct irq_data *irq_data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irq_data->chip_data);
			apic_data = irq_data->chip_data;
			irq_domain_reset_irq_data(irq_data);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(virq + i, apic_data);
		}
	}
}

static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *data;
	struct irq_data *irq_data;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		node = irq_data_get_node(irq_data);
#ifdef CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
			data = legacy_irq_data[virq + i];
		else
#endif
			data = alloc_apic_chip_data(node);
		if (!data) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;
		err = assign_irq_vector_policy(virq + i, node, data, info);
		if (err) {
			irq_data->chip_data = NULL;
			free_apic_chip_data(virq + i, data);
			goto error;
		}
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}

static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};

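/*
 * Sketch of how a child irqdomain (e.g. the MSI domain set up below) is
 * expected to reach the allocator above; child_alloc() is a hypothetical
 * name used only for illustration, while irq_domain_alloc_irqs_parent()
 * is the real hierarchy helper:
 *
 *	static int child_alloc(struct irq_domain *domain, unsigned int virq,
 *			       unsigned int nr_irqs, void *arg)
 *	{
 *		int ret;
 *
 *		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
 *		if (ret < 0)
 *			return ret;
 *		// program the child's hardware, e.g. compose an MSI message
 *		// from irqd_cfg(irq_domain_get_irq_data(domain, virq))
 *		return 0;
 *	}
 */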
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * Extra headroom for dynamically allocated MSI and HT irqs
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if the PIC is present at this point, so we need to do
	 * probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}

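/*
 * Worked example of the sizing above (assuming nr_legacy_irqs() == 16):
 * with gsi_top = 40 and 8 possible CPUs the baseline is
 * (40 + 16) + 8 * 8 = 120; since gsi_top > NR_IRQS_LEGACY, the MSI/HT
 * headroom adds 40 * 16 = 640, giving nr = 760.
 */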
#ifdef CONFIG_X86_IO_APIC
static void init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	/*
	 * For legacy IRQs, start by assigning irq0 to irq15 to
	 * ISA_IRQ_VECTOR(i) on all CPUs.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);

		data->cfg.vector = ISA_IRQ_VECTOR(i);
		cpumask_setall(data->domain);
		irq_set_chip_data(i, data);
	}
}
#else
static void init_legacy_irqs(void) { }
#endif

int __init arch_early_irq_init(void)
{
	init_legacy_irqs();

	x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
						NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}

/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the in-use vectors */
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
}

/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	lockdep_assert_held(&vector_lock);
	/*
	 * On most platforms the legacy PIC delivers interrupts to the boot
	 * cpu only, but on some platforms PIC interrupts are delivered to
	 * multiple cpus. If a legacy IRQ is handled by the legacy PIC, set
	 * up the static legacy vector to irq mapping for the cpu that is
	 * coming online:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}

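/*
 * Sketch of the expected calling pattern (the real callers live in the
 * CPU bringup path, not in this file):
 *
 *	lock_vector_lock();
 *	setup_vector_irq(smp_processor_id());
 *	unlock_vector_lock();
 */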
static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}

static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!IS_ENABLED(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest);
	return err ? err : IRQ_SET_MASK_OK;
}

static struct irq_chip lapic_controller = {
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};

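/*
 * lapic_controller is installed as the irq_chip of every irq_data
 * allocated by x86_vector_alloc_irqs() above.
 */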
#ifdef CONFIG_SMP
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	raw_spin_lock(&vector_lock);
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
	data->move_in_progress = 0;
	if (!cpumask_empty(data->old_domain))
		apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
	raw_spin_unlock(&vector_lock);
}

void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (data->move_in_progress)
		__send_cleanup_vector(data);
}

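/*
 * Summary of the vector-move protocol implemented in this file:
 *
 *  1) __assign_irq_vector() installs the new vector, records the CPUs
 *     still holding the old one in ->old_domain and sets
 *     ->move_in_progress.
 *  2) When the first interrupt arrives on the new vector,
 *     __irq_complete_move() invokes __send_cleanup_vector(), which IPIs
 *     the CPUs in ->old_domain with IRQ_MOVE_CLEANUP_VECTOR.
 *  3) smp_irq_move_cleanup_interrupt() below runs on those CPUs and
 *     releases the stale vector_irq entries.
 */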
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	entering_ack_irq();

	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

retry:
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;

		if (!raw_spin_trylock(&desc->lock)) {
			raw_spin_unlock(&vector_lock);
			cpu_relax();
			raw_spin_lock(&vector_lock);
			goto retry;
		}

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;

		/*
		 * Nothing to cleanup if irq migration is in progress
		 * or this cpu is not set in the cleanup mask.
		 */
		if (data->move_in_progress ||
		    !cpumask_test_cpu(me, data->old_domain))
			goto unlock;

		/*
		 * We have two cases to handle here:
		 * 1) vector is unchanged but the target mask got reduced
		 * 2) vector and the target mask have changed
		 *
		 * #1 is obvious, but in #2 we have two vectors with the same
		 * irq descriptor: the old and the new vector. So we need to
		 * make sure that we only clean up the old vector. The new
		 * vector has the current @vector number in the config and
		 * this cpu is part of the target mask. We better leave that
		 * one alone.
		 */
		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is still
		 * registered in the cpu's IRR. If so, then this is not the
		 * best time to clean it up. Clean it up in the next attempt
		 * by sending another IRQ_MOVE_CLEANUP_VECTOR to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		cpumask_clear_cpu(me, data->old_domain);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	raw_spin_unlock(&vector_lock);

	exiting_irq();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!data->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
		__send_cleanup_vector(data);
}

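/*
 * The x86 entry stubs push the bitwise complement of the vector number
 * into pt_regs->orig_ax, so ~orig_ax below recovers the vector on which
 * this interrupt arrived.
 */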
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct irq_data *irqdata;
	struct apic_chip_data *data;
	struct irq_cfg *cfg;
	unsigned int cpu;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqdata = irq_domain_get_irq_data(x86_vector_domain,
					  irq_desc_get_irq(desc));
	if (!irqdata)
		return;

	data = apic_chip_data(irqdata);
	cfg = data ? &data->cfg : NULL;

	if (!cfg)
		return;

	/*
	 * This is tricky. If the cleanup of @data->old_domain has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 */
	raw_spin_lock(&vector_lock);
	/*
	 * Clean out all offline cpus (including the outgoing one) from the
	 * old_domain mask.
	 */
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);

	/*
	 * If move_in_progress is cleared and the old_domain mask is empty,
	 * then there is nothing to cleanup. fixup_irqs() will take care of
	 * the stale vectors on the outgoing cpu.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
		raw_spin_unlock(&vector_lock);
		return;
	}

	/*
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (data->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non-issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqdata->irq, cfg->old_vector);
	}
	/*
	 * If old_domain is not empty, then other cpus still have the irq
	 * descriptor set in their vector array. Clean it up.
	 */
	for_each_cpu(cpu, data->old_domain)
		per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;

	/* Cleanup the leftovers of the (half finished) move */
	cpumask_clear(data->old_domain);
	data->move_in_progress = 0;
	raw_spin_unlock(&vector_lock);
}
#endif

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i * 0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read is supported only by the 82489DX and by the local
	 * APIC of Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* OCW3: select the ISR for the next read, then switch back to IRR */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

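/*
 * Example usage of the parameter parsed above: booting with "show_lapic=2"
 * dumps the local APIC of the first two CPUs only, while "show_lapic=all"
 * raises the limit to CONFIG_NR_CPUS.
 */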
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* Don't print out if the apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);