/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 * Jiang Liu <jiang.liu@linux.intel.com>
 *	Enable support of hierarchical irqdomains
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <asm/irqdomain.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>
struct apic_chip_data {
	struct irq_cfg		cfg;
	cpumask_var_t		domain;
	cpumask_var_t		old_domain;
	u8			move_in_progress : 1;
};
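/*
 * In apic_chip_data above, @cfg carries the currently programmed vector
 * and destination APIC ID, @domain is the set of cpus the vector is
 * installed on, and @old_domain tracks the cpus which still reference a
 * previous vector and are waiting for the cleanup IPI.
 */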
struct irq_domain *x86_vector_domain;
EXPORT_SYMBOL_GPL(x86_vector_domain);
static DEFINE_RAW_SPINLOCK(vector_lock);
static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
static struct irq_chip lapic_controller;
#ifdef CONFIG_X86_IO_APIC
static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
#endif
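/*
 * vector_lock protects the per-cpu vector_irq[] tables and the scratch
 * cpumasks above; the masks are shared temporaries for the search in
 * __assign_irq_vector() and must only be used with vector_lock held.
 */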
void lock_vector_lock(void)
{
	/*
	 * Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector().
	 */
	raw_spin_lock(&vector_lock);
}
void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}
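/*
 * The vector domain sits at the bottom of the irqdomain hierarchy, so
 * following the parent_data chain downwards always ends at an irq_data
 * owned by this file, whose chip_data is the apic_chip_data.
 */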
static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
{
	if (!irq_data)
		return NULL;

	while (irq_data->parent_data)
		irq_data = irq_data->parent_data;

	return irq_data->chip_data;
}
struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);

	return data ? &data->cfg : NULL;
}
EXPORT_SYMBOL_GPL(irqd_cfg);
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irqd_cfg(irq_get_irq_data(irq));
}
static struct apic_chip_data *alloc_apic_chip_data(int node)
{
	struct apic_chip_data *data;

	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
	if (!data)
		return NULL;
	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
		goto out_data;
	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
		goto out_domain;
	return data;
out_domain:
	free_cpumask_var(data->domain);
out_data:
	kfree(data);
	return NULL;
}
static void free_apic_chip_data(struct apic_chip_data *data)
{
	free_cpumask_var(data->domain);
	free_cpumask_var(data->old_domain);
	kfree(data);
}
static int __assign_irq_vector(int irq, struct apic_chip_data *d,
			       const struct cpumask *mask,
			       struct irq_data *irqdata)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
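	/*
	 * Worked example of the spreading above, assuming the conventional
	 * VECTOR_OFFSET_START of 1: the scan starts at vector 0x21 and steps
	 * by 16 (0x21, 0x31, 0x41, ...), touching each priority level
	 * (vector >> 4) once before it wraps at FIRST_SYSTEM_VECTOR, bumps
	 * the offset and rescans from 0x22. Vectors reserved in
	 * @used_vectors, such as the 0x80 syscall gate, are skipped by the
	 * test_bit() check below.
	 */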
	int cpu, vector;

	/*
	 * If there is still a move in progress or the previous move has not
	 * been cleaned up completely, tell the caller to come back later.
	 */
	if (d->move_in_progress ||
	    cpumask_intersects(d->old_domain, cpu_online_mask))
		return -EBUSY;

	/* Only try and allocate irqs on cpus that are present */
	cpumask_clear(d->old_domain);
	cpumask_clear(searched_cpumask);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, offset;

		/* Get the possible target cpus for @mask/@cpu from the apic */
		apic->vector_allocation_domain(cpu, vector_cpumask, mask);

		/*
		 * Clear the offline cpus from @vector_cpumask for searching
		 * and verify whether the result overlaps with @mask. If true,
		 * then the call to apic->cpu_mask_to_apicid() below will
		 * succeed as well. If not, no point in trying to find a
		 * vector in this mask.
		 */
		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
		if (!cpumask_intersects(vector_searchmask, mask))
			goto next_cpu;

		if (cpumask_subset(vector_cpumask, d->domain)) {
			if (cpumask_equal(vector_cpumask, d->domain))
				goto success;
			/*
			 * Mark the cpus which are no longer in the mask for
			 * cleanup.
			 */
			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
			vector = d->cfg.vector;
			goto update;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= FIRST_SYSTEM_VECTOR) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		/* If the search wrapped around, try the next cpu */
		if (unlikely(current_vector == vector))
			goto next_cpu;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu(new_cpu, vector_searchmask) {
			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		/* Schedule the old vector for cleanup on all cpus */
		if (d->cfg.vector)
			cpumask_copy(d->old_domain, d->domain);
		for_each_cpu(new_cpu, vector_searchmask)
			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
		goto update;

next_cpu:
		/*
		 * We exclude the current @vector_cpumask from the requested
		 * @mask and try again with the next online cpu in the
		 * result. We cannot modify @mask, so we use @vector_cpumask
		 * as a temporary buffer here as it will be reassigned when
		 * calling apic->vector_allocation_domain() above.
		 */
		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
		continue;
	}
	return -ENOSPC;
update:
	/*
	 * Exclude offline cpus from the cleanup mask and set the
	 * move_in_progress flag when the result is not empty.
	 */
	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
	d->move_in_progress = !cpumask_empty(d->old_domain);
	d->cfg.old_vector = d->move_in_progress ? d->cfg.vector : 0;
	d->cfg.vector = vector;
	cpumask_copy(d->domain, vector_cpumask);
success:
	/*
	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
	 * as we already established that mask & d->domain & cpu_online_mask
	 * is not empty.
	 *
	 * vector_searchmask is a subset of d->domain and has the offline
	 * cpus masked out.
	 */
	cpumask_and(vector_searchmask, vector_searchmask, mask);
	BUG_ON(apic->cpu_mask_to_apicid(vector_searchmask, irqdata,
					&d->cfg.dest_apicid));
	return 0;
}
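/*
 * __assign_irq_vector() returns 0 on success, -EBUSY when a previous
 * move has not been cleaned up yet, and -ENOSPC when no vector is
 * available on any online cpu in @mask.
 */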
static int assign_irq_vector(int irq, struct apic_chip_data *data,
			     const struct cpumask *mask,
			     struct irq_data *irqdata)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, data, mask, irqdata);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
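/*
 * Allocation policy: an explicit mask in @info wins; otherwise cpus
 * local to @node are tried first, with apic->target_cpus() as the
 * fallback when the node-local allocation fails.
 */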
static int assign_irq_vector_policy(int irq, int node,
				    struct apic_chip_data *data,
				    struct irq_alloc_info *info,
				    struct irq_data *irqdata)
{
	if (info && info->mask)
		return assign_irq_vector(irq, data, info->mask, irqdata);
	if (node != NUMA_NO_NODE &&
	    assign_irq_vector(irq, data, cpumask_of_node(node), irqdata) == 0)
		return 0;
	return assign_irq_vector(irq, data, apic->target_cpus(), irqdata);
}
static void clear_irq_vector(int irq, struct apic_chip_data *data)
{
	struct irq_desc *desc;
	int cpu, vector;

	if (!data->cfg.vector)
		return;

	vector = data->cfg.vector;
	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;

	data->cfg.vector = 0;
	cpumask_clear(data->domain);

	/*
	 * If move is in progress or the old_domain mask is not empty,
	 * i.e. the cleanup IPI has not been processed yet, we need to remove
	 * the old references to desc from all cpus' vector tables.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain))
		return;

	desc = irq_to_desc(irq);
	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != desc)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
			break;
		}
	}
	data->move_in_progress = 0;
}
void init_irq_alloc_info(struct irq_alloc_info *info,
			 const struct cpumask *mask)
{
	memset(info, 0, sizeof(*info));
	info->mask = mask;
}
void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
{
	if (src)
		*dst = *src;
	else
		memset(dst, 0, sizeof(*dst));
}
static void x86_vector_free_irqs(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct apic_chip_data *apic_data;
	struct irq_data *irq_data;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
		if (irq_data && irq_data->chip_data) {
			raw_spin_lock_irqsave(&vector_lock, flags);
			clear_irq_vector(virq + i, irq_data->chip_data);
			apic_data = irq_data->chip_data;
			irq_domain_reset_irq_data(irq_data);
			raw_spin_unlock_irqrestore(&vector_lock, flags);
			free_apic_chip_data(apic_data);
#ifdef CONFIG_X86_IO_APIC
			if (virq + i < nr_legacy_irqs())
				legacy_irq_data[virq + i] = NULL;
#endif
		}
	}
}
static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct apic_chip_data *data;
	struct irq_data *irq_data;
	int i, err, node;

	if (disable_apic)
		return -ENXIO;

	/* Currently vector allocator can't guarantee contiguous allocations */
	if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
		return -ENOSYS;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_domain_get_irq_data(domain, virq + i);
		BUG_ON(!irq_data);
		node = irq_data_get_node(irq_data);
#ifdef CONFIG_X86_IO_APIC
		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
			data = legacy_irq_data[virq + i];
		else
#endif
			data = alloc_apic_chip_data(node);
		if (!data) {
			err = -ENOMEM;
			goto error;
		}

		irq_data->chip = &lapic_controller;
		irq_data->chip_data = data;
		irq_data->hwirq = virq + i;

		/* Don't invoke affinity setter on deactivated interrupts */
		irqd_set_affinity_on_activate(irq_data);

		err = assign_irq_vector_policy(virq + i, node, data, info,
					       irq_data);
		if (err) {
			irq_data->chip_data = NULL;
			free_apic_chip_data(data);
			goto error;
		}
		/*
		 * If the apic destination mode is physical, then the
		 * effective affinity is restricted to a single target
		 * CPU. Mark the interrupt accordingly.
		 */
		if (!apic->irq_dest_mode)
			irqd_set_single_target(irq_data);
	}

	return 0;

error:
	x86_vector_free_irqs(domain, virq, i);
	return err;
}
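/*
 * These callbacks are invoked by the irqdomain core whenever interrupts
 * are allocated from or returned to the vector domain; the other x86
 * irqdomains (IO-APIC, MSI, HT) stack on top of it.
 */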
static const struct irq_domain_ops x86_vector_domain_ops = {
	.alloc	= x86_vector_alloc_irqs,
	.free	= x86_vector_free_irqs,
};
int __init arch_probe_nr_irqs(void)
{
	int nr;

	if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
		nr_irqs = NR_VECTORS * nr_cpu_ids;

	nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
	/*
	 * Leave room for MSI and HT dynamic irqs.
	 */
	if (gsi_top <= NR_IRQS_LEGACY)
		nr += 8 * nr_cpu_ids;
	else
		nr += gsi_top * 16;
#endif
	if (nr < nr_irqs)
		nr_irqs = nr;

	/*
	 * We don't know if the PIC is present at this point, so we need to
	 * do probe() to get the right number of legacy IRQs.
	 */
	return legacy_pic->probe();
}
#ifdef CONFIG_X86_IO_APIC
static void __init init_legacy_irqs(void)
{
	int i, node = cpu_to_node(0);
	struct apic_chip_data *data;

	/*
	 * For legacy IRQs, start by assigning irq0 to irq15 to
	 * ISA_IRQ_VECTOR(i) for all cpus.
	 */
	for (i = 0; i < nr_legacy_irqs(); i++) {
		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
		BUG_ON(!data);

		data->cfg.vector = ISA_IRQ_VECTOR(i);
		cpumask_setall(data->domain);
		irq_set_chip_data(i, data);
	}
}
#else
static inline void init_legacy_irqs(void) { }
#endif
int __init arch_early_irq_init(void)
{
	struct fwnode_handle *fn;

	init_legacy_irqs();

	fn = irq_domain_alloc_named_fwnode("VECTOR");
	BUG_ON(!fn);
	x86_vector_domain = irq_domain_create_tree(fn, &x86_vector_domain_ops,
						   NULL);
	BUG_ON(x86_vector_domain == NULL);
	irq_set_default_host(x86_vector_domain);

	arch_init_msi_domain(x86_vector_domain);
	arch_init_htirq_domain(x86_vector_domain);

	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));

	return arch_early_ioapic_init();
}
/* Initialize vector_irq on a new cpu */
static void __setup_vector_irq(int cpu)
{
	struct apic_chip_data *data;
	struct irq_desc *desc;
	int irq, vector;

	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		struct irq_data *idata = irq_desc_get_irq_data(desc);

		data = apic_chip_data(idata);
		if (!data || !cpumask_test_cpu(cpu, data->domain))
			continue;
		vector = data->cfg.vector;
		per_cpu(vector_irq, cpu)[vector] = desc;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		desc = per_cpu(vector_irq, cpu)[vector];
		if (IS_ERR_OR_NULL(desc))
			continue;

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!cpumask_test_cpu(cpu, data->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
	}
}
/*
 * Setup the vector to irq mappings. Must be called with vector_lock held.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	lockdep_assert_held(&vector_lock);
	/*
	 * On most platforms the legacy PIC delivers its interrupts to the
	 * boot cpu, but there are platforms where PIC interrupts are
	 * delivered to multiple cpus. If the legacy IRQ is handled by the
	 * legacy PIC, set up the static legacy vector to irq mapping for
	 * the new cpu that is coming online:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq);

	__setup_vector_irq(cpu);
}
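/*
 * Retrigger an interrupt by sending its vector as an IPI to the first
 * online cpu in the irq's domain; the target cpu then handles it as if
 * the device had raised it again.
 */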
static int apic_retrigger_irq(struct irq_data *irq_data)
{
	struct apic_chip_data *data = apic_chip_data(irq_data);
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(data->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(irqd_cfg(data));
	irq_move_irq(data);
	ack_APIC_irq();
}
static int apic_set_affinity(struct irq_data *irq_data,
			     const struct cpumask *dest, bool force)
{
	struct apic_chip_data *data = irq_data->chip_data;
	int err, irq = irq_data->irq;

	if (!IS_ENABLED(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(dest, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, data, dest, irq_data);
	return err ? err : IRQ_SET_MASK_OK;
}
static struct irq_chip lapic_controller = {
	.name			= "APIC",
	.irq_ack		= apic_ack_edge,
	.irq_set_affinity	= apic_set_affinity,
	.irq_retrigger		= apic_retrigger_irq,
};
#ifdef CONFIG_SMP
static void __send_cleanup_vector(struct apic_chip_data *data)
{
	raw_spin_lock(&vector_lock);
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
	data->move_in_progress = 0;
	if (!cpumask_empty(data->old_domain))
		apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
	raw_spin_unlock(&vector_lock);
}
void send_cleanup_vector(struct irq_cfg *cfg)
{
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (data->move_in_progress)
		__send_cleanup_vector(data);
}
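/*
 * Handler for IRQ_MOVE_CLEANUP_VECTOR: it runs on every cpu that is
 * still listed in an interrupt's old_domain and releases the stale
 * vector from this cpu's vector_irq[] table, unless that vector is
 * still pending in the local APIC's IRR.
 */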
asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	entering_ack_irq();

	/* Prevent vectors vanishing under us */
	raw_spin_lock(&vector_lock);

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		struct apic_chip_data *data;
		struct irq_desc *desc;
		unsigned int irr;

	retry:
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;

		if (!raw_spin_trylock(&desc->lock)) {
			raw_spin_unlock(&vector_lock);
			cpu_relax();
			raw_spin_lock(&vector_lock);
			goto retry;
		}

		data = apic_chip_data(irq_desc_get_irq_data(desc));
		if (!data)
			goto unlock;

		/*
		 * Nothing to cleanup if irq migration is in progress
		 * or this cpu is not set in the cleanup mask.
		 */
		if (data->move_in_progress ||
		    !cpumask_test_cpu(me, data->old_domain))
			goto unlock;

		/*
		 * We have two cases to handle here:
		 * 1) vector is unchanged but the target mask got reduced
		 * 2) vector and the target mask have changed
		 *
		 * #1 is obvious, but in #2 we have two vectors with the same
		 * irq descriptor: the old and the new vector. So we need to
		 * make sure that we only cleanup the old vector. The new
		 * vector has the current @vector number in the config and
		 * this cpu is part of the target mask. We better leave that
		 * one alone.
		 */
		if (vector == data->cfg.vector &&
		    cpumask_test_cpu(me, data->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		cpumask_clear_cpu(me, data->old_domain);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	raw_spin_unlock(&vector_lock);

	exiting_irq();
}
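/*
 * A move is complete once the interrupt has fired on the new vector on
 * a cpu in the new domain; only then is it safe to send the cleanup
 * IPI to the cpus in old_domain.
 */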
static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;
	struct apic_chip_data *data;

	data = container_of(cfg, struct apic_chip_data, cfg);
	if (likely(!data->move_in_progress))
		return;

	me = smp_processor_id();
	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
		__send_cleanup_vector(data);
}
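/*
 * The low level interrupt entry code stores the negated vector number
 * in pt_regs->orig_ax, so ~orig_ax recovers the vector this interrupt
 * actually arrived on.
 */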
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}
/*
 * Called from fixup_irqs() with @desc->lock held and interrupts disabled.
 */
void irq_force_complete_move(struct irq_desc *desc)
{
	struct irq_data *irqdata;
	struct apic_chip_data *data;
	struct irq_cfg *cfg;
	unsigned int cpu;

	/*
	 * The function is called for all descriptors regardless of which
	 * irqdomain they belong to. For example if an IRQ is provided by
	 * an irq_chip as part of a GPIO driver, the chip data for that
	 * descriptor is specific to the irq_chip in question.
	 *
	 * Check first that the chip_data is what we expect
	 * (apic_chip_data) before touching it any further.
	 */
	irqdata = irq_domain_get_irq_data(x86_vector_domain,
					  irq_desc_get_irq(desc));
	if (!irqdata)
		return;

	data = apic_chip_data(irqdata);
	cfg = data ? &data->cfg : NULL;

	if (!cfg)
		return;

	/*
	 * This is tricky. If the cleanup of @data->old_domain has not been
	 * done yet, then the following setaffinity call will fail with
	 * -EBUSY. This can leave the interrupt in a stale state.
	 *
	 * All CPUs are stuck in stop machine with interrupts disabled so
	 * calling __irq_complete_move() would be completely pointless.
	 */
	raw_spin_lock(&vector_lock);
	/*
	 * Clean out all offline cpus (including the outgoing one) from the
	 * old_domain mask.
	 */
	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);

	/*
	 * If move_in_progress is cleared and the old_domain mask is empty,
	 * then there is nothing to cleanup. fixup_irqs() will take care of
	 * the stale vectors on the outgoing cpu.
	 */
	if (!data->move_in_progress && cpumask_empty(data->old_domain)) {
		raw_spin_unlock(&vector_lock);
		return;
	}

	/*
	 * 1) The interrupt is in move_in_progress state. That means that we
	 *    have not seen an interrupt since the io_apic was reprogrammed to
	 *    the new vector.
	 *
	 * 2) The interrupt has fired on the new vector, but the cleanup IPIs
	 *    have not been processed yet.
	 */
	if (data->move_in_progress) {
		/*
		 * In theory there is a race:
		 *
		 * set_ioapic(new_vector) <-- Interrupt is raised before update
		 *			      is effective, i.e. it's raised on
		 *			      the old vector.
		 *
		 * So if the target cpu cannot handle that interrupt before
		 * the old vector is cleaned up, we get a spurious interrupt
		 * and in the worst case the ioapic irq line becomes stale.
		 *
		 * But in case of cpu hotplug this should be a non issue
		 * because if the affinity update happens right before all
		 * cpus rendezvous in stop machine, there is no way that the
		 * interrupt can be blocked on the target cpu because all cpus
		 * loop first with interrupts enabled in stop machine, so the
		 * old vector is not yet cleaned up when the interrupt fires.
		 *
		 * So the only way to run into this issue is if the delivery
		 * of the interrupt on the apic/system bus would be delayed
		 * beyond the point where the target cpu disables interrupts
		 * in stop machine. I doubt that it can happen, but at least
		 * there is a theoretical chance. Virtualization might be
		 * able to expose this, but AFAICT the IOAPIC emulation is not
		 * as stupid as the real hardware.
		 *
		 * Anyway, there is nothing we can do about that at this point
		 * w/o refactoring the whole fixup_irq() business completely.
		 * We print at least the irq number and the old vector number,
		 * so we have the necessary information when a problem in that
		 * area arises.
		 */
		pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n",
			irqdata->irq, cfg->old_vector);
	}
	/*
	 * If old_domain is not empty, then other cpus still have the irq
	 * descriptor set in their vector array. Clean it up.
	 */
	for_each_cpu(cpu, data->old_domain)
		per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED;

	/* Cleanup the leftovers of the (half finished) move */
	cpumask_clear(data->old_domain);
	data->move_in_progress = 0;
	raw_spin_unlock(&vector_lock);
}
#endif
static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}
static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	pr_debug("printing local APIC contents on CPU#%d/%d:\n",
		 smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	pr_info("... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			pr_debug("... APIC ARBPRI: %08x (%02x)\n",
				 v, v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		pr_debug("... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		pr_debug("... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	pr_debug("... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		pr_debug("... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	pr_debug("... APIC SPIV: %08x\n", v);

	pr_debug("... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	pr_debug("... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	pr_debug("... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		pr_debug("... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	pr_debug("... APIC ICR: %08x\n", (u32)icr);
	pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	pr_debug("... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		pr_debug("... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	pr_debug("... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	pr_debug("... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		pr_debug("... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	pr_debug("... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	pr_debug("... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	pr_debug("... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		pr_debug("... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		pr_debug("... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			pr_debug("... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}
static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}
static void __init print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!nr_legacy_irqs())
		return;

	pr_debug("\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	pr_debug("... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	pr_debug("... PIC IRR: %04x\n", v);

	/* OCW3: select the ISR for the next read from the command port */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	/* OCW3: switch the command port back to reading the IRR */
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	pr_debug("... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	pr_debug("... PIC ELCR: %04x\n", v);
}
static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);
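/*
 * Boot with "show_lapic=all" or "show_lapic=<n>" to choose how many
 * cpus print_ICs() below dumps. Note the dump is skipped entirely when
 * apic_verbosity is left at APIC_QUIET, e.g. without "apic=verbose" or
 * "apic=debug" on the command line.
 */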
static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);