2 * Local APIC handling, local APIC timers
4 * (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
7 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
8 * thanks to Eric Gilmore
10 * for testing these extensively.
11 * Maciej W. Rozycki : Various updates and fixes.
12 * Mikael Pettersson : Power Management for UP-APIC.
14 * Mikael Pettersson : PM converted to driver model.
17 #include <linux/perf_event.h>
18 #include <linux/kernel_stat.h>
19 #include <linux/mc146818rtc.h>
20 #include <linux/acpi_pmtmr.h>
21 #include <linux/clockchips.h>
22 #include <linux/interrupt.h>
23 #include <linux/bootmem.h>
24 #include <linux/ftrace.h>
25 #include <linux/ioport.h>
26 #include <linux/export.h>
27 #include <linux/syscore_ops.h>
28 #include <linux/delay.h>
29 #include <linux/timex.h>
30 #include <linux/i8253.h>
31 #include <linux/dmar.h>
32 #include <linux/init.h>
33 #include <linux/cpu.h>
34 #include <linux/dmi.h>
35 #include <linux/smp.h>
38 #include <asm/trace/irq_vectors.h>
39 #include <asm/irq_remapping.h>
40 #include <asm/perf_event.h>
41 #include <asm/x86_init.h>
42 #include <asm/pgalloc.h>
43 #include <linux/atomic.h>
44 #include <asm/barrier.h>
45 #include <asm/mpspec.h>
46 #include <asm/i8259.h>
47 #include <asm/proto.h>
49 #include <asm/io_apic.h>
57 #include <asm/hypervisor.h>
58 #include <asm/cpu_device_id.h>
59 #include <asm/intel-family.h>
60 #include <asm/irq_regs.h>
62 unsigned int num_processors;
64 unsigned disabled_cpus;
66 /* Processor that is doing the boot up */
67 unsigned int boot_cpu_physical_apicid = -1U;
68 EXPORT_SYMBOL_GPL(boot_cpu_physical_apicid);
70 u8 boot_cpu_apic_version;
73 * The highest APIC ID seen during enumeration.
75 static unsigned int max_physical_apicid;
78 * Bitmask of physically existing CPUs:
80 physid_mask_t phys_cpu_present_map;
83 * Processor to be disabled specified by kernel parameter
84 * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
85 * avoid undefined behaviour caused by sending INIT from AP to BSP.
87 static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;
90 * This variable controls which CPUs receive external NMIs. By default,
91 * external NMIs are delivered only to the BSP.
93 static int apic_extnmi = APIC_EXTNMI_BSP;
96 * Map cpu index to physical APIC ID
98 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
99 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid, BAD_APICID);
100 DEFINE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid, U32_MAX);
101 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
102 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
103 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_acpiid);
108 * On x86_32, the mapping between cpu and logical apicid may vary
109 * depending on apic in use. The following early percpu variable is
110 * used for the mapping. This is where the behaviors of x86_64 and 32
111 * actually diverge. Let's keep it ugly for now.
113 DEFINE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid, BAD_APICID);
115 /* Local APIC was disabled by the BIOS and enabled by the kernel */
116 static int enabled_via_apicbase;
119 * Handle interrupt mode configuration register (IMCR).
120 * This register controls whether the interrupt signals
121 * that reach the BSP come from the master PIC or from the
122 * local APIC. Before entering Symmetric I/O Mode, either
123 * the BIOS or the operating system must switch out of
124 * PIC Mode by changing the IMCR.
126 static inline void imcr_pic_to_apic(void)
128 /* select IMCR register */
130 /* NMI and 8259 INTR go through APIC */
134 static inline void imcr_apic_to_pic(void)
136 /* select IMCR register */
138 /* NMI and 8259 INTR go directly to BSP */
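/*
 * Editor's sketch (not from this file, assumptions noted): the IMCR is
 * reached through the standard 0x22/0x23 I/O port pair described in the MP
 * specification, so the two helpers above boil down to a register-select
 * write followed by a data write, roughly:
 */
static inline void imcr_route_example(bool via_apic)
{
	outb(0x70, 0x22);			/* select the IMCR register */
	outb(via_apic ? 0x01 : 0x00, 0x23);	/* 1: through APIC, 0: directly to BSP */
}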
144 * Knob to control our willingness to enable the local APIC.
148 static int force_enable_local_apic __initdata;
151 * APIC command line parameters
153 static int __init parse_lapic(char *arg)
155 if (IS_ENABLED(CONFIG_X86_32) && !arg)
156 force_enable_local_apic = 1;
157 else if (arg && !strncmp(arg, "notscdeadline", 13))
158 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
161 early_param("lapic", parse_lapic);
164 static int apic_calibrate_pmtmr __initdata;
165 static __init int setup_apicpmtimer(char *s)
167 apic_calibrate_pmtmr = 1;
171 __setup("apicpmtimer", setup_apicpmtimer);
174 unsigned long mp_lapic_addr;
176 /* Disable local APIC timer from the kernel commandline or via dmi quirk */
177 static int disable_apic_timer __initdata;
178 /* Local APIC timer works in C2 */
179 int local_apic_timer_c2_ok;
180 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
183 * Debug level, exported for io_apic.c
189 /* Have we found an MP table */
190 int smp_found_config;
192 static struct resource lapic_resource = {
193 .name = "Local APIC",
194 .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
197 unsigned int lapic_timer_frequency = 0;
199 static void apic_pm_activate(void);
201 static unsigned long apic_phys;
204 * Get the LAPIC version
206 static inline int lapic_get_version(void)
208 return GET_APIC_VERSION(apic_read(APIC_LVR));
212 * Check whether the APIC is integrated or a separate chip
214 static inline int lapic_is_integrated(void)
216 return APIC_INTEGRATED(lapic_get_version());
220 * Check whether this is a modern or a first-generation APIC
222 static int modern_apic(void)
224 /* AMD systems use old APIC versions, so check the CPU */
225 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
226 boot_cpu_data.x86 >= 0xf)
228 return lapic_get_version() >= 0x14;
232 * Right after this call the APIC becomes NOOP driven,
233 * so apic->write()/read() don't do anything
235 static void __init apic_disable(void)
237 pr_info("APIC: switched to apic NOOP\n");
241 void native_apic_wait_icr_idle(void)
243 while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
247 u32 native_safe_apic_wait_icr_idle(void)
254 send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
257 inc_irq_stat(icr_read_retry_count);
259 } while (timeout++ < 1000);
264 void native_apic_icr_write(u32 low, u32 id)
268 local_irq_save(flags);
269 apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
270 apic_write(APIC_ICR, low);
271 local_irq_restore(flags);
274 u64 native_apic_icr_read(void)
278 icr2 = apic_read(APIC_ICR2);
279 icr1 = apic_read(APIC_ICR);
281 return icr1 | ((u64)icr2 << 32);
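/*
 * Editor's note (illustrative sketch): in xAPIC mode the 64-bit ICR value is
 * split across two 32-bit registers, with the destination APIC ID in the
 * upper half (ICR2) and the command bits in the lower half, which is why the
 * read above stitches the two halves back together:
 */
static inline u64 example_compose_icr(u32 dest_apicid, u32 low)
{
	return ((u64)SET_APIC_DEST_FIELD(dest_apicid) << 32) | low;
}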
286 * get_physical_broadcast - Get number of physical broadcast IDs
288 int get_physical_broadcast(void)
290 return modern_apic() ? 0xff : 0xf;
295 * lapic_get_maxlvt - get the maximum number of local vector table entries
297 int lapic_get_maxlvt(void)
300 * - we always have the APIC integrated in 64-bit mode
301 * - 82489DXs do not report # of LVT entries
303 return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2;
311 #define APIC_DIVISOR 16
312 #define TSC_DIVISOR 8
315 * This function sets up the local APIC timer, with a timeout of
316 * 'clocks' APIC bus clock cycles. During calibration we actually call
317 * this function twice on the boot CPU, once with a bogus timeout
318 * value, second time for real. The other (noncalibrating) CPUs
319 * call this function only once, with the real, calibrated value.
321 * We do reads before writes even if unnecessary, to get around the
322 * P5 APIC double write bug.
324 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
326 unsigned int lvtt_value, tmp_value;
328 lvtt_value = LOCAL_TIMER_VECTOR;
330 lvtt_value |= APIC_LVT_TIMER_PERIODIC;
331 else if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
332 lvtt_value |= APIC_LVT_TIMER_TSCDEADLINE;
334 if (!lapic_is_integrated())
335 lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
338 lvtt_value |= APIC_LVT_MASKED;
340 apic_write(APIC_LVTT, lvtt_value);
342 if (lvtt_value & APIC_LVT_TIMER_TSCDEADLINE) {
344 * See Intel SDM: TSC-Deadline Mode chapter. In xAPIC mode,
345 * writing to the APIC LVTT and TSC_DEADLINE MSR isn't serialized.
346 * According to Intel, MFENCE can do the serialization here.
348 asm volatile("mfence" : : : "memory");
355 tmp_value = apic_read(APIC_TDCR);
356 apic_write(APIC_TDCR,
357 (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
361 apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
365 * Setup extended LVT, AMD specific
367 * Software should use the LVT offsets the BIOS provides. The offsets
368 * are determined by the subsystems using it like those for MCE
369 * threshold or IBS. On K8 only offset 0 (APIC500) and MCE interrupts
370 * are supported. Beginning with family 10h at least 4 offsets are
373 * Since the offsets must be consistent for all cores, we keep track
374 * of the LVT offsets in software and reserve the offset for the same
375 * vector also to be used on other cores. An offset is freed by
376 * setting the entry to APIC_EILVT_MASKED.
378 * If the BIOS is right, there should be no conflicts. Otherwise a
379 * "[Firmware Bug]: ..." error message is generated. However, if
380 * software does not properly determine the offsets, it is not
381 * necessarily a BIOS bug.
384 static atomic_t eilvt_offsets[APIC_EILVT_NR_MAX];
386 static inline int eilvt_entry_is_changeable(unsigned int old, unsigned int new)
388 return (old & APIC_EILVT_MASKED)
389 || (new == APIC_EILVT_MASKED)
390 || ((new & ~APIC_EILVT_MASKED) == old);
393 static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
395 unsigned int rsvd, vector;
397 if (offset >= APIC_EILVT_NR_MAX)
400 rsvd = atomic_read(&eilvt_offsets[offset]);
402 vector = rsvd & ~APIC_EILVT_MASKED; /* 0: unassigned */
403 if (vector && !eilvt_entry_is_changeable(vector, new))
404 /* may not change if vectors are different */
406 rsvd = atomic_cmpxchg(&eilvt_offsets[offset], rsvd, new);
407 } while (rsvd != new);
409 rsvd &= ~APIC_EILVT_MASKED;
410 if (rsvd && rsvd != vector)
411 pr_info("LVT offset %d assigned for vector 0x%02x\n",
418 * If mask=1, the LVT entry does not generate interrupts while mask=0
419 * enables the vector. See also the BKDGs. Must be called with
420 * preemption disabled.
423 int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
425 unsigned long reg = APIC_EILVTn(offset);
426 unsigned int new, old, reserved;
428 new = (mask << 16) | (msg_type << 8) | vector;
429 old = apic_read(reg);
430 reserved = reserve_eilvt_offset(offset, new);
432 if (reserved != new) {
433 pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
434 "vector 0x%x, but the register is already in use for "
435 "vector 0x%x on another cpu\n",
436 smp_processor_id(), reg, offset, new, reserved);
440 if (!eilvt_entry_is_changeable(old, new)) {
441 pr_err(FW_BUG "cpu %d, try to use APIC%lX (LVT offset %d) for "
442 "vector 0x%x, but the register is already in use for "
443 "vector 0x%x on this cpu\n",
444 smp_processor_id(), reg, offset, new, old);
448 apic_write(reg, new);
452 EXPORT_SYMBOL_GPL(setup_APIC_eilvt);
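/*
 * Editor's sketch of a hypothetical caller, illustrating the contract of
 * setup_APIC_eilvt(): the offset comes from the BIOS/subsystem, preemption
 * must be disabled, and a non-zero return signals an offset conflict. The
 * helper name and the offset source below are illustrative only.
 */
static int example_reserve_eilvt(u8 offset, u8 vector)
{
	int err;

	preempt_disable();
	/* fixed delivery mode, entry unmasked */
	err = setup_APIC_eilvt(offset, vector, APIC_EILVT_MSG_FIX, 0);
	preempt_enable();

	return err;
}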
455 * Program the next event, relative to now
457 static int lapic_next_event(unsigned long delta,
458 struct clock_event_device *evt)
460 apic_write(APIC_TMICT, delta);
464 static int lapic_next_deadline(unsigned long delta,
465 struct clock_event_device *evt)
469 /* This MSR is special and needs a special fence: */
473 wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
477 static int lapic_timer_shutdown(struct clock_event_device *evt)
481 /* Lapic used as dummy for broadcast ? */
482 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
485 v = apic_read(APIC_LVTT);
486 v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
487 apic_write(APIC_LVTT, v);
488 apic_write(APIC_TMICT, 0);
493 lapic_timer_set_periodic_oneshot(struct clock_event_device *evt, bool oneshot)
495 /* Lapic used as dummy for broadcast ? */
496 if (evt->features & CLOCK_EVT_FEAT_DUMMY)
499 __setup_APIC_LVTT(lapic_timer_frequency, oneshot, 1);
503 static int lapic_timer_set_periodic(struct clock_event_device *evt)
505 return lapic_timer_set_periodic_oneshot(evt, false);
508 static int lapic_timer_set_oneshot(struct clock_event_device *evt)
510 return lapic_timer_set_periodic_oneshot(evt, true);
514 * Local APIC timer broadcast function
516 static void lapic_timer_broadcast(const struct cpumask *mask)
519 apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
525 * The local apic timer can be used for any function which is CPU local.
527 static struct clock_event_device lapic_clockevent = {
529 .features = CLOCK_EVT_FEAT_PERIODIC |
530 CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
531 | CLOCK_EVT_FEAT_DUMMY,
533 .set_state_shutdown = lapic_timer_shutdown,
534 .set_state_periodic = lapic_timer_set_periodic,
535 .set_state_oneshot = lapic_timer_set_oneshot,
536 .set_state_oneshot_stopped = lapic_timer_shutdown,
537 .set_next_event = lapic_next_event,
538 .broadcast = lapic_timer_broadcast,
542 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
544 #define DEADLINE_MODEL_MATCH_FUNC(model, func) \
545 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&func }
547 #define DEADLINE_MODEL_MATCH_REV(model, rev) \
548 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)rev }
550 static __init u32 hsx_deadline_rev(void)
552 switch (boot_cpu_data.x86_stepping) {
553 case 0x02: return 0x3a; /* EP */
554 case 0x04: return 0x0f; /* EX */
560 static __init u32 bdx_deadline_rev(void)
562 switch (boot_cpu_data.x86_stepping) {
563 case 0x02: return 0x00000011;
564 case 0x03: return 0x0700000e;
565 case 0x04: return 0x0f00000c;
566 case 0x05: return 0x0e000003;
572 static __init u32 skx_deadline_rev(void)
574 switch (boot_cpu_data.x86_stepping) {
575 case 0x03: return 0x01000136;
576 case 0x04: return 0x02000014;
579 if (boot_cpu_data.x86_stepping > 4)
585 static const struct x86_cpu_id deadline_match[] __initconst = {
586 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev),
587 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020),
588 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
589 DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev),
591 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22),
592 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20),
593 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_GT3E, 0x17),
595 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_CORE, 0x25),
596 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_GT3E, 0x17),
598 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_MOBILE, 0xb2),
599 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_DESKTOP, 0xb2),
601 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_MOBILE, 0x52),
602 DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_KABYLAKE_DESKTOP, 0x52),
607 static __init bool apic_validate_deadline_timer(void)
609 const struct x86_cpu_id *m;
612 if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
614 if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
617 m = x86_match_cpu(deadline_match);
622 * Function pointers will have the MSB set due to address layout,
623 * immediate revisions will not.
625 if ((long)m->driver_data < 0)
626 rev = ((u32 (*)(void))(m->driver_data))();
628 rev = (u32)m->driver_data;
630 if (boot_cpu_data.microcode >= rev)
633 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
634 pr_err(FW_BUG "TSC_DEADLINE disabled due to Errata; "
635 "/*(DEBLOBBED)*/\n", rev);
640 * Setup the local APIC timer for this CPU. Copy the initialized values
641 * of the boot CPU and register the clock event in the framework.
643 static void setup_APIC_timer(void)
645 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
647 if (this_cpu_has(X86_FEATURE_ARAT)) {
648 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
649 /* Make LAPIC timer preferable over percpu HPET */
650 lapic_clockevent.rating = 150;
653 memcpy(levt, &lapic_clockevent, sizeof(*levt));
654 levt->cpumask = cpumask_of(smp_processor_id());
656 if (this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
657 levt->name = "lapic-deadline";
658 levt->features &= ~(CLOCK_EVT_FEAT_PERIODIC |
659 CLOCK_EVT_FEAT_DUMMY);
660 levt->set_next_event = lapic_next_deadline;
661 clockevents_config_and_register(levt,
662 tsc_khz * (1000 / TSC_DIVISOR),
665 clockevents_register_device(levt);
669 * Install the updated TSC frequency from recalibration at the TSC
670 * deadline clockevent devices.
672 static void __lapic_update_tsc_freq(void *info)
674 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
676 if (!this_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
679 clockevents_update_freq(levt, tsc_khz * (1000 / TSC_DIVISOR));
682 void lapic_update_tsc_freq(void)
685 * The clockevent device's ->mult and ->shift can both be
686 * changed. In order to avoid races, schedule the frequency
687 * update code on each CPU.
689 on_each_cpu(__lapic_update_tsc_freq, NULL, 0);
693 * In this function we calibrate the APIC bus clock to the external timer.
695 * We want to do the calibration only once since we want to have local timer
696 * irqs synchronous. CPUs connected by the same APIC bus have the very same bus
699 * This was previously done by reading the PIT/HPET and waiting for a wrap
700 * around to find out that a tick has elapsed. I have a box where the PIT
701 * readout is broken, so it never gets out of the wait loop again. This was
702 * also reported by others.
704 * Monitoring the jiffies value is inaccurate and the clockevents
705 * infrastructure allows us to do a simple substitution of the interrupt
708 * The calibration routine also uses the pm_timer when possible, as the PIT
709 * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
710 * back to normal later in the boot process).
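/*
 * Editor's sketch of the arithmetic used below (illustrative only): the
 * calibration counts how far the APIC timer, which runs at bus_clock /
 * APIC_DIVISOR, decrements across HZ/10 jiffies (100ms), and scales that
 * back to a per-jiffy count at the undivided bus clock:
 */
static inline unsigned long example_lapic_counts_per_jiffy(long delta_counts)
{
	/*
	 * E.g. a 100 MHz bus with APIC_DIVISOR == 16 and HZ == 250 yields
	 * delta_counts == 625000 over 100ms, i.e. 400000 counts per jiffy.
	 */
	return (delta_counts * APIC_DIVISOR) / (HZ / 10);
}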
713 #define LAPIC_CAL_LOOPS (HZ/10)
715 static __initdata int lapic_cal_loops = -1;
716 static __initdata long lapic_cal_t1, lapic_cal_t2;
717 static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
718 static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
719 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
722 * Temporary interrupt handler and polled calibration function.
724 static void __init lapic_cal_handler(struct clock_event_device *dev)
726 unsigned long long tsc = 0;
727 long tapic = apic_read(APIC_TMCCT);
728 unsigned long pm = acpi_pm_read_early();
730 if (boot_cpu_has(X86_FEATURE_TSC))
733 switch (lapic_cal_loops++) {
735 lapic_cal_t1 = tapic;
736 lapic_cal_tsc1 = tsc;
738 lapic_cal_j1 = jiffies;
741 case LAPIC_CAL_LOOPS:
742 lapic_cal_t2 = tapic;
743 lapic_cal_tsc2 = tsc;
744 if (pm < lapic_cal_pm1)
745 pm += ACPI_PM_OVRRUN;
747 lapic_cal_j2 = jiffies;
753 calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
755 const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
756 const long pm_thresh = pm_100ms / 100;
760 #ifndef CONFIG_X86_PM_TIMER
764 apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
766 /* Check, if the PM timer is available */
770 mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
772 if (deltapm > (pm_100ms - pm_thresh) &&
773 deltapm < (pm_100ms + pm_thresh)) {
774 apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
778 res = (((u64)deltapm) * mult) >> 22;
779 do_div(res, 1000000);
780 pr_warning("APIC calibration not consistent "
781 "with PM-Timer: %ldms instead of 100ms\n",(long)res);
783 /* Correct the lapic counter value */
784 res = (((u64)(*delta)) * pm_100ms);
785 do_div(res, deltapm);
786 pr_info("APIC delta adjusted to PM-Timer: "
787 "%lu (%ld)\n", (unsigned long)res, *delta);
790 /* Correct the tsc counter value */
791 if (boot_cpu_has(X86_FEATURE_TSC)) {
792 res = (((u64)(*deltatsc)) * pm_100ms);
793 do_div(res, deltapm);
794 apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
795 "PM-Timer: %lu (%ld)\n",
796 (unsigned long)res, *deltatsc);
797 *deltatsc = (long)res;
803 static int __init calibrate_APIC_clock(void)
805 struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
806 u64 tsc_perj = 0, tsc_start = 0;
807 unsigned long jif_start;
808 unsigned long deltaj;
809 long delta, deltatsc;
810 int pm_referenced = 0;
813 * Check if the lapic timer has already been calibrated by a platform-
814 * specific routine, such as the TSC calibration code. If so, we just fill
815 * in the clockevent structure and return.
818 if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) {
820 } else if (lapic_timer_frequency) {
821 apic_printk(APIC_VERBOSE, "lapic timer already calibrated %d\n",
822 lapic_timer_frequency);
823 lapic_clockevent.mult = div_sc(lapic_timer_frequency/APIC_DIVISOR,
824 TICK_NSEC, lapic_clockevent.shift);
825 lapic_clockevent.max_delta_ns =
826 clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
827 lapic_clockevent.max_delta_ticks = 0x7FFFFF;
828 lapic_clockevent.min_delta_ns =
829 clockevent_delta2ns(0xF, &lapic_clockevent);
830 lapic_clockevent.min_delta_ticks = 0xF;
831 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
835 apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
836 "calibrating APIC timer ...\n");
839 * There are platforms w/o global clockevent devices. Instead of
840 * making the calibration conditional on that, use a polling based
841 * approach everywhere.
846 * Setup the APIC counter to maximum. There is no way the lapic
847 * can underflow in the 100ms detection time frame
849 __setup_APIC_LVTT(0xffffffff, 0, 0);
852 * Methods to terminate the calibration loop:
853 * 1) Global clockevent if available (jiffies)
854 * 2) TSC if available and frequency is known
856 jif_start = READ_ONCE(jiffies);
860 tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
864 * Enable interrupts so the tick can fire, if a global
865 * clockevent device is available
869 while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
870 /* Wait for a tick to elapse */
873 u64 tsc_now = rdtsc();
874 if ((tsc_now - tsc_start) >= tsc_perj) {
875 tsc_start += tsc_perj;
879 unsigned long jif_now = READ_ONCE(jiffies);
881 if (time_after(jif_now, jif_start)) {
889 /* Invoke the calibration routine */
891 lapic_cal_handler(NULL);
897 /* Build delta t1-t2 as apic timer counts down */
898 delta = lapic_cal_t1 - lapic_cal_t2;
899 apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
901 deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
903 /* we trust the PM based calibration if possible */
904 pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
907 /* Calculate the scaled math multiplication factor */
908 lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
909 lapic_clockevent.shift);
910 lapic_clockevent.max_delta_ns =
911 clockevent_delta2ns(0x7FFFFFFF, &lapic_clockevent);
912 lapic_clockevent.max_delta_ticks = 0x7FFFFFFF;
913 lapic_clockevent.min_delta_ns =
914 clockevent_delta2ns(0xF, &lapic_clockevent);
915 lapic_clockevent.min_delta_ticks = 0xF;
917 lapic_timer_frequency = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
919 apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
920 apic_printk(APIC_VERBOSE, "..... mult: %u\n", lapic_clockevent.mult);
921 apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
922 lapic_timer_frequency);
924 if (boot_cpu_has(X86_FEATURE_TSC)) {
925 apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
927 (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
928 (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
931 apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
933 lapic_timer_frequency / (1000000 / HZ),
934 lapic_timer_frequency % (1000000 / HZ));
937 * Do a sanity check on the APIC calibration result
939 if (lapic_timer_frequency < (1000000 / HZ)) {
941 pr_warning("APIC frequency too slow, disabling apic timer\n");
945 levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
948 * PM timer calibration failed or not turned on, so let's try APIC
949 * timer based calibration, if a global clockevent device is
952 if (!pm_referenced && global_clock_event) {
953 apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
956 * Setup the apic timer manually
958 levt->event_handler = lapic_cal_handler;
959 lapic_timer_set_periodic(levt);
960 lapic_cal_loops = -1;
962 /* Let the interrupts run */
965 while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
968 /* Stop the lapic timer */
970 lapic_timer_shutdown(levt);
973 deltaj = lapic_cal_j2 - lapic_cal_j1;
974 apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
976 /* Check if the jiffies result is consistent */
977 if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
978 apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
980 levt->features |= CLOCK_EVT_FEAT_DUMMY;
984 if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
985 pr_warning("APIC timer disabled due to verification failure\n");
993 * Setup the boot APIC
995 * Calibrate and verify the result.
997 void __init setup_boot_APIC_clock(void)
1000 * The local apic timer can be disabled via the kernel
1001 * commandline or from the CPU detection code. Register the lapic
1002 * timer as a dummy clock event source on SMP systems, so the
1003 * broadcast mechanism is used. On UP systems simply ignore it.
1005 if (disable_apic_timer) {
1006 pr_info("Disabling APIC timer\n");
1007 /* No broadcast on UP ! */
1008 if (num_possible_cpus() > 1) {
1009 lapic_clockevent.mult = 1;
1015 if (calibrate_APIC_clock()) {
1016 /* No broadcast on UP ! */
1017 if (num_possible_cpus() > 1)
1023 * If nmi_watchdog is set to IO_APIC, we need the
1024 * PIT/HPET going. Otherwise register lapic as a dummy
1027 lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
1029 /* Setup the lapic or request the broadcast */
1031 amd_e400_c1e_apic_setup();
1034 void setup_secondary_APIC_clock(void)
1037 amd_e400_c1e_apic_setup();
1041 * The guts of the apic timer interrupt
1043 static void local_apic_timer_interrupt(void)
1045 struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
1048 * Normally we should not be here until the LAPIC has been initialized, but
1049 * in some cases, like kdump, it's possible that there is a pending LAPIC
1050 * timer interrupt from the previous kernel's context which is delivered in
1051 * the new kernel the moment interrupts are enabled.
1053 * Interrupts are enabled early and the LAPIC is set up much later, hence
1054 * it's possible that when we get here evt->event_handler is NULL.
1055 * Check for event_handler being NULL and discard the interrupt as
1058 if (!evt->event_handler) {
1059 pr_warning("Spurious LAPIC timer interrupt on cpu %d\n",
1060 smp_processor_id());
1062 lapic_timer_shutdown(evt);
1067 * the NMI deadlock-detector uses this.
1069 inc_irq_stat(apic_timer_irqs);
1071 evt->event_handler(evt);
1075 * Local APIC timer interrupt. This is the most natural way for doing
1076 * local interrupts, but local timer interrupts can be emulated by
1077 * broadcast interrupts too. [in case the hw doesn't support APIC timers]
1079 * [ if a single-CPU system runs an SMP kernel then we call the local
1080 * interrupt as well. Thus we cannot inline the local irq ... ]
1082 __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
1084 struct pt_regs *old_regs = set_irq_regs(regs);
1087 * NOTE! We'd better ACK the irq immediately,
1088 * because timer handling can be slow.
1090 * update_process_times() expects us to have done irq_enter().
1091 * Besides, if we don't, timer interrupts ignore the global
1092 * interrupt lock, which is the WrongThing (tm) to do.
1095 trace_local_timer_entry(LOCAL_TIMER_VECTOR);
1096 local_apic_timer_interrupt();
1097 trace_local_timer_exit(LOCAL_TIMER_VECTOR);
1100 set_irq_regs(old_regs);
1103 int setup_profiling_timer(unsigned int multiplier)
1109 * Local APIC start and shutdown
1113 * clear_local_APIC - shutdown the local APIC
1115 * This is called when a CPU is disabled and before rebooting, so the state of
1116 * the local APIC has no dangling leftovers. Also used to clean out any BIOS
1117 * leftovers during boot.
1119 void clear_local_APIC(void)
1124 /* APIC hasn't been mapped yet */
1125 if (!x2apic_mode && !apic_phys)
1128 maxlvt = lapic_get_maxlvt();
1130 * Masking an LVT entry can trigger a local APIC error
1131 * if the vector is zero. Mask LVTERR first to prevent this.
1134 v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
1135 apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
1138 * Careful: we have to set masks only first to deassert
1139 * any level-triggered sources.
1141 v = apic_read(APIC_LVTT);
1142 apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
1143 v = apic_read(APIC_LVT0);
1144 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1145 v = apic_read(APIC_LVT1);
1146 apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
1148 v = apic_read(APIC_LVTPC);
1149 apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
1152 /* let's not touch this if we didn't frob it */
1153 #ifdef CONFIG_X86_THERMAL_VECTOR
1155 v = apic_read(APIC_LVTTHMR);
1156 apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
1159 #ifdef CONFIG_X86_MCE_INTEL
1161 v = apic_read(APIC_LVTCMCI);
1162 if (!(v & APIC_LVT_MASKED))
1163 apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
1168 * Clean APIC state for other OSs:
1170 apic_write(APIC_LVTT, APIC_LVT_MASKED);
1171 apic_write(APIC_LVT0, APIC_LVT_MASKED);
1172 apic_write(APIC_LVT1, APIC_LVT_MASKED);
1174 apic_write(APIC_LVTERR, APIC_LVT_MASKED);
1176 apic_write(APIC_LVTPC, APIC_LVT_MASKED);
1178 /* Integrated APIC (!82489DX) ? */
1179 if (lapic_is_integrated()) {
1181 /* Clear ESR due to Pentium errata 3AP and 11AP */
1182 apic_write(APIC_ESR, 0);
1183 apic_read(APIC_ESR);
1188 * disable_local_APIC - clear and disable the local APIC
1190 void disable_local_APIC(void)
1194 /* APIC hasn't been mapped yet */
1195 if (!x2apic_mode && !apic_phys)
1201 * Disable APIC (implies clearing of registers
1204 value = apic_read(APIC_SPIV);
1205 value &= ~APIC_SPIV_APIC_ENABLED;
1206 apic_write(APIC_SPIV, value);
1208 #ifdef CONFIG_X86_32
1210 * When LAPIC was disabled by the BIOS and enabled by the kernel,
1211 * restore the disabled state.
1213 if (enabled_via_apicbase) {
1216 rdmsr(MSR_IA32_APICBASE, l, h);
1217 l &= ~MSR_IA32_APICBASE_ENABLE;
1218 wrmsr(MSR_IA32_APICBASE, l, h);
1224 * If Linux enabled the LAPIC against the BIOS default, disable it again before
1225 * re-entering the BIOS on shutdown. Otherwise the BIOS may get confused and
1226 * not power-off. Additionally clear all LVT entries before disable_local_APIC
1227 * for the case where Linux didn't enable the LAPIC.
1229 void lapic_shutdown(void)
1231 unsigned long flags;
1233 if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config())
1236 local_irq_save(flags);
1238 #ifdef CONFIG_X86_32
1239 if (!enabled_via_apicbase)
1243 disable_local_APIC();
1246 local_irq_restore(flags);
1250 * sync_Arb_IDs - synchronize APIC bus arbitration IDs
1252 void __init sync_Arb_IDs(void)
1255 * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1 And not
1258 if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1264 apic_wait_icr_idle();
1266 apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
1267 apic_write(APIC_ICR, APIC_DEST_ALLINC |
1268 APIC_INT_LEVELTRIG | APIC_DM_INIT);
1271 enum apic_intr_mode_id apic_intr_mode;
1273 static int __init apic_intr_mode_select(void)
1275 /* Check kernel option */
1277 pr_info("APIC disabled via kernel command line\n");
1282 #ifdef CONFIG_X86_64
1283 /* On 64-bit, the APIC must be integrated; check the local APIC only */
1284 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1286 pr_info("APIC disabled by BIOS\n");
1290 /* On 32-bit, the APIC may be an integrated APIC or an 82489DX */
1292 /* Neither 82489DX nor integrated APIC ? */
1293 if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) {
1298 /* Does the BIOS pretend there is an integrated APIC? */
1299 if (!boot_cpu_has(X86_FEATURE_APIC) &&
1300 APIC_INTEGRATED(boot_cpu_apic_version)) {
1302 pr_err(FW_BUG "Local APIC %d not detected, force emulation\n",
1303 boot_cpu_physical_apicid);
1308 /* Check MP table or ACPI MADT configuration */
1309 if (!smp_found_config) {
1310 disable_ioapic_support();
1312 pr_info("APIC: ACPI MADT or MP tables are not detected\n");
1313 return APIC_VIRTUAL_WIRE_NO_CONFIG;
1315 return APIC_VIRTUAL_WIRE;
1319 /* If SMP should be disabled, then really disable it! */
1320 if (!setup_max_cpus) {
1321 pr_info("APIC: SMP mode deactivated\n");
1322 return APIC_SYMMETRIC_IO_NO_ROUTING;
1325 if (read_apic_id() != boot_cpu_physical_apicid) {
1326 panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
1327 read_apic_id(), boot_cpu_physical_apicid);
1328 /* Or can we switch back to PIC here? */
1332 return APIC_SYMMETRIC_IO;
1336 * An initial setup of the virtual wire mode.
1338 void __init init_bsp_APIC(void)
1343 * Don't do the setup now if we have an SMP BIOS as the
1344 * through-I/O-APIC virtual wire mode might be active.
1346 if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
1350 * Do not trust the local APIC being empty at bootup.
1357 value = apic_read(APIC_SPIV);
1358 value &= ~APIC_VECTOR_MASK;
1359 value |= APIC_SPIV_APIC_ENABLED;
1361 #ifdef CONFIG_X86_32
1362 /* This bit is reserved on P4/Xeon and should be cleared */
1363 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
1364 (boot_cpu_data.x86 == 15))
1365 value &= ~APIC_SPIV_FOCUS_DISABLED;
1368 value |= APIC_SPIV_FOCUS_DISABLED;
1369 value |= SPURIOUS_APIC_VECTOR;
1370 apic_write(APIC_SPIV, value);
1373 * Set up the virtual wire mode.
1375 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1376 value = APIC_DM_NMI;
1377 if (!lapic_is_integrated()) /* 82489DX */
1378 value |= APIC_LVT_LEVEL_TRIGGER;
1379 if (apic_extnmi == APIC_EXTNMI_NONE)
1380 value |= APIC_LVT_MASKED;
1381 apic_write(APIC_LVT1, value);
1384 /* Init the interrupt delivery mode for the BSP */
1385 void __init apic_intr_mode_init(void)
1387 bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
1389 apic_intr_mode = apic_intr_mode_select();
1391 switch (apic_intr_mode) {
1393 pr_info("APIC: Keep in PIC mode(8259)\n");
1395 case APIC_VIRTUAL_WIRE:
1396 pr_info("APIC: Switch to virtual wire mode setup\n");
1397 default_setup_apic_routing();
1399 case APIC_VIRTUAL_WIRE_NO_CONFIG:
1400 pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
1402 default_setup_apic_routing();
1404 case APIC_SYMMETRIC_IO:
1405 pr_info("APIC: Switch to symmetric I/O mode setup\n");
1406 default_setup_apic_routing();
1408 case APIC_SYMMETRIC_IO_NO_ROUTING:
1409 pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
1413 apic_bsp_setup(upmode);
1416 static void lapic_setup_esr(void)
1418 unsigned int oldvalue, value, maxlvt;
1420 if (!lapic_is_integrated()) {
1421 pr_info("No ESR for 82489DX.\n");
1425 if (apic->disable_esr) {
1427 * Something untraceable is creating bad interrupts on
1428 * secondary quads ... for the moment, just leave the
1429 * ESR disabled - we can't do anything useful with the
1430 * errors anyway - mbligh
1432 pr_info("Leaving ESR disabled.\n");
1436 maxlvt = lapic_get_maxlvt();
1437 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1438 apic_write(APIC_ESR, 0);
1439 oldvalue = apic_read(APIC_ESR);
1441 /* enables sending errors */
1442 value = ERROR_APIC_VECTOR;
1443 apic_write(APIC_LVTERR, value);
1446 * spec says clear errors after enabling vector.
1449 apic_write(APIC_ESR, 0);
1450 value = apic_read(APIC_ESR);
1451 if (value != oldvalue)
1452 apic_printk(APIC_VERBOSE, "ESR value before enabling "
1453 "vector: 0x%08x after: 0x%08x\n",
1457 #define APIC_IR_REGS APIC_ISR_NR
1458 #define APIC_IR_BITS (APIC_IR_REGS * 32)
1459 #define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
1462 unsigned long map[APIC_IR_MAPSIZE];
1463 u32 regs[APIC_IR_REGS];
1466 static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
1471 for (i = 0; i < APIC_IR_REGS; i++)
1472 irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
1475 for (i = 0; i < APIC_IR_REGS; i++)
1476 isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
1479 * If the ISR map is not empty, ACK the APIC and run another round
1480 * to verify whether a pending IRR has been unblocked and turned
1483 if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
1485 * There can be multiple ISR bits set when a high priority
1486 * interrupt preempted a lower priority one. Issue an ACK
1489 for_each_set_bit(bit, isr->map, APIC_IR_BITS)
1494 return !bitmap_empty(irr->map, APIC_IR_BITS);
1498 * After a crash, we no longer service the interrupts and a pending
1499 * interrupt from the previous kernel might still have its ISR bit set.
1501 * Most probably by now the CPU has serviced that pending interrupt and it
1502 * might not have done the ack_APIC_irq() because it thought the interrupt
1503 * came from the i8259 as ExtInt. The LAPIC did not get an EOI so it does not
1504 * clear the ISR bit, and the CPU thinks it has already serviced it. Hence
1505 * a vector might get locked. It was noticed for timer irq (vector
1506 * 0x31). Issue an extra EOI to clear ISR.
1508 * If there are pending IRR bits they turn into ISR bits after a higher
1509 * priority ISR bit has been acked.
1511 static void apic_pending_intr_clear(void)
1513 union apic_ir irr, isr;
1516 /* 512 loops are way oversized and give the APIC a chance to obey. */
1517 for (i = 0; i < 512; i++) {
1518 if (!apic_check_and_ack(&irr, &isr))
1521 /* Dump the IRR/ISR content if that failed */
1522 pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
1526 * setup_local_APIC - setup the local APIC
1528 * Used to setup local APIC while initializing BSP or bringing up APs.
1529 * Always called with preemption disabled.
1531 static void setup_local_APIC(void)
1533 int cpu = smp_processor_id();
1538 disable_ioapic_support();
1543 * If this comes from kexec/kcrash the APIC might be enabled in
1544 * SPIV. Soft disable it before doing further initialization.
1546 value = apic_read(APIC_SPIV);
1547 value &= ~APIC_SPIV_APIC_ENABLED;
1548 apic_write(APIC_SPIV, value);
1550 #ifdef CONFIG_X86_32
1551 /* Pound the ESR really hard over the head with a big hammer - mbligh */
1552 if (lapic_is_integrated() && apic->disable_esr) {
1553 apic_write(APIC_ESR, 0);
1554 apic_write(APIC_ESR, 0);
1555 apic_write(APIC_ESR, 0);
1556 apic_write(APIC_ESR, 0);
1559 perf_events_lapic_init();
1562 * Double-check whether this APIC is really registered.
1563 * This is meaningless in clustered apic mode, so we skip it.
1565 BUG_ON(!apic->apic_id_registered());
1568 * Intel recommends to set DFR, LDR and TPR before enabling
1569 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
1570 * document number 292116). So here it goes...
1572 apic->init_apic_ldr();
1574 #ifdef CONFIG_X86_32
1575 if (apic->dest_logical) {
1576 int logical_apicid, ldr_apicid;
1579 * APIC LDR is initialized. If logical_apicid mapping was
1580 * initialized during get_smp_config(), make sure it matches
1583 logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
1584 ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
1585 if (logical_apicid != BAD_APICID)
1586 WARN_ON(logical_apicid != ldr_apicid);
1587 /* Always use the value from LDR. */
1588 early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid;
1593 * Set Task Priority to 'accept all'. We never change this
1596 value = apic_read(APIC_TASKPRI);
1597 value &= ~APIC_TPRI_MASK;
1598 apic_write(APIC_TASKPRI, value);
1600 /* Clear any stale ISR/IRR bits */
1601 apic_pending_intr_clear();
1604 * Now that we are all set up, enable the APIC
1606 value = apic_read(APIC_SPIV);
1607 value &= ~APIC_VECTOR_MASK;
1611 value |= APIC_SPIV_APIC_ENABLED;
1613 #ifdef CONFIG_X86_32
1615 * Some unknown Intel IO/APIC (or APIC) errata is biting us with
1616 * certain networking cards. If high frequency interrupts are
1617 * happening on a particular IOAPIC pin, plus the IOAPIC routing
1618 * entry is masked/unmasked at a high rate as well then sooner or
1619 * later IOAPIC line gets 'stuck', no more interrupts are received
1620 * from the device. If focus CPU is disabled then the hang goes
1623 * [ This bug can be reproduced easily with a level-triggered
1624 * PCI Ne2000 networking cards and PII/PIII processors, dual
1628 * Actually disabling the focus CPU check just makes the hang less
1629 * frequent as it makes the interrupt distribution model more
1630 * like LRU than MRU (the short-term load is more even across CPUs).
1634 * - enable focus processor (bit==0)
1635 * - 64-bit mode always uses processor focus
1636 * so no need to set it
1638 value &= ~APIC_SPIV_FOCUS_DISABLED;
1642 * Set spurious IRQ vector
1644 value |= SPURIOUS_APIC_VECTOR;
1645 apic_write(APIC_SPIV, value);
1648 * Set up LVT0, LVT1:
1650 * set up through-local-APIC on the boot CPU's LINT0. This is not
1651 * strictly necessary in pure symmetric-IO mode, but sometimes
1652 * we delegate interrupts to the 8259A.
1655 * TODO: set up through-local-APIC from through-I/O-APIC? --macro
1657 value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
1658 if (!cpu && (pic_mode || !value || skip_ioapic_setup)) {
1659 value = APIC_DM_EXTINT;
1660 apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu);
1662 value = APIC_DM_EXTINT | APIC_LVT_MASKED;
1663 apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", cpu);
1665 apic_write(APIC_LVT0, value);
1668 * Only the BSP sees the LINT1 NMI signal by default. This can be
1669 * modified by apic_extnmi= boot option.
1671 if ((!cpu && apic_extnmi != APIC_EXTNMI_NONE) ||
1672 apic_extnmi == APIC_EXTNMI_ALL)
1673 value = APIC_DM_NMI;
1675 value = APIC_DM_NMI | APIC_LVT_MASKED;
1678 if (!lapic_is_integrated())
1679 value |= APIC_LVT_LEVEL_TRIGGER;
1680 apic_write(APIC_LVT1, value);
1682 #ifdef CONFIG_X86_MCE_INTEL
1683 /* Recheck CMCI information after local APIC is up on CPU #0 */
1689 static void end_local_APIC_setup(void)
1693 #ifdef CONFIG_X86_32
1696 /* Disable the local apic timer */
1697 value = apic_read(APIC_LVTT);
1698 value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
1699 apic_write(APIC_LVTT, value);
1707 * APIC setup function for application processors. Called from smpboot.c
1709 void apic_ap_setup(void)
1712 end_local_APIC_setup();
1715 #ifdef CONFIG_X86_X2APIC
1723 static int x2apic_state;
1725 static void __x2apic_disable(void)
1729 if (!boot_cpu_has(X86_FEATURE_APIC))
1732 rdmsrl(MSR_IA32_APICBASE, msr);
1733 if (!(msr & X2APIC_ENABLE))
1735 /* Disable xapic and x2apic first and then reenable xapic mode */
1736 wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1737 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1738 printk_once(KERN_INFO "x2apic disabled\n");
1741 static void __x2apic_enable(void)
1745 rdmsrl(MSR_IA32_APICBASE, msr);
1746 if (msr & X2APIC_ENABLE)
1748 wrmsrl(MSR_IA32_APICBASE, msr | X2APIC_ENABLE);
1749 printk_once(KERN_INFO "x2apic enabled\n");
1752 static int __init setup_nox2apic(char *str)
1754 if (x2apic_enabled()) {
1755 int apicid = native_apic_msr_read(APIC_ID);
1757 if (apicid >= 255) {
1758 pr_warning("Apicid: %08x, cannot enforce nox2apic\n",
1762 pr_warning("x2apic already enabled.\n");
1765 setup_clear_cpu_cap(X86_FEATURE_X2APIC);
1766 x2apic_state = X2APIC_DISABLED;
1770 early_param("nox2apic", setup_nox2apic);
1772 /* Called from cpu_init() to enable x2apic on (secondary) cpus */
1773 void x2apic_setup(void)
1776 * If x2apic is not in ON state, disable it if already enabled
1779 if (x2apic_state != X2APIC_ON) {
1786 static __init void x2apic_disable(void)
1788 u32 x2apic_id, state = x2apic_state;
1791 x2apic_state = X2APIC_DISABLED;
1793 if (state != X2APIC_ON)
1796 x2apic_id = read_apic_id();
1797 if (x2apic_id >= 255)
1798 panic("Cannot disable x2apic, id: %08x\n", x2apic_id);
1801 register_lapic_address(mp_lapic_addr);
1804 static __init void x2apic_enable(void)
1806 if (x2apic_state != X2APIC_OFF)
1810 x2apic_state = X2APIC_ON;
1814 static __init void try_to_enable_x2apic(int remap_mode)
1816 if (x2apic_state == X2APIC_DISABLED)
1819 if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
1821 * Using X2APIC without IR is not architecturally supported
1822 * on bare metal but may be supported in guests.
1824 if (!x86_init.hyper.x2apic_available()) {
1825 pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
1831 * Without IR, all CPUs can be addressed by IOAPIC/MSI only
1832 * in physical mode, and CPUs with an APIC ID that cannot
1833 * be addressed must not be brought online.
1835 x2apic_set_max_apicid(255);
1841 void __init check_x2apic(void)
1843 if (x2apic_enabled()) {
1844 pr_info("x2apic: enabled by BIOS, switching to x2apic ops\n");
1846 x2apic_state = X2APIC_ON;
1847 } else if (!boot_cpu_has(X86_FEATURE_X2APIC)) {
1848 x2apic_state = X2APIC_DISABLED;
1851 #else /* CONFIG_X86_X2APIC */
1852 static int __init validate_x2apic(void)
1854 if (!apic_is_x2apic_enabled())
1857 * Checkme: Can we simply turn off x2apic here instead of panic?
1859 panic("BIOS has enabled x2apic but kernel doesn't support x2apic, please disable x2apic in BIOS.\n");
1861 early_initcall(validate_x2apic);
1863 static inline void try_to_enable_x2apic(int remap_mode) { }
1864 static inline void __x2apic_enable(void) { }
1865 #endif /* !CONFIG_X86_X2APIC */
1867 void __init enable_IR_x2apic(void)
1869 unsigned long flags;
1872 if (skip_ioapic_setup) {
1873 pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n");
1877 ir_stat = irq_remapping_prepare();
1878 if (ir_stat < 0 && !x2apic_supported())
1881 ret = save_ioapic_entries();
1883 pr_info("Saving IO-APIC state failed: %d\n", ret);
1887 local_irq_save(flags);
1888 legacy_pic->mask_all();
1889 mask_ioapic_entries();
1891 /* If irq_remapping_prepare() succeeded, try to enable it */
1893 ir_stat = irq_remapping_enable();
1894 /* ir_stat contains the remap mode or an error code */
1895 try_to_enable_x2apic(ir_stat);
1898 restore_ioapic_entries();
1899 legacy_pic->restore_mask();
1900 local_irq_restore(flags);
1903 #ifdef CONFIG_X86_64
1905 * Detect and enable local APICs on non-SMP boards.
1906 * Original code written by Keir Fraser.
1907 * On AMD64 we trust the BIOS - if it says no APIC it is likely
1908 * not correctly set up (usually the APIC timer won't work etc.)
1910 static int __init detect_init_APIC(void)
1912 if (!boot_cpu_has(X86_FEATURE_APIC)) {
1913 pr_info("No local APIC present\n");
1917 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1922 static int __init apic_verify(void)
1927 * The APIC feature bit should now be enabled
1930 features = cpuid_edx(1);
1931 if (!(features & (1 << X86_FEATURE_APIC))) {
1932 pr_warning("Could not enable APIC!\n");
1935 set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
1936 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
1938 /* The BIOS may have set up the APIC at some other address */
1939 if (boot_cpu_data.x86 >= 6) {
1940 rdmsr(MSR_IA32_APICBASE, l, h);
1941 if (l & MSR_IA32_APICBASE_ENABLE)
1942 mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
1945 pr_info("Found and enabled local APIC!\n");
1949 int __init apic_force_enable(unsigned long addr)
1957 * Some BIOSes disable the local APIC in the APIC_BASE
1958 * MSR. This can only be done in software for Intel P6 or later
1959 * and AMD K7 (Model > 1) or later.
1961 if (boot_cpu_data.x86 >= 6) {
1962 rdmsr(MSR_IA32_APICBASE, l, h);
1963 if (!(l & MSR_IA32_APICBASE_ENABLE)) {
1964 pr_info("Local APIC disabled by BIOS -- reenabling.\n");
1965 l &= ~MSR_IA32_APICBASE_BASE;
1966 l |= MSR_IA32_APICBASE_ENABLE | addr;
1967 wrmsr(MSR_IA32_APICBASE, l, h);
1968 enabled_via_apicbase = 1;
1971 return apic_verify();
1975 * Detect and initialize APIC
1977 static int __init detect_init_APIC(void)
1979 /* Disabled by kernel option? */
1983 switch (boot_cpu_data.x86_vendor) {
1984 case X86_VENDOR_AMD:
1985 if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
1986 (boot_cpu_data.x86 >= 15))
1989 case X86_VENDOR_INTEL:
1990 if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
1991 (boot_cpu_data.x86 == 5 && boot_cpu_has(X86_FEATURE_APIC)))
1998 if (!boot_cpu_has(X86_FEATURE_APIC)) {
2000 * Override the BIOS and try to enable the local APIC only if
2001 * "lapic" was specified.
2003 if (!force_enable_local_apic) {
2004 pr_info("Local APIC disabled by BIOS -- "
2005 "you can enable it with \"lapic\"\n");
2008 if (apic_force_enable(APIC_DEFAULT_PHYS_BASE))
2020 pr_info("No local APIC present or hardware disabled\n");
2026 * init_apic_mappings - initialize APIC mappings
2028 void __init init_apic_mappings(void)
2030 unsigned int new_apicid;
2032 if (apic_validate_deadline_timer())
2033 pr_info("TSC deadline timer available\n");
2036 boot_cpu_physical_apicid = read_apic_id();
2040 /* If no local APIC can be found return early */
2041 if (!smp_found_config && detect_init_APIC()) {
2042 /* let's NOP'ify apic operations */
2043 pr_info("APIC: disable apic facility\n");
2046 apic_phys = mp_lapic_addr;
2049 * If the system has ACPI MADT tables or MP info, the LAPIC
2050 * address is already registered.
2052 if (!acpi_lapic && !smp_found_config)
2053 register_lapic_address(apic_phys);
2057 * Fetch the APIC ID of the BSP in case we have a
2058 * default configuration (or the MP table is broken).
2060 new_apicid = read_apic_id();
2061 if (boot_cpu_physical_apicid != new_apicid) {
2062 boot_cpu_physical_apicid = new_apicid;
2064 * Yeah -- we lie about apic_version
2065 * in case the APIC was disabled via a boot option,
2066 * but it's not a problem for an SMP-compiled kernel
2067 * since apic_intr_mode_select() is prepared for such
2068 * a case and disables SMP mode.
2070 boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
2074 void __init register_lapic_address(unsigned long address)
2076 mp_lapic_addr = address;
2079 set_fixmap_nocache(FIX_APIC_BASE, address);
2080 apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
2081 APIC_BASE, address);
2083 if (boot_cpu_physical_apicid == -1U) {
2084 boot_cpu_physical_apicid = read_apic_id();
2085 boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
2090 * Local APIC interrupts
2094 * This interrupt should _never_ happen with our APIC/SMP architecture
2096 __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
2098 u8 vector = ~regs->orig_ax;
2102 trace_spurious_apic_entry(vector);
2104 inc_irq_stat(irq_spurious_count);
2107 * If this is a spurious interrupt then do not acknowledge
2109 if (vector == SPURIOUS_APIC_VECTOR) {
2111 pr_info("Spurious APIC interrupt (vector 0xFF) on CPU#%d, should never happen.\n",
2112 smp_processor_id());
2117 * If it is a vectored one, verify it's set in the ISR. If set,
2120 v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
2121 if (v & (1 << (vector & 0x1f))) {
2122 pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
2123 vector, smp_processor_id());
2126 pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
2127 vector, smp_processor_id());
2130 trace_spurious_apic_exit(vector);
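/*
 * Editor's note (illustrative sketch): the ISR lookup above works because the
 * in-service register is an array of 32-bit registers spaced 0x10 apart, each
 * covering 32 vectors. E.g. vector 0x31 lands in the register at
 * APIC_ISR + 0x10, bit 0x11:
 */
static inline bool example_vector_in_isr(u8 vector)
{
	u32 reg = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));

	return reg & (1U << (vector & 0x1f));
}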
2135 * This interrupt should never happen with our APIC/SMP architecture
2137 __visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
2139 static const char * const error_interrupt_reason[] = {
2140 "Send CS error", /* APIC Error Bit 0 */
2141 "Receive CS error", /* APIC Error Bit 1 */
2142 "Send accept error", /* APIC Error Bit 2 */
2143 "Receive accept error", /* APIC Error Bit 3 */
2144 "Redirectable IPI", /* APIC Error Bit 4 */
2145 "Send illegal vector", /* APIC Error Bit 5 */
2146 "Received illegal vector", /* APIC Error Bit 6 */
2147 "Illegal register address", /* APIC Error Bit 7 */
2152 trace_error_apic_entry(ERROR_APIC_VECTOR);
2154 /* First tickle the hardware, only then report what went on. -- REW */
2155 if (lapic_get_maxlvt() > 3) /* Due to the Pentium erratum 3AP. */
2156 apic_write(APIC_ESR, 0);
2157 v = apic_read(APIC_ESR);
2159 atomic_inc(&irq_err_count);
2161 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
2162 smp_processor_id(), v);
2167 apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
2172 apic_printk(APIC_DEBUG, KERN_CONT "\n");
2174 trace_error_apic_exit(ERROR_APIC_VECTOR);
2179 * connect_bsp_APIC - attach the APIC to the interrupt system
2181 static void __init connect_bsp_APIC(void)
2183 #ifdef CONFIG_X86_32
2186 * Do not trust the local APIC being empty at bootup.
2190 * PIC mode, enable APIC mode in the IMCR, i.e. connect BSP's
2191 * local APIC to INT and NMI lines.
2193 apic_printk(APIC_VERBOSE, "leaving PIC mode, "
2194 "enabling APIC mode.\n");
2201 * disconnect_bsp_APIC - detach the APIC from the interrupt system
2202 * @virt_wire_setup: indicates whether virtual wire mode is selected
2204 * Virtual wire mode is necessary to deliver legacy interrupts even when the
2207 void disconnect_bsp_APIC(int virt_wire_setup)
2211 #ifdef CONFIG_X86_32
2214 * Put the board back into PIC mode (has an effect only on
2215 * certain older boards). Note that APIC interrupts, including
2216 * IPIs, won't work beyond this point! The only exception are
2219 apic_printk(APIC_VERBOSE, "disabling APIC mode, "
2220 "entering PIC mode.\n");
2226 /* Go back to Virtual Wire compatibility mode */
2228 /* For the spurious interrupt use vector F, and enable it */
2229 value = apic_read(APIC_SPIV);
2230 value &= ~APIC_VECTOR_MASK;
2231 value |= APIC_SPIV_APIC_ENABLED;
2233 apic_write(APIC_SPIV, value);
2235 if (!virt_wire_setup) {
2237 * For LVT0 make it edge triggered, active high,
2238 * external and enabled
2240 value = apic_read(APIC_LVT0);
2241 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2242 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2243 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2244 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2245 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
2246 apic_write(APIC_LVT0, value);
2249 apic_write(APIC_LVT0, APIC_LVT_MASKED);
2253 * For LVT1 make it edge triggered, active high,
2256 value = apic_read(APIC_LVT1);
2257 value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
2258 APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
2259 APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
2260 value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
2261 value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
2262 apic_write(APIC_LVT1, value);
2266 * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated
2267 * contiguously, it equals the currently allocated max logical CPU ID plus 1.
2268 * All allocated CPU IDs should be in the [0, nr_logical_cpuids) range,
2269 * so the maximum of nr_logical_cpuids is nr_cpu_ids.
2271 * NOTE: Reserve 0 for BSP.
2273 static int nr_logical_cpuids = 1;
2276 * Used to store mapping between logical CPU IDs and APIC IDs.
2278 static int cpuid_to_apicid[] = {
2279 [0 ... NR_CPUS - 1] = -1,
2282 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
2284 return phys_id == cpuid_to_apicid[cpu];
2289 * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
2290 * @apicid: APIC ID to check
2292 bool apic_id_is_primary_thread(unsigned int apicid)
2296 if (smp_num_siblings == 1)
2298 /* Isolate the SMT bit(s) in the APICID and check for 0 */
2299 mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
2300 return !(apicid & mask);
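/*
 * Editor's note: a worked example of the mask above (illustrative only).
 * With smp_num_siblings == 2, fls(2) - 1 == 1 and mask == 0x1, so only even
 * APIC IDs are primary threads; with 4 siblings the mask becomes 0x3 and
 * APIC IDs 0, 4, 8, ... are primary.
 */
static __maybe_unused bool example_is_primary_thread(unsigned int apicid,
						     unsigned int siblings)
{
	u32 mask = (1U << (fls(siblings) - 1)) - 1;

	return siblings == 1 || !(apicid & mask);
}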
2305 * This API should be used to allocate logical CPU IDs to keep nr_logical_cpuids
2306 * and cpuid_to_apicid[] synchronized.
2308 static int allocate_logical_cpuid(int apicid)
2313 * cpuid <-> apicid mapping is persistent, so when a cpu is up,
2314 * check if the kernel has allocated a cpuid for it.
2316 for (i = 0; i < nr_logical_cpuids; i++) {
2317 if (cpuid_to_apicid[i] == apicid)
2321 /* Allocate a new cpuid. */
2322 if (nr_logical_cpuids >= nr_cpu_ids) {
2323 WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %u reached. "
2324 "Processor %d/0x%x and the rest are ignored.\n",
2325 nr_cpu_ids, nr_logical_cpuids, apicid);
2329 cpuid_to_apicid[nr_logical_cpuids] = apicid;
2330 return nr_logical_cpuids++;
2333 int generic_processor_info(int apicid, int version)
2335 int cpu, max = nr_cpu_ids;
2336 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
2337 phys_cpu_present_map);
2340 * boot_cpu_physical_apicid is designed to have the apicid
2341 * returned by read_apic_id(), i.e., the apicid of the
2342 * currently booting-up processor. However, on some platforms,
2343 * it is temporarily modified by the apicid reported as BSP
2344 * through MP table. Concretely:
2346 * - arch/x86/kernel/mpparse.c: MP_processor_info()
2347 * - arch/x86/mm/amdtopology.c: amd_numa_init()
2349 * This function is executed with the modified
2350 * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel
2351 * parameter doesn't work to disable APs on kdump 2nd kernel.
2353 * Since fixing handling of boot_cpu_physical_apicid requires
2354 * another discussion and tests on each platform, we leave it
2355 * for now and here we use read_apic_id() directly in this
2356 * function, generic_processor_info().
2358 if (disabled_cpu_apicid != BAD_APICID &&
2359 disabled_cpu_apicid != read_apic_id() &&
2360 disabled_cpu_apicid == apicid) {
2361 int thiscpu = num_processors + disabled_cpus;
2363 pr_warning("APIC: Disabling requested cpu."
2364 " Processor %d/0x%x ignored.\n",
2372 * If the boot cpu has not been detected yet, then only allow up to
2373 * nr_cpu_ids - 1 processors and keep one slot free for boot cpu
2375 if (!boot_cpu_detected && num_processors >= nr_cpu_ids - 1 &&
2376 apicid != boot_cpu_physical_apicid) {
2377 int thiscpu = max + disabled_cpus - 1;
2380 "APIC: NR_CPUS/possible_cpus limit of %i almost"
2381 " reached. Keeping one slot for boot cpu."
2382 " Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
2388 if (num_processors >= nr_cpu_ids) {
2389 int thiscpu = max + disabled_cpus;
2391 pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
2392 "reached. Processor %d/0x%x ignored.\n",
2393 max, thiscpu, apicid);
	if (apicid == boot_cpu_physical_apicid) {
		/*
		 * x86_bios_cpu_apicid is required to have processors listed
		 * in the same order as logical cpu numbers. Hence the first
		 * entry is the BSP, and so on.
		 * boot_cpu_init() already holds bit 0 in cpu_present_mask
		 * for the BSP.
		 */
		cpu = 0;

		/* Logical cpuid 0 is reserved for BSP. */
		cpuid_to_apicid[0] = apicid;
	} else {
		cpu = allocate_logical_cpuid(apicid);
		if (cpu < 0) {
			disabled_cpus++;
			return -EINVAL;
		}
	}

	/* Validate version */
	if (version == 0x0) {
		pr_warning("BIOS bug: APIC version is 0 for CPU %d/0x%x, fixing up to 0x10\n",
			   cpu, apicid);
		version = 0x10;
	}
	if (version != boot_cpu_apic_version) {
		pr_warning("BIOS bug: APIC version mismatch, boot CPU: %x, CPU %d: version %x\n",
			   boot_cpu_apic_version, cpu, version);
	}

	if (apicid > max_physical_apicid)
		max_physical_apicid = apicid;

#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
#endif
#ifdef CONFIG_X86_32
	early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
		apic->x86_32_early_logical_apicid(cpu);
#endif
	set_cpu_possible(cpu, true);
	physid_set(apicid, phys_cpu_present_map);
	set_cpu_present(cpu, true);
	num_processors++;
	return cpu;
}
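/*
 * Call-site sketch (editorial note, not part of the original file): the ACPI
 * MADT and MP-table parsers register each discovered CPU through this
 * function; e.g. acpi_register_lapic() in arch/x86/kernel/acpi/boot.c ends up
 * doing roughly
 *
 *	cpu = generic_processor_info(id, boot_cpu_apic_version);
 *
 * for every enabled local APIC entry it finds.
 */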
int hard_smp_processor_id(void)
{
	return read_apic_id();
}
/*
 * Override the generic EOI implementation with an optimized version.
 * Only called during early boot when only one CPU is active and with
 * interrupts disabled, so we know this does not race with actual APIC drivers.
 */
void __init apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v))
{
	struct apic **drv;

	for (drv = __apicdrivers; drv < __apicdrivers_end; drv++) {
		/* Should happen once for each apic */
		WARN_ON((*drv)->eoi_write == eoi_write);
		(*drv)->native_eoi_write = (*drv)->eoi_write;
		(*drv)->eoi_write = eoi_write;
	}
}
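/*
 * Usage sketch (editorial note): paravirtualized guests use this hook to
 * replace the default EOI register write with a cheaper mechanism; for
 * example, the KVM guest setup code registers its PV-EOI handler roughly as
 *
 *	apic_set_eoi_write(kvm_guest_apic_eoi_write);
 *
 * (see arch/x86/kernel/kvm.c) when the hypervisor advertises PV EOI support.
 */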
static void __init apic_bsp_up_setup(void)
{
#ifdef CONFIG_X86_64
	apic_write(APIC_ID, apic->set_apic_id(boot_cpu_physical_apicid));
#else
	/*
	 * Hack: In case of kdump, after a crash, the kernel might be booting
	 * on a cpu with a non-zero lapic id. But boot_cpu_physical_apicid
	 * might be zero if read from MP tables. Get it from the LAPIC.
	 */
# ifdef CONFIG_CRASH_DUMP
	boot_cpu_physical_apicid = read_apic_id();
# endif
#endif
	physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
}
/**
 * apic_bsp_setup - Setup function for local apic and io-apic
 * @upmode:	Force UP mode (for APIC_init_uniprocessor)
 */
void __init apic_bsp_setup(bool upmode)
{
	connect_bsp_APIC();
	if (upmode)
		apic_bsp_up_setup();
	setup_local_APIC();
	enable_IO_APIC();
	end_local_APIC_setup();
	irq_remap_enable_fault_handling();
	setup_IO_APIC();
	lapic_update_legacy_vectors();
}
#ifdef CONFIG_UP_LATE_INIT
void __init up_late_init(void)
{
	if (apic_intr_mode == APIC_PIC)
		return;

	/* Setup local timer */
	x86_init.timers.setup_percpu_clockev();
}
#endif
#ifdef CONFIG_PM

static struct {
	/*
	 * 'active' is true if the local APIC was enabled by us and
	 * not the BIOS; this signifies that we are also responsible
	 * for disabling it before entering apm/acpi suspend
	 */
	int active;
	/* r/w apic fields */
	unsigned int apic_id;
	unsigned int apic_taskpri;
	unsigned int apic_ldr;
	unsigned int apic_dfr;
	unsigned int apic_spiv;
	unsigned int apic_lvtt;
	unsigned int apic_lvtpc;
	unsigned int apic_lvt0;
	unsigned int apic_lvt1;
	unsigned int apic_lvterr;
	unsigned int apic_tmict;
	unsigned int apic_tdcr;
	unsigned int apic_thmr;
	unsigned int apic_cmci;
} apic_pm_state;
static int lapic_suspend(void)
{
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return 0;

	maxlvt = lapic_get_maxlvt();

	apic_pm_state.apic_id = apic_read(APIC_ID);
	apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
	apic_pm_state.apic_ldr = apic_read(APIC_LDR);
	apic_pm_state.apic_dfr = apic_read(APIC_DFR);
	apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
	apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
	if (maxlvt >= 4)
		apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
	apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
	apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
	apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
	apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
	apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
#ifdef CONFIG_X86_THERMAL_VECTOR
	if (maxlvt >= 5)
		apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
#endif
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 6)
		apic_pm_state.apic_cmci = apic_read(APIC_LVTCMCI);
#endif

	local_irq_save(flags);
	disable_local_APIC();

	irq_remapping_disable();

	local_irq_restore(flags);
	return 0;
}
static void lapic_resume(void)
{
	unsigned int l, h;
	unsigned long flags;
	int maxlvt;

	if (!apic_pm_state.active)
		return;

	local_irq_save(flags);

	/*
	 * IO-APIC and PIC have their own resume routines.
	 * We just mask them here to make sure the interrupt
	 * subsystem is completely quiet while we enable x2apic
	 * and interrupt-remapping.
	 */
	mask_ioapic_entries();
	legacy_pic->mask_all();

	if (x2apic_mode) {
		__x2apic_enable();
	} else {
		/*
		 * Make sure the APICBASE points to the right address
		 *
		 * FIXME! This will be wrong if we ever support suspend on
		 * SMP! We'll need to do this as part of the CPU restore!
		 */
		if (boot_cpu_data.x86 >= 6) {
			rdmsr(MSR_IA32_APICBASE, l, h);
			l &= ~MSR_IA32_APICBASE_BASE;
			l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
			wrmsr(MSR_IA32_APICBASE, l, h);
		}
	}
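	/*
	 * Worked example (editorial note): MSR_IA32_APICBASE packs the APIC
	 * base physical address in its upper bits and the global enable flag
	 * in bit 11, so for the usual 0xfee00000 base the low word written
	 * above is 0xfee00000 | MSR_IA32_APICBASE_ENABLE == 0xfee00800.
	 */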
	maxlvt = lapic_get_maxlvt();
	apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
	apic_write(APIC_ID, apic_pm_state.apic_id);
	apic_write(APIC_DFR, apic_pm_state.apic_dfr);
	apic_write(APIC_LDR, apic_pm_state.apic_ldr);
	apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
	apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
	apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
	apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
#ifdef CONFIG_X86_THERMAL_VECTOR
	if (maxlvt >= 5)
		apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
#endif
#ifdef CONFIG_X86_MCE_INTEL
	if (maxlvt >= 6)
		apic_write(APIC_LVTCMCI, apic_pm_state.apic_cmci);
#endif
	if (maxlvt >= 4)
		apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
	apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
	apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
	apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);
	apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	irq_remapping_reenable(x2apic_mode);

	local_irq_restore(flags);
}
/*
 * This device has no shutdown method - fully functioning local APICs
 * are needed on every CPU up until machine_halt/restart/poweroff.
 */
static struct syscore_ops lapic_syscore_ops = {
	.resume		= lapic_resume,
	.suspend	= lapic_suspend,
};
static void apic_pm_activate(void)
{
	apic_pm_state.active = 1;
}
static int __init init_lapic_sysfs(void)
{
	/* XXX: remove suspend/resume procs if !apic_pm_state.active? */
	if (boot_cpu_has(X86_FEATURE_APIC))
		register_syscore_ops(&lapic_syscore_ops);

	return 0;
}

/* local apic needs to resume before other devices access its registers. */
core_initcall(init_lapic_sysfs);
#else	/* CONFIG_PM */

static void apic_pm_activate(void) { }

#endif	/* CONFIG_PM */
#ifdef CONFIG_X86_64
static int multi_checked;
static int multi;

static int set_multi(const struct dmi_system_id *d)
{
	if (multi)
		return 0;
	pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
	multi = 1;
	return 0;
}

static const struct dmi_system_id multi_dmi_table[] = {
	{
		.callback = set_multi,
		.ident = "IBM System Summit2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
		},
	},
	{}
};

static void dmi_check_multi(void)
{
	if (multi_checked)
		return;
	dmi_check_system(multi_dmi_table);
	multi_checked = 1;
}
/*
 * apic_is_clustered_box() -- Check if we can expect good TSC
 *
 * Thus far, the major user of this is IBM's Summit2 series: clustered
 * boxes may have unsynced TSC problems if they are multi-chassis.
 * Use DMI to check for them.
 */
int apic_is_clustered_box(void)
{
	dmi_check_multi();
	return multi;
}
#endif
/*
 * APIC command line parameters
 */
static int __init setup_disableapic(char *arg)
{
	disable_apic = 1;
	setup_clear_cpu_cap(X86_FEATURE_APIC);
	return 0;
}
early_param("disableapic", setup_disableapic);
/* same as disableapic, for compatibility */
static int __init setup_nolapic(char *arg)
{
	return setup_disableapic(arg);
}
early_param("nolapic", setup_nolapic);
static int __init parse_lapic_timer_c2_ok(char *arg)
{
	local_apic_timer_c2_ok = 1;
	return 0;
}
early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);

static int __init parse_disable_apic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("noapictimer", parse_disable_apic_timer);

static int __init parse_nolapic_timer(char *arg)
{
	disable_apic_timer = 1;
	return 0;
}
early_param("nolapic_timer", parse_nolapic_timer);
static int __init apic_set_verbosity(char *arg)
{
	if (!arg) {
#ifdef CONFIG_X86_64
		skip_ioapic_setup = 0;
		return 0;
#endif
		return -EINVAL;
	}

	if (strcmp("debug", arg) == 0)
		apic_verbosity = APIC_DEBUG;
	else if (strcmp("verbose", arg) == 0)
		apic_verbosity = APIC_VERBOSE;
#ifdef CONFIG_X86_64
	else {
		pr_warning("APIC Verbosity level %s not recognised,"
			   " use apic=verbose or apic=debug\n", arg);
		return -EINVAL;
	}
#endif
	return 0;
}
early_param("apic", apic_set_verbosity);
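/*
 * Usage example (editorial note): booting with "apic=verbose" enables the
 * APIC_VERBOSE messages and "apic=debug" the APIC_DEBUG ones as well; on
 * 64-bit, a bare "apic" with no argument just re-enables IO-APIC setup
 * (skip_ioapic_setup = 0).
 */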
static int __init lapic_insert_resource(void)
{
	if (!apic_phys)
		return -1;

	/* Put local APIC into the resource map. */
	lapic_resource.start = apic_phys;
	lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
	insert_resource(&iomem_resource, &lapic_resource);
	return 0;
}

/*
 * This must run after e820__reserve_resources(), which uses request_resource().
 */
late_initcall(lapic_insert_resource);
static int __init apic_set_disabled_cpu_apicid(char *arg)
{
	if (!arg || !get_option(&arg, &disabled_cpu_apicid))
		return -EINVAL;
	return 0;
}
early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
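/*
 * Usage example (editorial note): as discussed above in
 * generic_processor_info(), a kdump capture kernel can be booted with
 * "disable_cpu_apicid=N", N being the APIC ID of the processor to keep out
 * of the CPU map, so that no INIT is ever sent to it.
 */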
static int __init apic_set_extnmi(char *arg)
{
	if (!arg)
		return -EINVAL;

	if (!strncmp("all", arg, 3))
		apic_extnmi = APIC_EXTNMI_ALL;
	else if (!strncmp("none", arg, 4))
		apic_extnmi = APIC_EXTNMI_NONE;
	else if (!strncmp("bsp", arg, 3))
		apic_extnmi = APIC_EXTNMI_BSP;
	else {
		pr_warn("Unknown external NMI delivery mode `%s' ignored\n", arg);
		return -EINVAL;
	}
	return 0;
}
early_param("apic_extnmi", apic_set_extnmi);
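/*
 * Usage example (editorial note): "apic_extnmi=all" delivers external NMIs to
 * every CPU, "apic_extnmi=none" masks them everywhere, and the default
 * "apic_extnmi=bsp" keeps the traditional BSP-only delivery.
 */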