// SPDX-License-Identifier: GPL-2.0-only
#include <linux/perf_event.h>
#include <linux/jump_label.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/nmi.h>

#include "../perf_event.h"
static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
static unsigned long perf_nmi_window;
/* AMD Event 0xFFF: Merge. Used with Large Increment per Cycle events */
#define AMD_MERGE_EVENT ((0xFULL << 32) | 0xFFULL)
#define AMD_MERGE_EVENT_ENABLE (AMD_MERGE_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE)
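/*
 * Illustration: PERF_CTL holds EventSelect[7:0] in bits [7:0] and
 * EventSelect[11:8] in bits [35:32] (matching the "config:0-7,32-35"
 * format attribute below), so the encoding above selects event 0xFFF.
 * The odd counter of a Large Increment counter pair is programmed with
 * this merge event via x86_pmu.perf_ctr_pair_en in amd_core_pmu_init().
 */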
/* PMC Enable and Overflow bits for PerfCntrGlobal* registers */
static u64 amd_pmu_global_cntr_mask __read_mostly;
static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS) ]   = 0x0141, /* Data Cache Misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ]   = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
                [ C(RESULT_MISS) ]   = 0x0167, /* Data Prefetcher :cancelled */
        },
 },
 [ C(L1I) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
                [ C(RESULT_MISS) ]   = 0x0081, /* Instruction cache misses */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ]   = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
                [ C(RESULT_MISS) ]   = 0,
        },
 },
 [ C(LL) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
                [ C(RESULT_MISS) ]   = 0x037E, /* L2 Cache Misses :IC+DC */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
                [ C(RESULT_MISS) ]   = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ]   = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
                [ C(RESULT_MISS) ]   = 0x0746, /* L1_DTLB_AND_L2_DTLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ]   = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS) ]   = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
                [ C(RESULT_MISS) ]   = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ]   = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ]   = -1,
        },
 },
 [ C(BPU) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
                [ C(RESULT_MISS) ]   = 0x00c3, /* Retired Mispredicted BI */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ]   = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ]   = -1,
        },
 },
 [ C(NODE) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0xb8e9, /* CPU Request to Memory, l+r */
                [ C(RESULT_MISS) ]   = 0x98e9, /* CPU Request to Memory, r */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ]   = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS) ]   = -1,
        },
 },
};
static __initconst const u64 amd_hw_cache_event_ids_f17h
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x0040, /* Data Cache Accesses */
                [C(RESULT_MISS)]   = 0xc860, /* L2$ access from DC Miss */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = 0xff5a, /* h/w prefetch DC Fills */
                [C(RESULT_MISS)]   = 0,
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x0080, /* Instruction cache fetches */
                [C(RESULT_MISS)]   = 0x0081, /* Instruction cache misses */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0xff45, /* All L2 DTLB accesses */
                [C(RESULT_MISS)]   = 0xf045, /* L2 DTLB misses (PT walks) */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x0084, /* L1 ITLB misses, L2 ITLB hits */
                [C(RESULT_MISS)]   = 0xff85, /* L1 ITLB misses, L2 misses */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0x00c2, /* Retired Branch Instr. */
                [C(RESULT_MISS)]   = 0x00c3, /* Retired Mispredicted BI */
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
},
[C(NODE)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = 0,
                [C(RESULT_MISS)]   = 0,
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = -1,
                [C(RESULT_MISS)]   = -1,
        },
},
};
/*
 * AMD Performance Monitor K7 and later, up to and including Family 16h:
 */
static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x077d,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x077e,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00d0, /* "Decoder empty" event */
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x00d1, /* "Dispatch stalls" event */
};
/*
 * AMD Performance Monitor Family 17h and later:
 */
static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
        [PERF_COUNT_HW_CPU_CYCLES]              = 0x0076,
        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0xff60,
        [PERF_COUNT_HW_CACHE_MISSES]            = 0x0964,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c2,
        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c3,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = 0x0187,
};
static u64 amd_pmu_event_map(int hw_event)
{
        if (boot_cpu_data.x86 >= 0x17)
                return amd_f17h_perfmon_event_map[hw_event];

        return amd_perfmon_event_map[hw_event];
}
/*
 * Previously calculated offsets
 */
static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;

/*
 * Legacy CPUs:
 *   4 counters starting at 0xc0010000 each offset by 1
 *
 * CPUs with core performance counter extensions:
 *   6 counters starting at 0xc0010200 each offset by 2
 */
static inline int amd_pmu_addr_offset(int index, bool eventsel)
{
        int offset;

        if (!index)
                return index;

        if (eventsel)
                offset = event_offsets[index];
        else
                offset = count_offsets[index];

        if (offset)
                return offset;

        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                offset = index;
        else
                offset = index << 1;

        if (eventsel)
                event_offsets[index] = offset;
        else
                count_offsets[index] = offset;

        return offset;
}
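/*
 * Worked example (values per the comment above, assuming the usual
 * MSR_K7_PERFCTR0 base of 0xc0010004): with the core extensions,
 * index 2 maps to offset 4, i.e. eventsel MSR 0xc0010204 and perfctr
 * MSR 0xc0010205; on legacy CPUs index 2 maps to offset 2, i.e.
 * eventsel MSR 0xc0010002 and perfctr MSR 0xc0010006.
 */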
/*
 * AMD64 events are detected based on their event codes.
 */
static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
{
        return ((hwc->config >> 24) & 0x0f00) | (hwc->config & 0x00ff);
}
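/*
 * e.g. a config with bits [35:32] = 0x1 and bits [7:0] = 0xD6 yields
 * the 12-bit event code 0x1D6 (one of the Fam15h exceptions below).
 */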
static inline bool amd_is_pair_event_code(struct hw_perf_event *hwc)
{
        if (!(x86_pmu.flags & PMU_FL_PAIR))
                return false;

        switch (amd_get_event_code(hwc)) {
        case 0x003:     return true;    /* Retired SSE/AVX FLOPs */
        default:        return false;
        }
}
DEFINE_STATIC_CALL_RET0(amd_pmu_branch_hw_config, *x86_pmu.hw_config);

static int amd_core_hw_config(struct perf_event *event)
{
        if (event->attr.exclude_host && event->attr.exclude_guest)
                /*
                 * When HO == GO == 1 the hardware treats that as GO == HO == 0
                 * and will count in both modes. We don't want to count in that
                 * case so we emulate no-counting by setting US = OS = 0.
                 */
                event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
                                      ARCH_PERFMON_EVENTSEL_OS);
        else if (event->attr.exclude_host)
                event->hw.config |= AMD64_EVENTSEL_GUESTONLY;
        else if (event->attr.exclude_guest)
                event->hw.config |= AMD64_EVENTSEL_HOSTONLY;

        if ((x86_pmu.flags & PMU_FL_PAIR) && amd_is_pair_event_code(&event->hw))
                event->hw.flags |= PERF_X86_EVENT_PAIR;

        if (has_branch_stack(event))
                return static_call(amd_pmu_branch_hw_config)(event);

        return 0;
}
static inline int amd_is_nb_event(struct hw_perf_event *hwc)
{
        return (hwc->config & 0xe0) == 0xe0;
}
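/*
 * i.e. an event whose low select byte has bits [7:5] all set (0xE0-0xFF),
 * which covers the NB range of the Fam15h type map below (0x0E0 ... 0x0F0).
 */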
static inline int amd_has_nb(struct cpu_hw_events *cpuc)
{
        struct amd_nb *nb = cpuc->amd_nb;

        return nb && nb->nb_id != -1;
}
static int amd_pmu_hw_config(struct perf_event *event)
{
        int ret;

        /* pass precise event sampling to ibs: */
        if (event->attr.precise_ip && get_ibs_caps())
                return forward_event_to_ibs(event);

        if (has_branch_stack(event) && !x86_pmu.lbr_nr)
                return -EOPNOTSUPP;

        ret = x86_pmu_hw_config(event);
        if (ret)
                return ret;

        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;

        return amd_core_hw_config(event);
}
static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
                                           struct perf_event *event)
{
        struct amd_nb *nb = cpuc->amd_nb;
        int i;

        /*
         * need to scan whole list because event may not have
         * been assigned during scheduling
         *
         * no race condition possible because event can only
         * be removed on one CPU at a time AND PMU is disabled
         * when we come here
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (cmpxchg(nb->owners + i, event, NULL) == event)
                        break;
        }
}
/*
 * AMD64 NorthBridge events need special treatment because
 * counter access needs to be synchronized across all cores
 * of a package. Refer to BKDG section 3.12
 *
 * NB events are events measuring L3 cache, HyperTransport
 * traffic. They are identified by an event code >= 0xe00.
 * They measure events on the NorthBridge, which is shared
 * by all cores on a package. NB events are counted on a
 * shared set of counters. When a NB event is programmed
 * in a counter, the data actually comes from a shared
 * counter. Thus, access to those counters needs to be
 * synchronized.
 *
 * We implement the synchronization such that no two cores
 * can be measuring NB events using the same counters. Thus,
 * we maintain a per-NB allocation table. The available slot
 * is propagated using the event_constraint structure.
 *
 * We provide only one choice for each NB event based on
 * the fact that only NB events have restrictions. Consequently,
 * if a counter is available, there is a guarantee the NB event
 * will be assigned to it. If no slot is available, an empty
 * constraint is returned and scheduling will eventually fail
 * for this event.
 *
 * Note that all cores attached to the same NB compete for the same
 * counters to host NB events; this is why we use atomic ops. Some
 * multi-chip CPUs may have more than one NB.
 *
 * Given that resources are allocated (cmpxchg), they must be
 * eventually freed for others to use. This is accomplished by
 * calling __amd_put_nb_event_constraints().
 *
 * Non-NB events are not impacted by this restriction.
 */
static struct event_constraint *
__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
                               struct event_constraint *c)
{
        struct hw_perf_event *hwc = &event->hw;
        struct amd_nb *nb = cpuc->amd_nb;
        struct perf_event *old;
        int idx, new = -1;

        if (!c)
                c = &unconstrained;

        if (cpuc->is_fake)
                return c;

        /*
         * detect if already present, if so reuse
         *
         * cannot merge with actual allocation
         * because of possible holes
         *
         * event can already be present yet not assigned (in hwc->idx)
         * because of successive calls to x86_schedule_events() from
         * hw_perf_group_sched_in() without hw_perf_enable()
         */
        for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
                if (new == -1 || hwc->idx == idx)
                        /* assign free slot, prefer hwc->idx */
                        old = cmpxchg(nb->owners + idx, NULL, event);
                else if (nb->owners[idx] == event)
                        /* event already present */
                        old = event;
                else
                        continue;

                if (old && old != event)
                        continue;

                /* reassign to this slot */
                if (new != -1)
                        cmpxchg(nb->owners + new, event, NULL);
                new = idx;

                /* already present, reuse */
                if (old == event)
                        break;
        }

        if (new == -1)
                return &emptyconstraint;

        return &nb->event_constraints[new];
}
static struct amd_nb *amd_alloc_nb(int cpu)
{
        struct amd_nb *nb;
        int i;

        nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
        if (!nb)
                return NULL;

        nb->nb_id = -1;

        /*
         * initialize all possible NB constraints
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                __set_bit(i, nb->event_constraints[i].idxmsk);
                nb->event_constraints[i].weight = 1;
        }
        return nb;
}
typedef void (amd_pmu_branch_reset_t)(void);
DEFINE_STATIC_CALL_NULL(amd_pmu_branch_reset, amd_pmu_branch_reset_t);

static void amd_pmu_cpu_reset(int cpu)
{
        if (x86_pmu.lbr_nr)
                static_call(amd_pmu_branch_reset)();

        if (x86_pmu.version < 2)
                return;

        /* Clear enable bits i.e. PerfCntrGlobalCtl.PerfCntrEn */
        wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

        /*
         * Clear freeze and overflow bits i.e. PerfCntrGlobalStatus.LbrFreeze
         * and PerfCntrGlobalStatus.PerfCntrOvfl
         */
        wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
               GLOBAL_STATUS_LBRS_FROZEN | amd_pmu_global_cntr_mask);
}
static int amd_pmu_cpu_prepare(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

        cpuc->lbr_sel = kzalloc_node(sizeof(struct er_account), GFP_KERNEL,
                                     cpu_to_node(cpu));
        if (!cpuc->lbr_sel)
                return -ENOMEM;

        WARN_ON_ONCE(cpuc->amd_nb);

        if (!x86_pmu.amd_nb_constraints)
                return 0;

        cpuc->amd_nb = amd_alloc_nb(cpu);
        if (cpuc->amd_nb)
                return 0;

        kfree(cpuc->lbr_sel);
        cpuc->lbr_sel = NULL;

        return -ENOMEM;
}
static void amd_pmu_cpu_starting(int cpu)
{
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
        struct amd_nb *nb;
        int i, nb_id;

        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
        amd_pmu_cpu_reset(cpu);

        if (!x86_pmu.amd_nb_constraints)
                return;

        nb_id = topology_die_id(cpu);
        WARN_ON_ONCE(nb_id == BAD_APICID);

        for_each_online_cpu(i) {
                nb = per_cpu(cpu_hw_events, i).amd_nb;
                if (WARN_ON_ONCE(!nb))
                        continue;

                if (nb->nb_id == nb_id) {
                        *onln = cpuc->amd_nb;
                        cpuc->amd_nb = nb;
                        break;
                }
        }

        cpuc->amd_nb->nb_id = nb_id;
        cpuc->amd_nb->refcnt++;
}
static void amd_pmu_cpu_dead(int cpu)
{
        struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

        kfree(cpuhw->lbr_sel);
        cpuhw->lbr_sel = NULL;
        amd_pmu_cpu_reset(cpu);

        if (!x86_pmu.amd_nb_constraints)
                return;

        if (cpuhw->amd_nb) {
                struct amd_nb *nb = cpuhw->amd_nb;

                if (nb->nb_id == -1 || --nb->refcnt == 0)
                        kfree(nb);

                cpuhw->amd_nb = NULL;
        }
}
static inline void amd_pmu_set_global_ctl(u64 ctl)
{
        wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
}

static inline u64 amd_pmu_get_global_status(void)
{
        u64 status;

        /* PerfCntrGlobalStatus is read-only */
        rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);

        return status;
}

static inline void amd_pmu_ack_global_status(u64 status)
{
        /*
         * PerfCntrGlobalStatus is read-only but an overflow acknowledgment
         * mechanism exists; writing 1 to a bit in PerfCntrGlobalStatusClr
         * clears the same bit in PerfCntrGlobalStatus
         */
        wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
}

static bool amd_pmu_test_overflow_topbit(int idx)
{
        u64 counter;

        rdmsrl(x86_pmu_event_addr(idx), counter);

        return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
}
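/*
 * Note: perf programs a counter with the negated sample period, so the
 * top bit of the 48-bit counter (see cntval_mask/max_period below) is
 * normally set; a clear top bit therefore indicates an overflow that
 * has not been reprogrammed yet.
 */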
static bool amd_pmu_test_overflow_status(int idx)
{
        return amd_pmu_get_global_status() & BIT_ULL(idx);
}

DEFINE_STATIC_CALL(amd_pmu_test_overflow, amd_pmu_test_overflow_topbit);
/*
 * When a PMC counter overflows, an NMI is used to process the event and
 * reset the counter. NMI latency can result in the counter being updated
 * before the NMI can run, which can result in what appear to be spurious
 * NMIs. This function is intended to wait for the NMI to run and reset
 * the counter to avoid possible unhandled NMI messages.
 */
#define OVERFLOW_WAIT_COUNT     50

static void amd_pmu_wait_on_overflow(int idx)
{
        unsigned int i;

        /*
         * Wait for the counter to be reset if it has overflowed. This loop
         * should exit very, very quickly, but just in case, don't wait
         * forever...
         */
        for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
                if (!static_call(amd_pmu_test_overflow)(idx))
                        break;

                /* Might be in IRQ context, so can't sleep */
                udelay(50);
        }
}
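/* Worst case, the loop above busy-waits 50 * 50us = 2.5ms before giving up. */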
static void amd_pmu_check_overflow(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;

        /*
         * This shouldn't be called from NMI context, but add a safeguard here
         * to return, since if we're in NMI context we can't wait for an NMI
         * to reset an overflowed counter value.
         */
        if (in_nmi())
                return;

        /*
         * Check each counter for overflow and wait for it to be reset by the
         * NMI if it has overflowed. This relies on the fact that all active
         * counters are always enabled when this function is called and
         * ARCH_PERFMON_EVENTSEL_INT is always set.
         */
        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                amd_pmu_wait_on_overflow(idx);
        }
}
static void amd_pmu_enable_event(struct perf_event *event)
{
        x86_pmu_enable_event(event);
}

static void amd_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int idx;

        amd_brs_enable_all();

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                /* only activate events which are marked as active */
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                amd_pmu_enable_event(cpuc->events[idx]);
        }
}
static void amd_pmu_v2_enable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * Testing cpu_hw_events.enabled should be skipped in this case unlike
         * in x86_pmu_enable_event().
         *
         * Since cpu_hw_events.enabled is set only after returning from
         * x86_pmu_start(), the PMCs must be programmed and kept ready.
         * Counting starts only after x86_pmu_enable_all() is called.
         */
        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
}

static __always_inline void amd_pmu_core_enable_all(void)
{
        amd_pmu_set_global_ctl(amd_pmu_global_cntr_mask);
}

static void amd_pmu_v2_enable_all(int added)
{
        amd_pmu_lbr_enable_all();
        amd_pmu_core_enable_all();
}
static void amd_pmu_disable_event(struct perf_event *event)
{
        x86_pmu_disable_event(event);

        /*
         * This can be called from NMI context (via x86_pmu_stop). The counter
         * may have overflowed, but either way, we'll never see it get reset
         * by the NMI if we're already in the NMI. And the NMI latency support
         * below will take care of any pending NMI that might have been
         * generated by the overflow.
         */
        if (in_nmi())
                return;

        amd_pmu_wait_on_overflow(event->hw.idx);
}

static void amd_pmu_disable_all(void)
{
        amd_brs_disable_all();
        x86_pmu_disable_all();
        amd_pmu_check_overflow();
}

static __always_inline void amd_pmu_core_disable_all(void)
{
        amd_pmu_set_global_ctl(0);
}

static void amd_pmu_v2_disable_all(void)
{
        amd_pmu_core_disable_all();
        amd_pmu_lbr_disable_all();
        amd_pmu_check_overflow();
}
DEFINE_STATIC_CALL_NULL(amd_pmu_branch_add, *x86_pmu.add);

static void amd_pmu_add_event(struct perf_event *event)
{
        if (needs_branch_stack(event))
                static_call(amd_pmu_branch_add)(event);
}

DEFINE_STATIC_CALL_NULL(amd_pmu_branch_del, *x86_pmu.del);

static void amd_pmu_del_event(struct perf_event *event)
{
        if (needs_branch_stack(event))
                static_call(amd_pmu_branch_del)(event);
}
/*
 * Because of NMI latency, if multiple PMC counters are active or other sources
 * of NMIs are received, the perf NMI handler can handle one or more overflowed
 * PMC counters outside of the NMI associated with the PMC overflow. If the NMI
 * doesn't arrive at the LAPIC in time to become a pending NMI, then the kernel
 * back-to-back NMI support won't be active. This PMC handler needs to take into
 * account that this can occur, otherwise this could result in unknown NMI
 * messages being issued. Examples of this are PMC overflow while in the NMI
 * handler when multiple PMCs are active or PMC overflow while handling some
 * other source of an NMI.
 *
 * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
 * received during this window will be claimed. This prevents extending the
 * window past when it is possible that latent NMIs should be received. The
 * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
 * handled a counter. When an un-handled NMI is received, it will be claimed
 * only if arriving within that window.
 */
static inline int amd_pmu_adjust_nmi_window(int handled)
{
        /*
         * If a counter was handled, record a timestamp such that un-handled
         * NMIs will be claimed if arriving within that window.
         */
        if (handled) {
                this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);

                return handled;
        }

        if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
                return NMI_DONE;

        return NMI_HANDLED;
}
static int amd_pmu_handle_irq(struct pt_regs *regs)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        int handled;
        int pmu_enabled;

        /*
         * Save the PMU state.
         * It needs to be restored when leaving the handler.
         */
        pmu_enabled = cpuc->enabled;
        cpuc->enabled = 0;

        amd_brs_disable_all();

        /* Drain BRS if in use (it could be inactive) */
        if (cpuc->lbr_users)
                amd_brs_drain();

        /* Process any counter overflows */
        handled = x86_pmu_handle_irq(regs);

        cpuc->enabled = pmu_enabled;
        if (pmu_enabled)
                amd_brs_enable_all();

        return amd_pmu_adjust_nmi_window(handled);
}
static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct perf_sample_data data;
        struct hw_perf_event *hwc;
        struct perf_event *event;
        int handled = 0, idx;
        u64 reserved, status, mask;
        bool pmu_enabled;

        /*
         * Save the PMU state as it needs to be restored when leaving the
         * handler
         */
        pmu_enabled = cpuc->enabled;
        cpuc->enabled = 0;

        /* Stop counting but do not disable LBR */
        amd_pmu_core_disable_all();

        status = amd_pmu_get_global_status();

        /* Check if any overflows are pending */
        if (!status)
                goto done;

        /* Read branch records before unfreezing */
        if (status & GLOBAL_STATUS_LBRS_FROZEN) {
                amd_pmu_lbr_read();
                status &= ~GLOBAL_STATUS_LBRS_FROZEN;
        }

        reserved = status & ~amd_pmu_global_cntr_mask;
        if (reserved)
                pr_warn_once("Reserved PerfCntrGlobalStatus bits are set (0x%llx), please consider updating microcode\n",
                             reserved);

        /* Clear any reserved bits set by buggy microcode */
        status &= amd_pmu_global_cntr_mask;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                event = cpuc->events[idx];
                hwc = &event->hw;
                x86_perf_event_update(event);
                mask = BIT_ULL(idx);

                if (!(status & mask))
                        continue;

                /* Event overflow */
                handled++;
                status &= ~mask;
                perf_sample_data_init(&data, 0, hwc->last_period);

                if (!x86_perf_event_set_period(event))
                        continue;

                if (has_branch_stack(event))
                        perf_sample_save_brstack(&data, event, &cpuc->lbr_stack);

                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }

        /*
         * It should never be the case that some overflows are not handled as
         * the corresponding PMCs are expected to be inactive according to the
         * active_mask
         */
        WARN_ON(status > 0);

        /* Clear overflow and freeze bits */
        amd_pmu_ack_global_status(~status);

        /*
         * Unmasking the LVTPC is not required as the Mask (M) bit of the LVT
         * PMI entry is not set by the local APIC when a PMC overflow occurs
         */
        inc_irq_stat(apic_perf_irqs);

done:
        cpuc->enabled = pmu_enabled;

        /* Resume counting only if PMU is active */
        if (pmu_enabled)
                amd_pmu_core_enable_all();

        return amd_pmu_adjust_nmi_window(handled);
}
static struct event_constraint *
amd_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
        /*
         * if not NB event or no NB, then no constraints
         */
        if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
                return &unconstrained;

        return __amd_get_nb_event_constraints(cpuc, event, NULL);
}

static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
                                      struct perf_event *event)
{
        if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
                __amd_put_nb_event_constraints(cpuc, event);
}
PMU_FORMAT_ATTR(event,  "config:0-7,32-35");
PMU_FORMAT_ATTR(umask,  "config:8-15");
PMU_FORMAT_ATTR(edge,   "config:18");
PMU_FORMAT_ATTR(inv,    "config:23");
PMU_FORMAT_ATTR(cmask,  "config:24-31");

static struct attribute *amd_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_umask.attr,
        &format_attr_edge.attr,
        &format_attr_inv.attr,
        &format_attr_cmask.attr,
        NULL,
};
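/*
 * These fields expose the PERF_CTL layout to userspace; a hypothetical
 * example: "perf stat -e cpu/event=0x76,umask=0x00/" programs
 * EventSelect 0x076, the CPU cycles event from amd_perfmon_event_map.
 */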
/* AMD Family 15h */

#define AMD_EVENT_TYPE_MASK     0x000000F0ULL

#define AMD_EVENT_FP            0x00000000ULL ... 0x00000010ULL
#define AMD_EVENT_LS            0x00000020ULL ... 0x00000030ULL
#define AMD_EVENT_DC            0x00000040ULL ... 0x00000050ULL
#define AMD_EVENT_CU            0x00000060ULL ... 0x00000070ULL
#define AMD_EVENT_IC_DE         0x00000080ULL ... 0x00000090ULL
#define AMD_EVENT_EX_LS         0x000000C0ULL
#define AMD_EVENT_DE            0x000000D0ULL
#define AMD_EVENT_NB            0x000000E0ULL ... 0x000000F0ULL
/*
 * AMD family 15h event code/PMC mappings:
 *
 * type = event_code & 0x0F0:
 *
 * 0x000        FP      PERF_CTL[5:3]
 * 0x010        FP      PERF_CTL[5:3]
 * 0x020        LS      PERF_CTL[5:0]
 * 0x030        LS      PERF_CTL[5:0]
 * 0x040        DC      PERF_CTL[5:0]
 * 0x050        DC      PERF_CTL[5:0]
 * 0x060        CU      PERF_CTL[2:0]
 * 0x070        CU      PERF_CTL[2:0]
 * 0x080        IC/DE   PERF_CTL[2:0]
 * 0x090        IC/DE   PERF_CTL[2:0]
 * 0x0A0        ---
 * 0x0B0        ---
 * 0x0C0        EX/LS   PERF_CTL[5:0]
 * 0x0D0        DE      PERF_CTL[2:0]
 * 0x0E0        NB      NB_PERF_CTL[3:0]
 * 0x0F0        NB      NB_PERF_CTL[3:0]
 *
 * Exceptions:
 *
 * 0x000        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x003        FP      PERF_CTL[3]
 * 0x004        FP      PERF_CTL[3], PERF_CTL[5:3] (*)
 * 0x00B        FP      PERF_CTL[3]
 * 0x00D        FP      PERF_CTL[3]
 * 0x023        DE      PERF_CTL[2:0]
 * 0x02D        LS      PERF_CTL[3]
 * 0x02E        LS      PERF_CTL[3,0]
 * 0x031        LS      PERF_CTL[2:0] (**)
 * 0x043        CU      PERF_CTL[2:0]
 * 0x045        CU      PERF_CTL[2:0]
 * 0x046        CU      PERF_CTL[2:0]
 * 0x054        CU      PERF_CTL[2:0]
 * 0x055        CU      PERF_CTL[2:0]
 * 0x08F        IC      PERF_CTL[0]
 * 0x187        DE      PERF_CTL[0]
 * 0x188        DE      PERF_CTL[0]
 * 0x0DB        EX      PERF_CTL[5:0]
 * 0x0DC        LS      PERF_CTL[5:0]
 * 0x0DD        LS      PERF_CTL[5:0]
 * 0x0DE        LS      PERF_CTL[5:0]
 * 0x0DF        LS      PERF_CTL[5:0]
 * 0x1C0        EX      PERF_CTL[5:3]
 * 0x1D6        EX      PERF_CTL[5:0]
 * 0x1D8        EX      PERF_CTL[5:0]
 *
 * (*)  depending on the umask all FPU counters may be used
 * (**) only one unitmask enabled at a time
 */
static struct event_constraint amd_f15_PMC0  = EVENT_CONSTRAINT(0, 0x01, 0);
static struct event_constraint amd_f15_PMC20 = EVENT_CONSTRAINT(0, 0x07, 0);
static struct event_constraint amd_f15_PMC3  = EVENT_CONSTRAINT(0, 0x08, 0);
static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09, 0);
static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
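/*
 * The counter masks above decode as: 0x01 = PMC0, 0x07 = PMC[2:0],
 * 0x08 = PMC3, 0x09 = PMC3 or PMC0 (overlapping), 0x3F = PMC[5:0] and
 * 0x38 = PMC[5:3], matching the event type table above.
 */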
static struct event_constraint *
amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx,
                               struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int event_code = amd_get_event_code(hwc);

        switch (event_code & AMD_EVENT_TYPE_MASK) {
        case AMD_EVENT_FP:
                switch (event_code) {
                case 0x000:
                        if (!(hwc->config & 0x0000F000ULL))
                                break;
                        if (!(hwc->config & 0x00000F00ULL))
                                break;
                        return &amd_f15_PMC3;
                case 0x004:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                break;
                        return &amd_f15_PMC3;
                case 0x003:
                case 0x00B:
                case 0x00D:
                        return &amd_f15_PMC3;
                }
                return &amd_f15_PMC53;
        case AMD_EVENT_LS:
        case AMD_EVENT_DC:
        case AMD_EVENT_EX_LS:
                switch (event_code) {
                case 0x023:
                case 0x043:
                case 0x045:
                case 0x046:
                case 0x054:
                case 0x055:
                        return &amd_f15_PMC20;
                case 0x02D:
                        return &amd_f15_PMC3;
                case 0x02E:
                        return &amd_f15_PMC30;
                case 0x031:
                        if (hweight_long(hwc->config & ARCH_PERFMON_EVENTSEL_UMASK) <= 1)
                                return &amd_f15_PMC20;
                        return &emptyconstraint;
                case 0x1C0:
                        return &amd_f15_PMC53;
                default:
                        return &amd_f15_PMC50;
                }
        case AMD_EVENT_CU:
        case AMD_EVENT_IC_DE:
        case AMD_EVENT_DE:
                switch (event_code) {
                case 0x08F:
                case 0x187:
                case 0x188:
                        return &amd_f15_PMC0;
                case 0x0DB ... 0x0DF:
                case 0x1D6:
                case 0x1D8:
                        return &amd_f15_PMC50;
                default:
                        return &amd_f15_PMC20;
                }
        case AMD_EVENT_NB:
                /* moved to uncore.c */
                return &emptyconstraint;
        default:
                return &emptyconstraint;
        }
}
static struct event_constraint pair_constraint;

static struct event_constraint *
amd_get_event_constraints_f17h(struct cpu_hw_events *cpuc, int idx,
                               struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (amd_is_pair_event_code(hwc))
                return &pair_constraint;

        return &unconstrained;
}

static void amd_put_event_constraints_f17h(struct cpu_hw_events *cpuc,
                                           struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (is_counter_pair(hwc))
                --cpuc->n_pair;
}
/*
 * Because of the way BRS operates with inactive and active phases, and
 * the link to one counter, it is not possible to have two events using BRS
 * scheduled at the same time. There would be an issue with enforcing the
 * period of each one and given that the BRS saturates, it would not be possible
 * to guarantee correlated content for all events. Therefore, in situations
 * where multiple events want to use BRS, the kernel enforces mutual exclusion.
 * Exclusion is enforced by choosing only one counter for events using BRS.
 * The event scheduling logic will then automatically multiplex the
 * events and ensure that at most one event is actively using BRS.
 *
 * The BRS counter could be any counter, but there is no constraint on Fam19h,
 * therefore all counters are equal and thus we pick the first one: PMC0
 */
static struct event_constraint amd_fam19h_brs_cntr0_constraint =
        EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK);

static struct event_constraint amd_fam19h_brs_pair_cntr0_constraint =
        __EVENT_CONSTRAINT(0, 0x1, AMD64_RAW_EVENT_MASK, 1, 0, PERF_X86_EVENT_PAIR);
static struct event_constraint *
amd_get_event_constraints_f19h(struct cpu_hw_events *cpuc, int idx,
                               struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        bool has_brs = has_amd_brs(hwc);

        /*
         * In case BRS is used with an event requiring a counter pair,
         * the kernel allows it, but only on counters 0 and 1, to enforce
         * the multiplexing required to protect BRS in the case of multiple
         * BRS users
         */
        if (amd_is_pair_event_code(hwc)) {
                return has_brs ? &amd_fam19h_brs_pair_cntr0_constraint
                               : &pair_constraint;
        }

        if (has_brs)
                return &amd_fam19h_brs_cntr0_constraint;

        return &unconstrained;
}
static ssize_t amd_event_sysfs_show(char *page, u64 config)
{
        u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT) |
                    (config & AMD64_EVENTSEL_EVENT) >> 24;

        return x86_event_sysfs_show(page, config, event);
}

static void amd_pmu_limit_period(struct perf_event *event, s64 *left)
{
        /*
         * Decrease period by the depth of the BRS feature to get the last N
         * taken branches and approximate the desired period
         */
        if (has_branch_stack(event) && *left > x86_pmu.lbr_nr)
                *left -= x86_pmu.lbr_nr;
}
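/*
 * e.g. with 16 LBR entries and a requested period of 1000000, the period
 * actually programmed is 999984, so the captured branch records roughly
 * line up with the end of the requested period.
 */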
static __initconst const struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = amd_pmu_handle_irq,
        .disable_all            = amd_pmu_disable_all,
        .enable_all             = amd_pmu_enable_all,
        .enable                 = amd_pmu_enable_event,
        .disable                = amd_pmu_disable_event,
        .hw_config              = amd_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_K7_EVNTSEL0,
        .perfctr                = MSR_K7_PERFCTR0,
        .addr_offset            = amd_pmu_addr_offset,
        .event_map              = amd_pmu_event_map,
        .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
        .num_counters           = AMD64_NUM_COUNTERS,
        .add                    = amd_pmu_add_event,
        .del                    = amd_pmu_del_event,
        .cntval_bits            = 48,
        .cntval_mask            = (1ULL << 48) - 1,
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
        .get_event_constraints  = amd_get_event_constraints,
        .put_event_constraints  = amd_put_event_constraints,

        .format_attrs           = amd_format_attr,
        .events_sysfs_show      = amd_event_sysfs_show,

        .cpu_prepare            = amd_pmu_cpu_prepare,
        .cpu_starting           = amd_pmu_cpu_starting,
        .cpu_dead               = amd_pmu_cpu_dead,

        .amd_nb_constraints     = 1,
};
static ssize_t branches_show(struct device *cdev,
                             struct device_attribute *attr,
                             char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr);
}

static DEVICE_ATTR_RO(branches);

static struct attribute *amd_pmu_branches_attrs[] = {
        &dev_attr_branches.attr,
        NULL,
};

static umode_t
amd_branches_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
        return x86_pmu.lbr_nr ? attr->mode : 0;
}

static struct attribute_group group_caps_amd_branches = {
        .name  = "caps",
        .attrs = amd_pmu_branches_attrs,
        .is_visible = amd_branches_is_visible,
};
#ifdef CONFIG_PERF_EVENTS_AMD_BRS

EVENT_ATTR_STR(branch-brs, amd_branch_brs,
               "event=" __stringify(AMD_FAM19H_BRS_EVENT)"\n");

static struct attribute *amd_brs_events_attrs[] = {
        EVENT_PTR(amd_branch_brs),
        NULL,
};

static umode_t
amd_brs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
        return static_cpu_has(X86_FEATURE_BRS) && x86_pmu.lbr_nr ?
               attr->mode : 0;
}

static struct attribute_group group_events_amd_brs = {
        .name  = "events",
        .attrs = amd_brs_events_attrs,
        .is_visible = amd_brs_is_visible,
};

#endif  /* CONFIG_PERF_EVENTS_AMD_BRS */

static const struct attribute_group *amd_attr_update[] = {
        &group_caps_amd_branches,
#ifdef CONFIG_PERF_EVENTS_AMD_BRS
        &group_events_amd_brs,
#endif
        NULL,
};
static int __init amd_core_pmu_init(void)
{
        union cpuid_0x80000022_ebx ebx;
        u64 even_ctr_mask = 0ULL;
        int i;

        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                return 0;

        /* Avoid calculating the value each time in the NMI handler */
        perf_nmi_window = msecs_to_jiffies(100);

        /*
         * If core performance counter extensions exist, we must use
         * MSR_F15H_PERF_CTL/MSR_F15H_PERF_CTR msrs. See also
         * amd_pmu_addr_offset().
         */
        x86_pmu.eventsel        = MSR_F15H_PERF_CTL;
        x86_pmu.perfctr         = MSR_F15H_PERF_CTR;
        x86_pmu.num_counters    = AMD64_NUM_COUNTERS_CORE;

        /* Check for Performance Monitoring v2 support */
        if (boot_cpu_has(X86_FEATURE_PERFMON_V2)) {
                ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);

                /* Update PMU version for later usage */
                x86_pmu.version = 2;

                /* Find the number of available Core PMCs */
                x86_pmu.num_counters = ebx.split.num_core_pmc;

                amd_pmu_global_cntr_mask = (1ULL << x86_pmu.num_counters) - 1;

                /* Update PMC handling functions */
                x86_pmu.enable_all = amd_pmu_v2_enable_all;
                x86_pmu.disable_all = amd_pmu_v2_disable_all;
                x86_pmu.enable = amd_pmu_v2_enable_event;
                x86_pmu.handle_irq = amd_pmu_v2_handle_irq;
                static_call_update(amd_pmu_test_overflow, amd_pmu_test_overflow_status);
        }

        /*
         * AMD Core perfctr has separate MSRs for the NB events, see
         * the amd/uncore.c driver.
         */
        x86_pmu.amd_nb_constraints = 0;

        if (boot_cpu_data.x86 == 0x15) {
                pr_cont("Fam15h ");
                x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
        }
        if (boot_cpu_data.x86 >= 0x17) {
                pr_cont("Fam17h+ ");
                /*
                 * Family 17h and compatibles have constraints for Large
                 * Increment per Cycle events: they may only be assigned an
                 * even numbered counter that has a consecutive adjacent odd
                 * numbered counter following it.
                 */
                for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
                        even_ctr_mask |= BIT_ULL(i);
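                /* e.g. with 6 PMCs, the loop yields even_ctr_mask = 0x15 (PMCs 0, 2 and 4) */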
                pair_constraint = (struct event_constraint)
                                  __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
                                                     x86_pmu.num_counters / 2, 0,
                                                     PERF_X86_EVENT_PAIR);

                x86_pmu.get_event_constraints = amd_get_event_constraints_f17h;
                x86_pmu.put_event_constraints = amd_put_event_constraints_f17h;
                x86_pmu.perf_ctr_pair_en = AMD_MERGE_EVENT_ENABLE;
                x86_pmu.flags |= PMU_FL_PAIR;
        }

        /* LBR and BRS are mutually exclusive features */
        if (!amd_pmu_lbr_init()) {
                /* LBR requires flushing on context switch */
                x86_pmu.sched_task = amd_pmu_lbr_sched_task;
                static_call_update(amd_pmu_branch_hw_config, amd_pmu_lbr_hw_config);
                static_call_update(amd_pmu_branch_reset, amd_pmu_lbr_reset);
                static_call_update(amd_pmu_branch_add, amd_pmu_lbr_add);
                static_call_update(amd_pmu_branch_del, amd_pmu_lbr_del);
        } else if (!amd_brs_init()) {
                /*
                 * BRS requires special event constraints and flushing on ctxsw.
                 */
                x86_pmu.get_event_constraints = amd_get_event_constraints_f19h;
                x86_pmu.sched_task = amd_pmu_brs_sched_task;
                x86_pmu.limit_period = amd_pmu_limit_period;

                static_call_update(amd_pmu_branch_hw_config, amd_brs_hw_config);
                static_call_update(amd_pmu_branch_reset, amd_brs_reset);
                static_call_update(amd_pmu_branch_add, amd_pmu_brs_add);
                static_call_update(amd_pmu_branch_del, amd_pmu_brs_del);

                /*
                 * put_event_constraints callback same as Fam17h, set above
                 */

                /* branch sampling must be stopped when entering low power */
                amd_brs_lopwr_init();
        }

        x86_pmu.attr_update = amd_attr_update;

        pr_cont("core perfctr, ");
        return 0;
}
__init int amd_pmu_init(void)
{
        int ret;

        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
                return -ENODEV;

        x86_pmu = amd_pmu;

        ret = amd_core_pmu_init();
        if (ret)
                return ret;

        if (num_possible_cpus() == 1) {
                /*
                 * No point in allocating data structures to serialize
                 * against other CPUs, when there is only the one CPU.
                 */
                x86_pmu.amd_nb_constraints = 0;
        }

        if (boot_cpu_data.x86 >= 0x17)
                memcpy(hw_cache_event_ids, amd_hw_cache_event_ids_f17h, sizeof(hw_cache_event_ids));
        else
                memcpy(hw_cache_event_ids, amd_hw_cache_event_ids, sizeof(hw_cache_event_ids));

        return 0;
}
static inline void amd_pmu_reload_virt(void)
{
        if (x86_pmu.version >= 2) {
                /*
                 * Clear global enable bits, reprogram the PERF_CTL
                 * registers with updated perf_ctr_virt_mask and then
                 * set global enable bits once again
                 */
                amd_pmu_v2_disable_all();
                amd_pmu_enable_all(0);
                amd_pmu_v2_enable_all(0);
                return;
        }

        amd_pmu_disable_all();
        amd_pmu_enable_all(0);
}
void amd_pmu_enable_virt(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        cpuc->perf_ctr_virt_mask = 0;

        /* Reload all events */
        amd_pmu_reload_virt();
}
EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);

void amd_pmu_disable_virt(void)
{
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /*
         * We only mask out the Host-only bit so that host-only counting works
         * when SVM is disabled. If someone sets up a guest-only counter when
         * SVM is disabled, the Guest-only bit still gets set and the counter
         * will not count anything.
         */
        cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;

        /* Reload all events */
        amd_pmu_reload_virt();
}
EXPORT_SYMBOL_GPL(amd_pmu_disable_virt);