/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>
#include <linux/sched/clock.h>

#include <asm/apic.h>

#include "../perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>
#include <asm/amd-ibs.h>
#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT
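
/*
 * Note: these masks cover only the config bits that are always valid.
 * Capability-dependent bits (IBS_OP_CNT_CTL, IBS_*_L3MISSONLY,
 * IBS_OP_MAX_CNT_EXT_MASK) are OR'ed into config_mask at init time,
 * see perf_ibs_fetch_init() and perf_ibs_op_init() below.
 */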
/*
 * IBS states:
 *
 * ENABLED; tracks the pmu::add(), pmu::del() state, when set the counter is taken
 * and any further add()s must fail.
 *
 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
 * complicated by the fact that the IBS hardware can send late NMIs (i.e. after
 * we've cleared the EN bit).
 *
 * In order to consume these late NMIs we have the STOPPED state, any NMI that
 * happens after we've cleared the EN state will clear this bit and report the
 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
 * someone else can consume our bit and our NMI will go unhandled).
 *
 * And since we cannot set/clear this separate bit together with the EN bit,
 * there are races; if we cleared STARTED early, an NMI could land in
 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
 * could happen if the period is small enough), and consume our STOPPED bit
 * and trigger streams of unhandled NMIs.
 *
 * If, however, we clear STARTED late, an NMI can hit between clearing the
 * EN bit and clearing STARTED, still see STARTED set and process the event.
 * If that event has the VALID bit clear, we bail properly, but this
 * is not a given. With VALID set we can end up calling pmu::stop() again
 * (the throttle logic) and trigger the WARNs in there.
 *
 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
 * nesting, and clear STARTED late, so that we have a well defined state over
 * the clearing of the EN bit.
 *
 * XXX: we could probably be using !atomic bitops for all this.
 */
enum ibs_states {
	IBS_ENABLED,
	IBS_STARTED,
	IBS_STOPPING,
	IBS_STOPPED,

	IBS_MAX_STATES,
};
struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};
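
/*
 * A single per-CPU slot suffices because at most one IBS fetch event and
 * one IBS op event can be active on a CPU at any time; see the
 * IBS_ENABLED bit in perf_ibs_add() and validate_group() below.
 */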
struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	unsigned int			fetch_count_reset_broken : 1;
	unsigned int			fetch_ignore_if_zero_rip : 1;
	struct cpu_perf_ibs __percpu	*pcpu;

	u64				(*get_count)(u64 config);
};
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
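
/*
 * Callers pass min = 1 << 4 because the IBS MaxCnt fields do not store the
 * lower four bits of the period, so the effective hardware period is
 * always a multiple of 16; see perf_ibs_set_period() below.
 */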
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (!local64_try_cmpxchg(&hwc->prev_count,
				 &prev_raw_count, new_raw_count))
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
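
/*
 * A failed try_cmpxchg() means an NMI updated prev_count concurrently;
 * perf_ibs_event_update() below handles this by re-reading the counter
 * and retrying until the update succeeds.
 */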
static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}
/*
 * core pmu config -> IBS config
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 */
static int core_pmu_ibs_config(struct perf_event *event, u64 *config)
{
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}
/*
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
int forward_event_to_ibs(struct perf_event *event)
{
	u64 config = 0;

	if (!event->attr.precise_ip || event->attr.precise_ip > 2)
		return -EOPNOTSUPP;

	if (!core_pmu_ibs_config(event, &config)) {
		event->attr.type = perf_ibs_op.pmu.type;
		event->attr.config = config;
	}
	return -ENOENT;
}
/*
 * Grouping of IBS events is not possible since IBS can have only
 * one event active at any point in time.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling;

	if (event->group_leader == event)
		return 0;

	if (event->group_leader->pmu == event->pmu)
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu == event->pmu)
			return -EINVAL;
	}
	return 0;
}
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (!perf_ibs)
		return -ENOENT;

	config = event->attr.config;

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	ret = validate_group(event);
	if (ret)
		return ret;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * lower 4 bits can not be set in ibs max cnt,
			 * but allowing it in case we adjust the
			 * sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}
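
	/*
	 * E.g. a requested sample_period of 0x1234 is trimmed to 0x1230
	 * above, since the hardware cannot store the lower four bits of
	 * MaxCnt; a period that would become zero is raised to the
	 * minimum of 0x10.
	 */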
	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
static u64 get_ibs_fetch_count(u64 config)
{
	union ibs_fetch_ctl fetch_ctl = (union ibs_fetch_ctl)config;

	return fetch_ctl.fetch_cnt << 4;
}
static u64 get_ibs_op_count(u64 config)
{
	union ibs_op_ctl op_ctl = (union ibs_op_ctl)config;
	u64 count = 0;

	/*
	 * If the internal 27-bit counter rolled over, the count is MaxCnt
	 * and the lower 7 bits of CurCnt are randomized.
	 * Otherwise CurCnt has the full 27-bit current counter value.
	 */
	if (op_ctl.op_val) {
		count = op_ctl.opmaxcnt << 4;
		if (ibs_caps & IBS_CAPS_OPCNTEXT)
			count += op_ctl.opmaxcnt_ext << 20;
	} else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
		count = op_ctl.opcurcnt;
	}

	return count;
}
static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}
static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	u64 tmp = hwc->config | config;

	if (perf_ibs->fetch_count_reset_broken)
		wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);

	wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
}
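
/*
 * The extra write with the enable bit cleared produces the 0-1 transition
 * of IbsFetchEn that parts with fetch_count_reset_broken set need in
 * order to reset the fetch counter, see perf_ibs_fetch_init() below.
 */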
/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	if (boot_cpu_data.x86 == 0x10)
		wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}
/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, PERF_EF_RELOAD and PERF_EF_UPDATE are ignored in
 * perf_ibs_start()/perf_ibs_stop(), which instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period, config = 0;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
		config |= period & IBS_OP_MAX_CNT_EXT_MASK;
		period &= ~IBS_OP_MAX_CNT_EXT_MASK;
	}
	config |= period >> 4;

	/*
	 * Set STARTED before enabling the hardware, such that a subsequent NMI
	 * must observe it.
	 */
	set_bit(IBS_STARTED,    pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, config);

	perf_event_update_userpage(event);
}
static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	if (test_and_set_bit(IBS_STOPPING, pcpu->state))
		return;

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		/*
		 * Set STOPPED before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate an !VALID sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPED, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared before, an NMI hitting after the clear but before
		 * clearing the EN bit might think it a spurious NMI and not
		 * handle it.
		 *
		 * Clearing it after, however, creates the problem of the NMI
		 * handler seeing STARTED but not having a valid sample.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear valid bit to not count rollovers on update, rollovers
	 * are only updated in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}
static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}
static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }
/*
 * We need to initialize with empty group if all attributes in the
 * group are dynamic.
 */
static struct attribute *attrs_empty[] = {
	NULL,
};

static struct attribute_group empty_format_group = {
	.name = "format",
	.attrs = attrs_empty,
};

static struct attribute_group empty_caps_group = {
	.name = "caps",
	.attrs = attrs_empty,
};

static const struct attribute_group *empty_attr_groups[] = {
	&empty_format_group,
	&empty_caps_group,
	NULL,
};
PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");
PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
PMU_EVENT_ATTR_STRING(zen4_ibs_extensions, zen4_ibs_extensions, "1");
static umode_t
zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return ibs_caps & IBS_CAPS_ZEN4 ? attr->mode : 0;
}
static struct attribute *rand_en_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

static struct attribute *fetch_l3missonly_attrs[] = {
	&fetch_l3missonly.attr.attr,
	NULL,
};

static struct attribute *zen4_ibs_extensions_attrs[] = {
	&zen4_ibs_extensions.attr.attr,
	NULL,
};

static struct attribute_group group_rand_en = {
	.name = "format",
	.attrs = rand_en_attrs,
};

static struct attribute_group group_fetch_l3missonly = {
	.name = "format",
	.attrs = fetch_l3missonly_attrs,
	.is_visible = zen4_ibs_extensions_is_visible,
};

static struct attribute_group group_zen4_ibs_extensions = {
	.name = "caps",
	.attrs = zen4_ibs_extensions_attrs,
	.is_visible = zen4_ibs_extensions_is_visible,
};
static const struct attribute_group *fetch_attr_groups[] = {
	&group_rand_en,
	&empty_caps_group,
	NULL,
};

static const struct attribute_group *fetch_attr_update[] = {
	&group_fetch_l3missonly,
	&group_zen4_ibs_extensions,
	NULL,
};
static umode_t
cnt_ctl_is_visible(struct kobject *kobj, struct attribute *attr, int i)
{
	return ibs_caps & IBS_CAPS_OPCNT ? attr->mode : 0;
}

static struct attribute *cnt_ctl_attrs[] = {
	&format_attr_cnt_ctl.attr,
	NULL,
};

static struct attribute *op_l3missonly_attrs[] = {
	&op_l3missonly.attr.attr,
	NULL,
};

static struct attribute_group group_cnt_ctl = {
	.name = "format",
	.attrs = cnt_ctl_attrs,
	.is_visible = cnt_ctl_is_visible,
};

static struct attribute_group group_op_l3missonly = {
	.name = "format",
	.attrs = op_l3missonly_attrs,
	.is_visible = zen4_ibs_extensions_is_visible,
};

static const struct attribute_group *op_attr_update[] = {
	&group_cnt_ctl,
	&group_op_l3missonly,
	&group_zen4_ibs_extensions,
	NULL,
};
static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_hw_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,

	.get_count		= get_ibs_fetch_count,
};
static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_hw_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
				  IBS_OP_CUR_CNT_RAND,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,

	.get_count		= get_ibs_op_count,
};
static void perf_ibs_get_mem_op(union ibs_op_data3 *op_data3,
				struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;

	data_src->mem_op = PERF_MEM_OP_NA;

	if (op_data3->ld_op)
		data_src->mem_op = PERF_MEM_OP_LOAD;
	else if (op_data3->st_op)
		data_src->mem_op = PERF_MEM_OP_STORE;
}
/*
 * Processors having CPUID_Fn8000001B_EAX[11] aka IBS_CAPS_ZEN4 have
 * finer-grained DataSrc encodings. Others have coarse ones.
 */
static u8 perf_ibs_data_src(union ibs_op_data2 *op_data2)
{
	if (ibs_caps & IBS_CAPS_ZEN4)
		return (op_data2->data_src_hi << 3) | op_data2->data_src_lo;

	return op_data2->data_src_lo;
}
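
/*
 * With Zen4 IBS the DataSrc value is five bits wide, split across two
 * OP_DATA2 fields: DataSrcHi supplies bits [4:3] and DataSrcLo bits
 * [2:0], which is why g_zen4_data_src below has 32 slots while the
 * legacy g_data_src table has 8.
 */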
#define	L(x)		(PERF_MEM_S(LVL, x) | PERF_MEM_S(LVL, HIT))
#define	LN(x)		PERF_MEM_S(LVLNUM, x)
#define	REM		PERF_MEM_S(REMOTE, REMOTE)
#define	HOPS(x)		PERF_MEM_S(HOPS, x)

static u64 g_data_src[8] = {
	[IBS_DATA_SRC_LOC_CACHE]	  = L(L3) | L(REM_CCE1) | LN(ANY_CACHE) | HOPS(0),
	[IBS_DATA_SRC_DRAM]		  = L(LOC_RAM) | LN(RAM),
	[IBS_DATA_SRC_REM_CACHE]	  = L(REM_CCE2) | LN(ANY_CACHE) | REM | HOPS(1),
	[IBS_DATA_SRC_IO]		  = L(IO) | LN(IO),
};

#define RMT_NODE_BITS			(1 << IBS_DATA_SRC_DRAM)
#define RMT_NODE_APPLICABLE(x)		(RMT_NODE_BITS & (1 << x))
static u64 g_zen4_data_src[32] = {
	[IBS_DATA_SRC_EXT_LOC_CACHE]	  = L(L3) | LN(L3),
	[IBS_DATA_SRC_EXT_NEAR_CCX_CACHE] = L(REM_CCE1) | LN(ANY_CACHE) | REM | HOPS(0),
	[IBS_DATA_SRC_EXT_DRAM]		  = L(LOC_RAM) | LN(RAM),
	[IBS_DATA_SRC_EXT_FAR_CCX_CACHE]  = L(REM_CCE2) | LN(ANY_CACHE) | REM | HOPS(1),
	[IBS_DATA_SRC_EXT_PMEM]		  = LN(PMEM),
	[IBS_DATA_SRC_EXT_IO]		  = L(IO) | LN(IO),
	[IBS_DATA_SRC_EXT_EXT_MEM]	  = LN(CXL),
};

#define ZEN4_RMT_NODE_BITS		((1 << IBS_DATA_SRC_EXT_DRAM) |	\
					 (1 << IBS_DATA_SRC_EXT_PMEM) |	\
					 (1 << IBS_DATA_SRC_EXT_EXT_MEM))
#define ZEN4_RMT_NODE_APPLICABLE(x)	(ZEN4_RMT_NODE_BITS & (1 << x))
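
/*
 * RmtNode qualifies only memory-class data sources: DRAM on all parts,
 * plus PMEM and extension memory (CXL) on Zen4. For cache data sources
 * the remote information is already encoded in DataSrc itself.
 */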
static __u64 perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
				  union ibs_op_data3 *op_data3,
				  struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;
	u8 ibs_data_src = perf_ibs_data_src(op_data2);

	data_src->mem_lvl = 0;
	data_src->mem_lvl_num = 0;

	/*
	 * DcMiss, L2Miss, DataSrc, DcMissLat etc. are all invalid for Uncached
	 * memory accesses. So, check DcUcMemAcc bit early.
	 */
	if (op_data3->dc_uc_mem_acc && ibs_data_src != IBS_DATA_SRC_EXT_IO)
		return L(UNC) | LN(UNC);

	/* L1 Hit */
	if (op_data3->dc_miss == 0)
		return L(L1) | LN(L1);

	/* L2 Hit */
	if (op_data3->l2_miss == 0) {
		/* Erratum #1293 */
		if (boot_cpu_data.x86 != 0x19 || boot_cpu_data.x86_model > 0xF ||
		    !(op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc))
			return L(L2) | LN(L2);
	}

	/*
	 * OP_DATA2 is valid only for load ops. Skip all checks which
	 * use OP_DATA2[DataSrc].
	 */
	if (data_src->mem_op != PERF_MEM_OP_LOAD)
		goto check_mab;

	if (ibs_caps & IBS_CAPS_ZEN4) {
		u64 val = g_zen4_data_src[ibs_data_src];

		if (!val)
			goto check_mab;

		/* HOPS_1 because IBS doesn't provide remote socket detail */
		if (op_data2->rmt_node && ZEN4_RMT_NODE_APPLICABLE(ibs_data_src)) {
			if (ibs_data_src == IBS_DATA_SRC_EXT_DRAM)
				val = L(REM_RAM1) | LN(RAM) | REM | HOPS(1);
			else
				val |= REM | HOPS(1);
		}

		return val;
	} else {
		u64 val = g_data_src[ibs_data_src];

		if (!val)
			goto check_mab;

		/* HOPS_1 because IBS doesn't provide remote socket detail */
		if (op_data2->rmt_node && RMT_NODE_APPLICABLE(ibs_data_src)) {
			if (ibs_data_src == IBS_DATA_SRC_DRAM)
				val = L(REM_RAM1) | LN(RAM) | REM | HOPS(1);
			else
				val |= REM | HOPS(1);
		}

		return val;
	}

check_mab:
	/*
	 * MAB (Miss Address Buffer) Hit. MAB keeps track of outstanding
	 * DC misses. However, such data may come from any level in mem
	 * hierarchy. IBS provides detail about both MAB as well as actual
	 * DataSrc simultaneously. Prioritize DataSrc over MAB, i.e. set
	 * MAB only when IBS fails to provide DataSrc.
	 */
	if (op_data3->dc_miss_no_mab_alloc)
		return L(LFB) | LN(LFB);

	/* Don't set HIT with NA */
	return PERF_MEM_S(LVL, NA) | LN(NA);
}
static bool perf_ibs_cache_hit_st_valid(void)
{
	/* 0: Uninitialized, 1: Valid, -1: Invalid */
	static int cache_hit_st_valid;

	if (unlikely(!cache_hit_st_valid)) {
		if (boot_cpu_data.x86 == 0x19 &&
		    (boot_cpu_data.x86_model <= 0xF ||
		    (boot_cpu_data.x86_model >= 0x20 &&
		     boot_cpu_data.x86_model <= 0x5F))) {
			cache_hit_st_valid = -1;
		} else {
			cache_hit_st_valid = 1;
		}
	}

	return cache_hit_st_valid == 1;
}
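
/*
 * On the family 19h models matched above, IbsOpData2.CacheHitSt is not
 * reliable, so perf_ibs_get_mem_snoop() leaves the snoop information at
 * PERF_MEM_SNOOP_NA on those parts.
 */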
static void perf_ibs_get_mem_snoop(union ibs_op_data2 *op_data2,
				   struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;
	u8 ibs_data_src;

	data_src->mem_snoop = PERF_MEM_SNOOP_NA;

	if (!perf_ibs_cache_hit_st_valid() ||
	    data_src->mem_op != PERF_MEM_OP_LOAD ||
	    data_src->mem_lvl & PERF_MEM_LVL_L1 ||
	    data_src->mem_lvl & PERF_MEM_LVL_L2 ||
	    op_data2->cache_hit_st)
		return;

	ibs_data_src = perf_ibs_data_src(op_data2);

	if (ibs_caps & IBS_CAPS_ZEN4) {
		if (ibs_data_src == IBS_DATA_SRC_EXT_LOC_CACHE ||
		    ibs_data_src == IBS_DATA_SRC_EXT_NEAR_CCX_CACHE ||
		    ibs_data_src == IBS_DATA_SRC_EXT_FAR_CCX_CACHE)
			data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
	} else if (ibs_data_src == IBS_DATA_SRC_LOC_CACHE) {
		data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
	}
}
static void perf_ibs_get_tlb_lvl(union ibs_op_data3 *op_data3,
				 struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;

	data_src->mem_dtlb = PERF_MEM_TLB_NA;

	if (!op_data3->dc_lin_addr_valid)
		return;

	if (!op_data3->dc_l1tlb_miss) {
		data_src->mem_dtlb = PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT;
		return;
	}

	if (!op_data3->dc_l2tlb_miss) {
		data_src->mem_dtlb = PERF_MEM_TLB_L2 | PERF_MEM_TLB_HIT;
		return;
	}

	data_src->mem_dtlb = PERF_MEM_TLB_L2 | PERF_MEM_TLB_MISS;
}
static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
				  struct perf_sample_data *data)
{
	union perf_mem_data_src *data_src = &data->data_src;

	data_src->mem_lock = PERF_MEM_LOCK_NA;

	if (op_data3->dc_locked_op)
		data_src->mem_lock = PERF_MEM_LOCK_LOCKED;
}

#define ibs_op_msr_idx(msr)	(msr - MSR_AMD64_IBSOPCTL)
static void perf_ibs_get_data_src(struct perf_ibs_data *ibs_data,
				  struct perf_sample_data *data,
				  union ibs_op_data2 *op_data2,
				  union ibs_op_data3 *op_data3)
{
	union perf_mem_data_src *data_src = &data->data_src;

	data_src->val |= perf_ibs_get_mem_lvl(op_data2, op_data3, data);
	perf_ibs_get_mem_snoop(op_data2, data);
	perf_ibs_get_tlb_lvl(op_data3, data);
	perf_ibs_get_mem_lock(op_data3, data);
}
static __u64 perf_ibs_get_op_data2(struct perf_ibs_data *ibs_data,
				   union ibs_op_data3 *op_data3)
{
	__u64 val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA2)];

	/* Erratum #1293 */
	if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model <= 0xF &&
	    (op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc)) {
		/*
		 * OP_DATA2 has only two fields on Zen3: DataSrc and RmtNode.
		 * DataSrc=0 is 'No valid status' and RmtNode is invalid when
		 * DataSrc=0.
		 */
		val = 0;
	}
	return val;
}
static void perf_ibs_parse_ld_st_data(__u64 sample_type,
				      struct perf_ibs_data *ibs_data,
				      struct perf_sample_data *data)
{
	union ibs_op_data3 op_data3;
	union ibs_op_data2 op_data2;
	union ibs_op_data op_data;

	data->data_src.val = PERF_MEM_NA;
	op_data3.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];

	perf_ibs_get_mem_op(&op_data3, data);
	if (data->data_src.mem_op != PERF_MEM_OP_LOAD &&
	    data->data_src.mem_op != PERF_MEM_OP_STORE)
		return;

	op_data2.val = perf_ibs_get_op_data2(ibs_data, &op_data3);

	if (sample_type & PERF_SAMPLE_DATA_SRC) {
		perf_ibs_get_data_src(ibs_data, data, &op_data2, &op_data3);
		data->sample_flags |= PERF_SAMPLE_DATA_SRC;
	}

	if (sample_type & PERF_SAMPLE_WEIGHT_TYPE && op_data3.dc_miss &&
	    data->data_src.mem_op == PERF_MEM_OP_LOAD) {
		op_data.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA)];

		if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
			data->weight.var1_dw = op_data3.dc_miss_lat;
			data->weight.var2_w = op_data.tag_to_ret_ctr;
		} else if (sample_type & PERF_SAMPLE_WEIGHT) {
			data->weight.full = op_data3.dc_miss_lat;
		}
		data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
	}

	if (sample_type & PERF_SAMPLE_ADDR && op_data3.dc_lin_addr_valid) {
		data->addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCLINAD)];
		data->sample_flags |= PERF_SAMPLE_ADDR;
	}

	if (sample_type & PERF_SAMPLE_PHYS_ADDR && op_data3.dc_phy_addr_valid) {
		data->phys_addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCPHYSAD)];
		data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
	}
}
static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
				   int check_rip)
{
	if (sample_type & PERF_SAMPLE_RAW ||
	    (perf_ibs == &perf_ibs_op &&
	     (sample_type & PERF_SAMPLE_DATA_SRC ||
	      sample_type & PERF_SAMPLE_WEIGHT_TYPE ||
	      sample_type & PERF_SAMPLE_ADDR ||
	      sample_type & PERF_SAMPLE_PHYS_ADDR)))
		return perf_ibs->offset_max;
	else if (check_rip)
		return 3;
	return 1;
}
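
/*
 * perf_ibs_get_offset_max() returns how many of the staggered sample MSRs
 * the NMI handler must read, counted from the control register: 1 is just
 * the control register, 3 additionally covers the RIP and the register
 * carrying the RipInvalid flag, and offset_max is the full register file.
 */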
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period, new_config = 0;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
			return 1;

		return 0;
	}

	if (WARN_ON_ONCE(!event))
		goto fail;

	hwc = &event->hw;
	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		goto fail;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));

	offset_max = perf_ibs_get_offset_max(perf_ibs, event->attr.sample_type, check_rip);
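
	/*
	 * Read the staggered IBS sample MSRs: offset_mask describes which
	 * offsets from the control MSR exist for this PMU, and offset_max
	 * (computed above) limits the read to what this sample needs.
	 */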
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	/*
	 * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
	 * depending on their availability.
	 * Can't add to offset_max as they are staggered.
	 */
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		if (perf_ibs == &perf_ibs_op) {
			if (ibs_caps & IBS_CAPS_BRNTRGT) {
				rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
				size++;
			}
			if (ibs_caps & IBS_CAPS_OPDATA4) {
				rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
				size++;
			}
		}
		if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
			rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;
	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		/* Workaround for erratum #1197 */
		if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
			goto out;

		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw = (struct perf_raw_record){
			.frag = {
				.size = sizeof(u32) + ibs_data.size,
				.data = ibs_data.data,
			},
		};
		perf_sample_save_raw_data(&data, &raw);
	}

	if (perf_ibs == &perf_ibs_op)
		perf_ibs_parse_ld_st_data(event->attr.sample_type, &ibs_data, &data);
	/*
	 * rip recorded by IbsOpRip will not be consistent with rsp and rbp
	 * recorded as part of interrupt regs. Thus we need to use rip from
	 * interrupt regs while unwinding call stack.
	 */
	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
		perf_sample_save_callchain(&data, event, iregs);

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle) {
		perf_ibs_stop(event, 0);
	} else {
		if (perf_ibs == &perf_ibs_op) {
			if (ibs_caps & IBS_CAPS_OPCNTEXT) {
				new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
				period &= ~IBS_OP_MAX_CNT_EXT_MASK;
			}
			if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL))
				new_config |= *config & IBS_OP_CUR_CNT_RAND;
		}
		new_config |= period >> 4;

		perf_ibs_enable_event(perf_ibs, hwc, new_config);
	}

	perf_event_update_userpage(event);

	return 1;
}
static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);

	return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}
static __init int perf_ibs_fetch_init(void)
{
	/*
	 * Some chips fail to reset the fetch count when it is written; instead
	 * they need a 0-1 transition of IbsFetchEn.
	 */
	if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
		perf_ibs_fetch.fetch_count_reset_broken = 1;

	if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
		perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;

	if (ibs_caps & IBS_CAPS_ZEN4)
		perf_ibs_fetch.config_mask |= IBS_FETCH_L3MISSONLY;

	perf_ibs_fetch.pmu.attr_groups = fetch_attr_groups;
	perf_ibs_fetch.pmu.attr_update = fetch_attr_update;

	return perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
}
static __init int perf_ibs_op_init(void)
{
	if (ibs_caps & IBS_CAPS_OPCNT)
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;

	if (ibs_caps & IBS_CAPS_OPCNTEXT) {
		perf_ibs_op.max_period  |= IBS_OP_MAX_CNT_EXT_MASK;
		perf_ibs_op.config_mask	|= IBS_OP_MAX_CNT_EXT_MASK;
		perf_ibs_op.cnt_mask    |= IBS_OP_MAX_CNT_EXT_MASK;
	}

	if (ibs_caps & IBS_CAPS_ZEN4)
		perf_ibs_op.config_mask |= IBS_OP_L3MISSONLY;

	perf_ibs_op.pmu.attr_groups = empty_attr_groups;
	perf_ibs_op.pmu.attr_update = op_attr_update;

	return perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
}
static __init int perf_event_ibs_init(void)
{
	int ret;

	ret = perf_ibs_fetch_init();
	if (ret)
		return ret;

	ret = perf_ibs_op_init();
	if (ret)
		goto err_fetch;

	ret = register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	if (ret)
		goto err_op;

	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
	return 0;

err_op:
	perf_pmu_unregister(&perf_ibs_op.pmu);
	free_percpu(perf_ibs_op.pcpu);
	perf_ibs_op.pcpu = NULL;
err_fetch:
	perf_pmu_unregister(&perf_ibs_fetch.pmu);
	free_percpu(perf_ibs_fetch.pcpu);
	perf_ibs_fetch.pcpu = NULL;

	return ret;
}
#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void)
{
	return 0;
}

#endif
/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}
u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);
static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
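
/*
 * get_eilvt() reserves an extended interrupt LVT offset by programming it
 * as a masked NMI entry and put_eilvt() releases it again; since
 * setup_APIC_eilvt() fails when the entry is already in use with different
 * settings, this doubles as a probe for free offsets.
 */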
/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}
static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
				 value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		pr_debug("No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}
/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * set up the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset, which then updates
 * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");
		return;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid())
		goto out;

	pr_info("LVT offset %d assigned\n", offset);

	return;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return;
}
static void ibs_eilvt_setup(void)
{
	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();
}
static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}
static void setup_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
{
	setup_APIC_ibs();
	return 0;
}

#ifdef CONFIG_PM

static int perf_ibs_suspend(void)
{
	clear_APIC_ibs();
	return 0;
}

static void perf_ibs_resume(void)
{
	ibs_eilvt_setup();
	setup_APIC_ibs();
}

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif
static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
{
	clear_APIC_ibs();
	return 0;
}
static __init int amd_ibs_init(void)
{
	u32 caps;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		return -EINVAL;

	perf_ibs_pm_init();

	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	/*
	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
	 * all online cpus.
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
			  "perf/x86/amd/ibs:starting",
			  x86_pmu_amd_ibs_starting_cpu,
			  x86_pmu_amd_ibs_dying_cpu);

	return perf_event_ibs_init();
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);