2 * Performance events - AMD IBS
4 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
6 * For licensing details see kernel-base/COPYING
9 #include <linux/perf_event.h>
10 #include <linux/init.h>
11 #include <linux/export.h>
12 #include <linux/pci.h>
13 #include <linux/ptrace.h>
14 #include <linux/syscore_ops.h>
15 #include <linux/sched/clock.h>
19 #include "../perf_event.h"
23 #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
25 #include <linux/kprobes.h>
26 #include <linux/hardirq.h>
29 #include <asm/amd-ibs.h>
31 #define IBS_FETCH_CONFIG_MASK (IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
32 #define IBS_OP_CONFIG_MASK IBS_OP_MAX_CNT
38 * ENABLED; tracks the pmu::add(), pmu::del() state: when set, the counter is taken
39 * and any further add()s must fail.
41 * STARTED/STOPPING/STOPPED; deal with pmu::start(), pmu::stop() state but are
42 * complicated by the fact that the IBS hardware can send late NMIs (i.e. after
43 * we've cleared the EN bit).
45 * In order to consume these late NMIs we have the STOPPED state; any NMI that
46 * happens after we've cleared the EN state will clear this bit and report the
47 * NMI handled (this is fundamentally racy in the face of multiple NMI sources,
48 * someone else can consume our BIT and our NMI will go unhandled).
50 * And since we cannot set/clear this separate bit together with the EN bit,
51 * there are races; if we cleared STARTED early, an NMI could land in
52 * between clearing STARTED and clearing the EN bit (in fact multiple NMIs
53 * could happen if the period is small enough), and consume our STOPPED bit
54 * and trigger streams of unhandled NMIs.
56 * If, however, we clear STARTED late, an NMI can hit between clearing the
57 * EN bit and clearing STARTED, still see STARTED set and process the event.
58 * If this event has the VALID bit clear, we bail properly, but this
59 * is not a given. With VALID set we can end up calling pmu::stop() again
60 * (the throttle logic) and trigger the WARNs in there.
62 * So what we do is set STOPPING before clearing EN to avoid the pmu::stop()
63 * nesting, and clear STARTED late, so that we have a well defined state over
64 * the clearing of the EN bit.
66 * XXX: we could probably be using !atomic bitops for all this.
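/*
 * A minimal sketch of the state bits referred to above; the exact enum is
 * elided in this listing, so the values are assumed, but the names match
 * their use in perf_ibs_add()/del()/start()/stop() below.
 */
enum ibs_states {
	IBS_ENABLED	= 0,	/* taken by pmu::add(), released by pmu::del() */
	IBS_STARTED	= 1,	/* hardware armed, NMIs expected */
	IBS_STOPPING	= 2,	/* pmu::stop() in progress, prevents nesting */
	IBS_STOPPED	= 3,	/* consume one late NMI after clearing EN */

	IBS_MAX_STATES,
};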
79 struct perf_event *event;
80 unsigned long state[BITS_TO_LONGS(IBS_MAX_STATES)];
91 unsigned long offset_mask[1];
93 unsigned int fetch_count_reset_broken : 1;
94 unsigned int fetch_ignore_if_zero_rip : 1;
95 struct cpu_perf_ibs __percpu *pcpu;
97 u64 (*get_count)(u64 config);
101 perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
103 s64 left = local64_read(&hwc->period_left);
104 s64 period = hwc->sample_period;
108 * If we are way outside a reasonable range then just skip forward:
110 if (unlikely(left <= -period)) {
112 local64_set(&hwc->period_left, left);
113 hwc->last_period = period;
117 if (unlikely(left < (s64)min)) {
119 local64_set(&hwc->period_left, left);
120 hwc->last_period = period;
125 * If the hw period that triggers the sw overflow is too short,
126 * we might take the next NMI while still in, or just returning
127 * from, the irq handler. This biases the results. Thus we shorten
128 * the next-to-last period and set the last period to the max period.
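/*
 * A sketch of the shortening described above (the actual code is elided in
 * this listing, so this is a reconstruction, not verbatim): if the remaining
 * period does not fit into one hw period, program a clamped "left - max"
 * now so that the final period can be the full max period.
 */
if (left > max) {
	left -= max;
	if (left > max)
		left = max;
	else if (left < min)
		left = min;
}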
138 *hw_period = (u64)left;
144 perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
146 struct hw_perf_event *hwc = &event->hw;
147 int shift = 64 - width;
152 * Careful: an NMI might modify the previous event value.
154 * Our tactic to handle this is to first atomically read and
155 * exchange a new raw count - then add that new-prev delta
156 * count to the generic event atomically:
158 prev_raw_count = local64_read(&hwc->prev_count);
159 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
160 new_raw_count) != prev_raw_count)
164 * Now we have the new raw value and have updated the prev
165 * timestamp already. We can now calculate the elapsed delta
166 * (event-)time and add that to the generic event.
168 * Careful: not all hw sign-extends above the physical width of the count.
171 delta = (new_raw_count << shift) - (prev_raw_count << shift);
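/*
 * Presumably followed by an arithmetic shift back down (elided in this
 * listing), so the delta is sign-extended from the hardware counter width:
 */
delta >>= shift;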
174 local64_add(delta, &event->count);
175 local64_sub(delta, &hwc->period_left);
180 static struct perf_ibs perf_ibs_fetch;
181 static struct perf_ibs perf_ibs_op;
183 static struct perf_ibs *get_ibs_pmu(int type)
185 if (perf_ibs_fetch.pmu.type == type)
186 return &perf_ibs_fetch;
187 if (perf_ibs_op.pmu.type == type)
193 * core pmu config -> IBS config
195 * perf record -a -e cpu-cycles:p ... # use ibs op counting cycle count
196 * perf record -a -e r076:p ... # same as -e cpu-cycles:p
197 * perf record -a -e r0C1:p ... # use ibs op counting micro-ops
199 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
200 * MSRC001_1033) is used to select either cycle or micro-ops counting mode.
203 static int core_pmu_ibs_config(struct perf_event *event, u64 *config)
205 switch (event->attr.type) {
206 case PERF_TYPE_HARDWARE:
207 switch (event->attr.config) {
208 case PERF_COUNT_HW_CPU_CYCLES:
214 switch (event->attr.config) {
219 *config = IBS_OP_CNT_CTL;
231 * The rip of IBS samples has skid 0. Thus, IBS supports precise
232 * levels 1 and 2 and PERF_EFLAGS_EXACT is set. In rare cases the
233 * rip is invalid because IBS was not able to record it correctly.
234 * We then clear PERF_EFLAGS_EXACT and take the rip from pt_regs.
236 int forward_event_to_ibs(struct perf_event *event)
240 if (!event->attr.precise_ip || event->attr.precise_ip > 2)
243 if (!core_pmu_ibs_config(event, &config)) {
244 event->attr.type = perf_ibs_op.pmu.type;
245 event->attr.config = config;
250 static int perf_ibs_init(struct perf_event *event)
252 struct hw_perf_event *hwc = &event->hw;
253 struct perf_ibs *perf_ibs;
256 perf_ibs = get_ibs_pmu(event->attr.type);
260 config = event->attr.config;
262 if (event->pmu != &perf_ibs->pmu)
265 if (config & ~perf_ibs->config_mask)
268 if (hwc->sample_period) {
269 if (config & perf_ibs->cnt_mask)
270 /* raw max_cnt may not be set */
272 if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
274 * lower 4 bits cannot be set in ibs max cnt,
275 * but we allow it in case we adjust the
276 * sample period to set a frequency.
279 hwc->sample_period &= ~0x0FULL;
280 if (!hwc->sample_period)
281 hwc->sample_period = 0x10;
283 max_cnt = config & perf_ibs->cnt_mask;
284 config &= ~perf_ibs->cnt_mask;
285 event->attr.sample_period = max_cnt << 4;
286 hwc->sample_period = event->attr.sample_period;
289 if (!hwc->sample_period)
293 * If we modify hwc->sample_period, we also need to update
294 * hwc->last_period and hwc->period_left.
296 hwc->last_period = hwc->sample_period;
297 local64_set(&hwc->period_left, hwc->sample_period);
299 hwc->config_base = perf_ibs->msr;
300 hwc->config = config;
305 static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
306 struct hw_perf_event *hwc, u64 *period)
310 /* ignore lower 4 bits in min count: */
311 overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
312 local64_set(&hwc->prev_count, 0);
317 static u64 get_ibs_fetch_count(u64 config)
319 union ibs_fetch_ctl fetch_ctl = (union ibs_fetch_ctl)config;
321 return fetch_ctl.fetch_cnt << 4;
324 static u64 get_ibs_op_count(u64 config)
326 union ibs_op_ctl op_ctl = (union ibs_op_ctl)config;
330 * If the internal 27-bit counter rolled over, the count is MaxCnt
331 * and the lower 7 bits of CurCnt are randomized.
332 * Otherwise CurCnt has the full 27-bit current counter value.
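/* The branch below is presumably gated on the rollover/valid bit (line
 * elided in this listing; the field name is assumed): */
if (op_ctl.op_val) {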
335 count = op_ctl.opmaxcnt << 4;
336 if (ibs_caps & IBS_CAPS_OPCNTEXT)
337 count += op_ctl.opmaxcnt_ext << 20;
338 } else if (ibs_caps & IBS_CAPS_RDWROPCNT) {
339 count = op_ctl.opcurcnt;
346 perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
349 u64 count = perf_ibs->get_count(*config);
352 * Set width to 64 since we do not overflow on max width but
353 * instead on max count. In perf_ibs_set_period() we clear
354 * prev count manually on overflow.
356 while (!perf_event_try_update(event, count, 64)) {
357 rdmsrl(event->hw.config_base, *config);
358 count = perf_ibs->get_count(*config);
362 static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
363 struct hw_perf_event *hwc, u64 config)
365 u64 tmp = hwc->config | config;
367 if (perf_ibs->fetch_count_reset_broken)
368 wrmsrl(hwc->config_base, tmp & ~perf_ibs->enable_mask);
370 wrmsrl(hwc->config_base, tmp | perf_ibs->enable_mask);
374 * Erratum #420 Instruction-Based Sampling Engine May Generate
375 * Interrupt that Cannot Be Cleared:
377 * Must clear counter mask first, then clear the enable bit. See
378 * Revision Guide for AMD Family 10h Processors, Publication #41322.
380 static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
381 struct hw_perf_event *hwc, u64 config)
383 config &= ~perf_ibs->cnt_mask;
384 if (boot_cpu_data.x86 == 0x10)
385 wrmsrl(hwc->config_base, config);
386 config &= ~perf_ibs->enable_mask;
387 wrmsrl(hwc->config_base, config);
391 * We cannot restore the ibs pmu state, so we always need to update
392 * the event while stopping it and then reset the state when starting
393 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags
394 * in perf_ibs_start()/perf_ibs_stop() and instead always do it.
396 static void perf_ibs_start(struct perf_event *event, int flags)
398 struct hw_perf_event *hwc = &event->hw;
399 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
400 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
401 u64 period, config = 0;
403 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
406 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
409 perf_ibs_set_period(perf_ibs, hwc, &period);
410 if (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_OPCNTEXT)) {
411 config |= period & IBS_OP_MAX_CNT_EXT_MASK;
412 period &= ~IBS_OP_MAX_CNT_EXT_MASK;
414 config |= period >> 4;
417 * Set STARTED before enabling the hardware, such that a subsequent NMI must observe it.
420 set_bit(IBS_STARTED, pcpu->state);
421 clear_bit(IBS_STOPPING, pcpu->state);
422 perf_ibs_enable_event(perf_ibs, hwc, config);
424 perf_event_update_userpage(event);
427 static void perf_ibs_stop(struct perf_event *event, int flags)
429 struct hw_perf_event *hwc = &event->hw;
430 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
431 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
435 if (test_and_set_bit(IBS_STOPPING, pcpu->state))
438 stopping = test_bit(IBS_STARTED, pcpu->state);
440 if (!stopping && (hwc->state & PERF_HES_UPTODATE))
443 rdmsrl(hwc->config_base, config);
447 * Set STOPPED before disabling the hardware, such that it
448 * must be visible to NMIs the moment we clear the EN bit,
449 * at which point we can generate an !VALID sample which
450 * we need to consume.
452 set_bit(IBS_STOPPED, pcpu->state);
453 perf_ibs_disable_event(perf_ibs, hwc, config);
455 * Clear STARTED after disabling the hardware; if it were
456 * cleared before, an NMI hitting after the clear but before
457 * clearing the EN bit might think it a spurious NMI and not handle it.
460 * Clearing it after, however, creates the problem of the NMI
461 * handler seeing STARTED but not having a valid sample.
463 clear_bit(IBS_STARTED, pcpu->state);
464 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
465 hwc->state |= PERF_HES_STOPPED;
468 if (hwc->state & PERF_HES_UPTODATE)
472 * Clear the valid bit so we do not count rollovers on update;
473 * rollovers are only accounted in the irq handler.
475 config &= ~perf_ibs->valid_mask;
477 perf_ibs_event_update(perf_ibs, event, &config);
478 hwc->state |= PERF_HES_UPTODATE;
481 static int perf_ibs_add(struct perf_event *event, int flags)
483 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
484 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
486 if (test_and_set_bit(IBS_ENABLED, pcpu->state))
489 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
493 if (flags & PERF_EF_START)
494 perf_ibs_start(event, PERF_EF_RELOAD);
499 static void perf_ibs_del(struct perf_event *event, int flags)
501 struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
502 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
504 if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
507 perf_ibs_stop(event, PERF_EF_UPDATE);
511 perf_event_update_userpage(event);
514 static void perf_ibs_read(struct perf_event *event) { }
517 * We need to initialize with an empty group if all attributes in the group are dynamic.
520 static struct attribute *attrs_empty[] = {
524 static struct attribute_group empty_format_group = {
526 .attrs = attrs_empty,
529 static struct attribute_group empty_caps_group = {
531 .attrs = attrs_empty,
534 static const struct attribute_group *empty_attr_groups[] = {
540 PMU_FORMAT_ATTR(rand_en, "config:57");
541 PMU_FORMAT_ATTR(cnt_ctl, "config:19");
542 PMU_EVENT_ATTR_STRING(l3missonly, fetch_l3missonly, "config:59");
543 PMU_EVENT_ATTR_STRING(l3missonly, op_l3missonly, "config:16");
544 PMU_EVENT_ATTR_STRING(zen4_ibs_extensions, zen4_ibs_extensions, "1");
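/*
 * The attributes above surface via sysfs on the ibs_fetch / ibs_op PMUs
 * (group names are assigned further down), so the bits can be requested
 * symbolically, e.g. (illustrative, assuming the usual sysfs event-source
 * layout):
 *
 *   perf record -a -e ibs_op/cnt_ctl=1/ ...        # count micro-ops, not cycles
 *   perf record -a -e ibs_fetch/l3missonly=1/ ...  # Zen4 IBS extensions only
 */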
547 zen4_ibs_extensions_is_visible(struct kobject *kobj, struct attribute *attr, int i)
549 return ibs_caps & IBS_CAPS_ZEN4 ? attr->mode : 0;
552 static struct attribute *rand_en_attrs[] = {
553 &format_attr_rand_en.attr,
557 static struct attribute *fetch_l3missonly_attrs[] = {
558 &fetch_l3missonly.attr.attr,
562 static struct attribute *zen4_ibs_extensions_attrs[] = {
563 &zen4_ibs_extensions.attr.attr,
567 static struct attribute_group group_rand_en = {
569 .attrs = rand_en_attrs,
572 static struct attribute_group group_fetch_l3missonly = {
574 .attrs = fetch_l3missonly_attrs,
575 .is_visible = zen4_ibs_extensions_is_visible,
578 static struct attribute_group group_zen4_ibs_extensions = {
580 .attrs = zen4_ibs_extensions_attrs,
581 .is_visible = zen4_ibs_extensions_is_visible,
584 static const struct attribute_group *fetch_attr_groups[] = {
590 static const struct attribute_group *fetch_attr_update[] = {
591 &group_fetch_l3missonly,
592 &group_zen4_ibs_extensions,
597 cnt_ctl_is_visible(struct kobject *kobj, struct attribute *attr, int i)
599 return ibs_caps & IBS_CAPS_OPCNT ? attr->mode : 0;
602 static struct attribute *cnt_ctl_attrs[] = {
603 &format_attr_cnt_ctl.attr,
607 static struct attribute *op_l3missonly_attrs[] = {
608 &op_l3missonly.attr.attr,
612 static struct attribute_group group_cnt_ctl = {
614 .attrs = cnt_ctl_attrs,
615 .is_visible = cnt_ctl_is_visible,
618 static struct attribute_group group_op_l3missonly = {
620 .attrs = op_l3missonly_attrs,
621 .is_visible = zen4_ibs_extensions_is_visible,
624 static const struct attribute_group *op_attr_update[] = {
626 &group_op_l3missonly,
627 &group_zen4_ibs_extensions,
631 static struct perf_ibs perf_ibs_fetch = {
633 .task_ctx_nr = perf_invalid_context,
635 .event_init = perf_ibs_init,
638 .start = perf_ibs_start,
639 .stop = perf_ibs_stop,
640 .read = perf_ibs_read,
641 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
643 .msr = MSR_AMD64_IBSFETCHCTL,
644 .config_mask = IBS_FETCH_CONFIG_MASK,
645 .cnt_mask = IBS_FETCH_MAX_CNT,
646 .enable_mask = IBS_FETCH_ENABLE,
647 .valid_mask = IBS_FETCH_VAL,
648 .max_period = IBS_FETCH_MAX_CNT << 4,
649 .offset_mask = { MSR_AMD64_IBSFETCH_REG_MASK },
650 .offset_max = MSR_AMD64_IBSFETCH_REG_COUNT,
652 .get_count = get_ibs_fetch_count,
655 static struct perf_ibs perf_ibs_op = {
657 .task_ctx_nr = perf_invalid_context,
659 .event_init = perf_ibs_init,
662 .start = perf_ibs_start,
663 .stop = perf_ibs_stop,
664 .read = perf_ibs_read,
665 .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
667 .msr = MSR_AMD64_IBSOPCTL,
668 .config_mask = IBS_OP_CONFIG_MASK,
669 .cnt_mask = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
671 .enable_mask = IBS_OP_ENABLE,
672 .valid_mask = IBS_OP_VAL,
673 .max_period = IBS_OP_MAX_CNT << 4,
674 .offset_mask = { MSR_AMD64_IBSOP_REG_MASK },
675 .offset_max = MSR_AMD64_IBSOP_REG_COUNT,
677 .get_count = get_ibs_op_count,
680 static void perf_ibs_get_mem_op(union ibs_op_data3 *op_data3,
681 struct perf_sample_data *data)
683 union perf_mem_data_src *data_src = &data->data_src;
685 data_src->mem_op = PERF_MEM_OP_NA;
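/* presumably guarded by the load-op flag (line elided in this listing;
 * field name assumed by analogy with st_op below): */
if (op_data3->ld_op)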
688 data_src->mem_op = PERF_MEM_OP_LOAD;
689 else if (op_data3->st_op)
690 data_src->mem_op = PERF_MEM_OP_STORE;
694 * Processors having CPUID_Fn8000001B_EAX[11] aka IBS_CAPS_ZEN4 have
695 * more fine-grained DataSrc encodings. Others have coarse ones.
697 static u8 perf_ibs_data_src(union ibs_op_data2 *op_data2)
699 if (ibs_caps & IBS_CAPS_ZEN4)
700 return (op_data2->data_src_hi << 3) | op_data2->data_src_lo;
702 return op_data2->data_src_lo;
705 static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
706 union ibs_op_data3 *op_data3,
707 struct perf_sample_data *data)
709 union perf_mem_data_src *data_src = &data->data_src;
710 u8 ibs_data_src = perf_ibs_data_src(op_data2);
712 data_src->mem_lvl = 0;
715 * DcMiss, L2Miss, DataSrc, DcMissLat etc. are all invalid for Uncached
716 * memory accesses. So, check DcUcMemAcc bit early.
718 if (op_data3->dc_uc_mem_acc && ibs_data_src != IBS_DATA_SRC_EXT_IO) {
719 data_src->mem_lvl = PERF_MEM_LVL_UNC | PERF_MEM_LVL_HIT;
724 if (op_data3->dc_miss == 0) {
725 data_src->mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
730 if (op_data3->l2_miss == 0) {
732 if (boot_cpu_data.x86 != 0x19 || boot_cpu_data.x86_model > 0xF ||
733 !(op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc)) {
734 data_src->mem_lvl = PERF_MEM_LVL_L2 | PERF_MEM_LVL_HIT;
740 * OP_DATA2 is valid only for load ops. Skip all checks which
741 * use OP_DATA2[DataSrc].
743 if (data_src->mem_op != PERF_MEM_OP_LOAD)
747 if (ibs_caps & IBS_CAPS_ZEN4) {
748 if (ibs_data_src == IBS_DATA_SRC_EXT_LOC_CACHE) {
749 data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_HIT;
753 if (ibs_data_src == IBS_DATA_SRC_LOC_CACHE) {
754 data_src->mem_lvl = PERF_MEM_LVL_L3 | PERF_MEM_LVL_REM_CCE1 |
760 /* A peer cache in a near CCX */
761 if (ibs_caps & IBS_CAPS_ZEN4 &&
762 ibs_data_src == IBS_DATA_SRC_EXT_NEAR_CCX_CACHE) {
763 data_src->mem_lvl = PERF_MEM_LVL_REM_CCE1 | PERF_MEM_LVL_HIT;
767 /* A peer cache in a far CCX */
768 if (ibs_caps & IBS_CAPS_ZEN4) {
769 if (ibs_data_src == IBS_DATA_SRC_EXT_FAR_CCX_CACHE) {
770 data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2 | PERF_MEM_LVL_HIT;
774 if (ibs_data_src == IBS_DATA_SRC_REM_CACHE) {
775 data_src->mem_lvl = PERF_MEM_LVL_REM_CCE2 | PERF_MEM_LVL_HIT;
781 if (ibs_data_src == IBS_DATA_SRC_EXT_DRAM) {
782 if (op_data2->rmt_node == 0)
783 data_src->mem_lvl = PERF_MEM_LVL_LOC_RAM | PERF_MEM_LVL_HIT;
785 data_src->mem_lvl = PERF_MEM_LVL_REM_RAM1 | PERF_MEM_LVL_HIT;
790 if (ibs_caps & IBS_CAPS_ZEN4 && ibs_data_src == IBS_DATA_SRC_EXT_PMEM) {
791 data_src->mem_lvl_num = PERF_MEM_LVLNUM_PMEM;
792 if (op_data2->rmt_node) {
793 data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
794 /* IBS doesn't provide Remote socket detail */
795 data_src->mem_hops = PERF_MEM_HOPS_1;
800 /* Extension Memory */
801 if (ibs_caps & IBS_CAPS_ZEN4 &&
802 ibs_data_src == IBS_DATA_SRC_EXT_EXT_MEM) {
803 data_src->mem_lvl_num = PERF_MEM_LVLNUM_CXL;
804 if (op_data2->rmt_node) {
805 data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
806 /* IBS doesn't provide Remote socket detail */
807 data_src->mem_hops = PERF_MEM_HOPS_1;
813 if (ibs_data_src == IBS_DATA_SRC_EXT_IO) {
814 data_src->mem_lvl = PERF_MEM_LVL_IO;
815 data_src->mem_lvl_num = PERF_MEM_LVLNUM_IO;
816 if (op_data2->rmt_node) {
817 data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
818 /* IBS doesn't provide Remote socket detail */
819 data_src->mem_hops = PERF_MEM_HOPS_1;
826 * MAB (Miss Address Buffer) Hit. MAB keeps track of outstanding
827 * DC misses. However, such data may come from any level in the
828 * memory hierarchy. IBS reports both the MAB hit and the actual
829 * DataSrc simultaneously. Prioritize DataSrc over MAB, i.e. set
830 * MAB only when IBS fails to provide a DataSrc.
832 if (op_data3->dc_miss_no_mab_alloc) {
833 data_src->mem_lvl = PERF_MEM_LVL_LFB | PERF_MEM_LVL_HIT;
837 data_src->mem_lvl = PERF_MEM_LVL_NA;
840 static bool perf_ibs_cache_hit_st_valid(void)
842 /* 0: Uninitialized, 1: Valid, -1: Invalid */
843 static int cache_hit_st_valid;
845 if (unlikely(!cache_hit_st_valid)) {
846 if (boot_cpu_data.x86 == 0x19 &&
847 (boot_cpu_data.x86_model <= 0xF ||
848 (boot_cpu_data.x86_model >= 0x20 &&
849 boot_cpu_data.x86_model <= 0x5F))) {
850 cache_hit_st_valid = -1;
852 cache_hit_st_valid = 1;
856 return cache_hit_st_valid == 1;
859 static void perf_ibs_get_mem_snoop(union ibs_op_data2 *op_data2,
860 struct perf_sample_data *data)
862 union perf_mem_data_src *data_src = &data->data_src;
865 data_src->mem_snoop = PERF_MEM_SNOOP_NA;
867 if (!perf_ibs_cache_hit_st_valid() ||
868 data_src->mem_op != PERF_MEM_OP_LOAD ||
869 data_src->mem_lvl & PERF_MEM_LVL_L1 ||
870 data_src->mem_lvl & PERF_MEM_LVL_L2 ||
871 op_data2->cache_hit_st)
874 ibs_data_src = perf_ibs_data_src(op_data2);
876 if (ibs_caps & IBS_CAPS_ZEN4) {
877 if (ibs_data_src == IBS_DATA_SRC_EXT_LOC_CACHE ||
878 ibs_data_src == IBS_DATA_SRC_EXT_NEAR_CCX_CACHE ||
879 ibs_data_src == IBS_DATA_SRC_EXT_FAR_CCX_CACHE)
880 data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
881 } else if (ibs_data_src == IBS_DATA_SRC_LOC_CACHE) {
882 data_src->mem_snoop = PERF_MEM_SNOOP_HITM;
886 static void perf_ibs_get_tlb_lvl(union ibs_op_data3 *op_data3,
887 struct perf_sample_data *data)
889 union perf_mem_data_src *data_src = &data->data_src;
891 data_src->mem_dtlb = PERF_MEM_TLB_NA;
893 if (!op_data3->dc_lin_addr_valid)
896 if (!op_data3->dc_l1tlb_miss) {
897 data_src->mem_dtlb = PERF_MEM_TLB_L1 | PERF_MEM_TLB_HIT;
901 if (!op_data3->dc_l2tlb_miss) {
902 data_src->mem_dtlb = PERF_MEM_TLB_L2 | PERF_MEM_TLB_HIT;
906 data_src->mem_dtlb = PERF_MEM_TLB_L2 | PERF_MEM_TLB_MISS;
909 static void perf_ibs_get_mem_lock(union ibs_op_data3 *op_data3,
910 struct perf_sample_data *data)
912 union perf_mem_data_src *data_src = &data->data_src;
914 data_src->mem_lock = PERF_MEM_LOCK_NA;
916 if (op_data3->dc_locked_op)
917 data_src->mem_lock = PERF_MEM_LOCK_LOCKED;
920 #define ibs_op_msr_idx(msr) (msr - MSR_AMD64_IBSOPCTL)
922 static void perf_ibs_get_data_src(struct perf_ibs_data *ibs_data,
923 struct perf_sample_data *data,
924 union ibs_op_data2 *op_data2,
925 union ibs_op_data3 *op_data3)
927 perf_ibs_get_mem_lvl(op_data2, op_data3, data);
928 perf_ibs_get_mem_snoop(op_data2, data);
929 perf_ibs_get_tlb_lvl(op_data3, data);
930 perf_ibs_get_mem_lock(op_data3, data);
933 static __u64 perf_ibs_get_op_data2(struct perf_ibs_data *ibs_data,
934 union ibs_op_data3 *op_data3)
936 __u64 val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA2)];
939 if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model <= 0xF &&
940 (op_data3->sw_pf || op_data3->dc_miss_no_mab_alloc)) {
942 * OP_DATA2 has only two fields on Zen3: DataSrc and RmtNode.
943 * DataSrc=0 is 'No valid status' and RmtNode is invalid when DataSrc=0.
951 static void perf_ibs_parse_ld_st_data(__u64 sample_type,
952 struct perf_ibs_data *ibs_data,
953 struct perf_sample_data *data)
955 union ibs_op_data3 op_data3;
956 union ibs_op_data2 op_data2;
957 union ibs_op_data op_data;
959 data->data_src.val = PERF_MEM_NA;
960 op_data3.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA3)];
962 perf_ibs_get_mem_op(&op_data3, data);
963 if (data->data_src.mem_op != PERF_MEM_OP_LOAD &&
964 data->data_src.mem_op != PERF_MEM_OP_STORE)
967 op_data2.val = perf_ibs_get_op_data2(ibs_data, &op_data3);
969 if (sample_type & PERF_SAMPLE_DATA_SRC) {
970 perf_ibs_get_data_src(ibs_data, data, &op_data2, &op_data3);
971 data->sample_flags |= PERF_SAMPLE_DATA_SRC;
974 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE && op_data3.dc_miss &&
975 data->data_src.mem_op == PERF_MEM_OP_LOAD) {
976 op_data.val = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSOPDATA)];
978 if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
979 data->weight.var1_dw = op_data3.dc_miss_lat;
980 data->weight.var2_w = op_data.tag_to_ret_ctr;
981 } else if (sample_type & PERF_SAMPLE_WEIGHT) {
982 data->weight.full = op_data3.dc_miss_lat;
984 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
987 if (sample_type & PERF_SAMPLE_ADDR && op_data3.dc_lin_addr_valid) {
988 data->addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCLINAD)];
989 data->sample_flags |= PERF_SAMPLE_ADDR;
992 if (sample_type & PERF_SAMPLE_PHYS_ADDR && op_data3.dc_phy_addr_valid) {
993 data->phys_addr = ibs_data->regs[ibs_op_msr_idx(MSR_AMD64_IBSDCPHYSAD)];
994 data->sample_flags |= PERF_SAMPLE_PHYS_ADDR;
998 static int perf_ibs_get_offset_max(struct perf_ibs *perf_ibs, u64 sample_type,
1001 if (sample_type & PERF_SAMPLE_RAW ||
1002 (perf_ibs == &perf_ibs_op &&
1003 (sample_type & PERF_SAMPLE_DATA_SRC ||
1004 sample_type & PERF_SAMPLE_WEIGHT_TYPE ||
1005 sample_type & PERF_SAMPLE_ADDR ||
1006 sample_type & PERF_SAMPLE_PHYS_ADDR)))
1007 return perf_ibs->offset_max;
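/*
 * Presumed fallback (elided in this listing): when the RIP has to be
 * checked, read the first three registers (the control register, the RIP,
 * and the data register carrying the RIP-invalid flag); otherwise the
 * control register alone is enough. A sketch:
 */
if (check_rip)
	return 3;

return 1;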
1013 static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
1015 struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
1016 struct perf_event *event = pcpu->event;
1017 struct hw_perf_event *hwc;
1018 struct perf_sample_data data;
1019 struct perf_raw_record raw;
1020 struct pt_regs regs;
1021 struct perf_ibs_data ibs_data;
1022 int offset, size, check_rip, offset_max, throttle = 0;
1024 u64 *buf, *config, period, new_config = 0;
1026 if (!test_bit(IBS_STARTED, pcpu->state)) {
1029 * Catch spurious interrupts after stopping IBS: after
1030 * disabling IBS there could still be incoming NMIs
1031 * with samples that even have the valid bit cleared.
1032 * Mark all these NMIs as handled.
1034 if (test_and_clear_bit(IBS_STOPPED, pcpu->state))
1040 if (WARN_ON_ONCE(!event))
1044 msr = hwc->config_base;
1045 buf = ibs_data.regs;
1047 if (!(*buf++ & perf_ibs->valid_mask))
1050 config = &ibs_data.regs[0];
1051 perf_ibs_event_update(perf_ibs, event, config);
1052 perf_sample_data_init(&data, 0, hwc->last_period);
1053 if (!perf_ibs_set_period(perf_ibs, hwc, &period))
1054 goto out; /* no sw counter overflow */
1056 ibs_data.caps = ibs_caps;
1059 check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
1061 offset_max = perf_ibs_get_offset_max(perf_ibs, event->attr.sample_type, check_rip);
1064 rdmsrl(msr + offset, *buf++);
1066 offset = find_next_bit(perf_ibs->offset_mask,
1067 perf_ibs->offset_max,
1069 } while (offset < offset_max);
1071 * Read IbsBrTarget, IbsOpData4, and IbsExtdCtl separately
1072 * depending on their availability.
1073 * Can't add to offset_max as they are staggered
1075 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
1076 if (perf_ibs == &perf_ibs_op) {
1077 if (ibs_caps & IBS_CAPS_BRNTRGT) {
1078 rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
1081 if (ibs_caps & IBS_CAPS_OPDATA4) {
1082 rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
1086 if (perf_ibs == &perf_ibs_fetch && (ibs_caps & IBS_CAPS_FETCHCTLEXTD)) {
1087 rdmsrl(MSR_AMD64_ICIBSEXTDCTL, *buf++);
1091 ibs_data.size = sizeof(u64) * size;
1094 if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
1095 regs.flags &= ~PERF_EFLAGS_EXACT;
1097 /* Workaround for erratum #1197 */
1098 if (perf_ibs->fetch_ignore_if_zero_rip && !(ibs_data.regs[1]))
1101 set_linear_ip(&regs, ibs_data.regs[1]);
1102 regs.flags |= PERF_EFLAGS_EXACT;
1105 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
1106 raw = (struct perf_raw_record){
1108 .size = sizeof(u32) + ibs_data.size,
1109 .data = ibs_data.data,
1113 data.sample_flags |= PERF_SAMPLE_RAW;
1116 if (perf_ibs == &perf_ibs_op)
1117 perf_ibs_parse_ld_st_data(event->attr.sample_type, &ibs_data, &data);
1120 * rip recorded by IbsOpRip will not be consistent with rsp and rbp
1121 * recorded as part of interrupt regs. Thus we need to use rip from
1122 * interrupt regs while unwinding the call stack.
1124 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
1125 data.callchain = perf_callchain(event, iregs);
1126 data.sample_flags |= PERF_SAMPLE_CALLCHAIN;
1129 throttle = perf_event_overflow(event, &data, &regs);
1132 perf_ibs_stop(event, 0);
1134 if (perf_ibs == &perf_ibs_op) {
1135 if (ibs_caps & IBS_CAPS_OPCNTEXT) {
1136 new_config = period & IBS_OP_MAX_CNT_EXT_MASK;
1137 period &= ~IBS_OP_MAX_CNT_EXT_MASK;
1139 if ((ibs_caps & IBS_CAPS_RDWROPCNT) && (*config & IBS_OP_CNT_CTL))
1140 new_config |= *config & IBS_OP_CUR_CNT_RAND;
1142 new_config |= period >> 4;
1144 perf_ibs_enable_event(perf_ibs, hwc, new_config);
1147 perf_event_update_userpage(event);
1153 perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1155 u64 stamp = sched_clock();
1158 handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
1159 handled += perf_ibs_handle_irq(&perf_ibs_op, regs);
1162 inc_irq_stat(apic_perf_irqs);
1164 perf_sample_event_took(sched_clock() - stamp);
1168 NOKPROBE_SYMBOL(perf_ibs_nmi_handler);
1170 static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
1172 struct cpu_perf_ibs __percpu *pcpu;
1175 pcpu = alloc_percpu(struct cpu_perf_ibs);
1179 perf_ibs->pcpu = pcpu;
1181 ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
1183 perf_ibs->pcpu = NULL;
1190 static __init int perf_ibs_fetch_init(void)
1193 * Some chips fail to reset the fetch count when it is written; instead
1194 * they need a 0-1 transition of IbsFetchEn.
1196 if (boot_cpu_data.x86 >= 0x16 && boot_cpu_data.x86 <= 0x18)
1197 perf_ibs_fetch.fetch_count_reset_broken = 1;
1199 if (boot_cpu_data.x86 == 0x19 && boot_cpu_data.x86_model < 0x10)
1200 perf_ibs_fetch.fetch_ignore_if_zero_rip = 1;
1202 if (ibs_caps & IBS_CAPS_ZEN4)
1203 perf_ibs_fetch.config_mask |= IBS_FETCH_L3MISSONLY;
1205 perf_ibs_fetch.pmu.attr_groups = fetch_attr_groups;
1206 perf_ibs_fetch.pmu.attr_update = fetch_attr_update;
1208 return perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
1211 static __init int perf_ibs_op_init(void)
1213 if (ibs_caps & IBS_CAPS_OPCNT)
1214 perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
1216 if (ibs_caps & IBS_CAPS_OPCNTEXT) {
1217 perf_ibs_op.max_period |= IBS_OP_MAX_CNT_EXT_MASK;
1218 perf_ibs_op.config_mask |= IBS_OP_MAX_CNT_EXT_MASK;
1219 perf_ibs_op.cnt_mask |= IBS_OP_MAX_CNT_EXT_MASK;
1222 if (ibs_caps & IBS_CAPS_ZEN4)
1223 perf_ibs_op.config_mask |= IBS_OP_L3MISSONLY;
1225 perf_ibs_op.pmu.attr_groups = empty_attr_groups;
1226 perf_ibs_op.pmu.attr_update = op_attr_update;
1228 return perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
1231 static __init int perf_event_ibs_init(void)
1235 ret = perf_ibs_fetch_init();
1239 ret = perf_ibs_op_init();
1243 ret = register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
1247 pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
1251 perf_pmu_unregister(&perf_ibs_op.pmu);
1252 free_percpu(perf_ibs_op.pcpu);
1253 perf_ibs_op.pcpu = NULL;
1255 perf_pmu_unregister(&perf_ibs_fetch.pmu);
1256 free_percpu(perf_ibs_fetch.pcpu);
1257 perf_ibs_fetch.pcpu = NULL;
1262 #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
1264 static __init int perf_event_ibs_init(void)
1271 /* IBS - apic initialization, for perf and oprofile */
1273 static __init u32 __get_ibs_caps(void)
1276 unsigned int max_level;
1278 if (!boot_cpu_has(X86_FEATURE_IBS))
1281 /* check IBS cpuid feature flags */
1282 max_level = cpuid_eax(0x80000000);
1283 if (max_level < IBS_CPUID_FEATURES)
1284 return IBS_CAPS_DEFAULT;
1286 caps = cpuid_eax(IBS_CPUID_FEATURES);
1287 if (!(caps & IBS_CAPS_AVAIL))
1288 /* cpuid flags not valid */
1289 return IBS_CAPS_DEFAULT;
1294 u32 get_ibs_caps(void)
1299 EXPORT_SYMBOL(get_ibs_caps);
1301 static inline int get_eilvt(int offset)
1303 return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
1306 static inline int put_eilvt(int offset)
1308 return !setup_APIC_eilvt(offset, 0, 0, 1);
1312 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
1314 static inline int ibs_eilvt_valid(void)
1322 rdmsrl(MSR_AMD64_IBSCTL, val);
1323 offset = val & IBSCTL_LVT_OFFSET_MASK;
1325 if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
1326 pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
1327 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
1331 if (!get_eilvt(offset)) {
1332 pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
1333 smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
1344 static int setup_ibs_ctl(int ibs_eilvt_off)
1346 struct pci_dev *cpu_cfg;
1353 cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
1354 PCI_DEVICE_ID_AMD_10H_NB_MISC,
1359 pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
1360 | IBSCTL_LVT_OFFSET_VALID);
1361 pci_read_config_dword(cpu_cfg, IBSCTL, &value);
1362 if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
1363 pci_dev_put(cpu_cfg);
1364 pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
1371 pr_debug("No CPU node configured for IBS\n");
1379 * This runs only on the current cpu. We try to find an LVT offset and
1380 * set up the local APIC. For this we must disable preemption. On
1381 * success we initialize all nodes with this offset. This then updates
1382 * the offset in the per-node IBS_CTL msr. The per-core APIC setup of
1383 * the IBS interrupt vector is handled by x86_pmu_amd_ibs_starting_cpu(),
1384 * which uses the new offset.
1386 static void force_ibs_eilvt_setup(void)
1392 /* find the next free EILVT entry, skip offset 0 */
1393 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
1394 if (get_eilvt(offset))
1399 if (offset == APIC_EILVT_NR_MAX) {
1400 pr_debug("No EILVT entry available\n");
1404 ret = setup_ibs_ctl(offset);
1408 if (!ibs_eilvt_valid())
1411 pr_info("LVT offset %d assigned\n", offset);
1421 static void ibs_eilvt_setup(void)
1424 * Force LVT offset assignment for family 10h: The offsets are
1425 * not assigned by the BIOS for this family, so the OS is
1426 * responsible for doing it. If the OS assignment fails, fall
1427 * back to the BIOS settings and try to set this up.
1429 if (boot_cpu_data.x86 == 0x10)
1430 force_ibs_eilvt_setup();
1433 static inline int get_ibs_lvt_offset(void)
1437 rdmsrl(MSR_AMD64_IBSCTL, val);
1438 if (!(val & IBSCTL_LVT_OFFSET_VALID))
1441 return val & IBSCTL_LVT_OFFSET_MASK;
1444 static void setup_APIC_ibs(void)
1448 offset = get_ibs_lvt_offset();
1452 if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
1455 pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
1456 smp_processor_id());
1459 static void clear_APIC_ibs(void)
1463 offset = get_ibs_lvt_offset();
1465 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
1468 static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
1476 static int perf_ibs_suspend(void)
1482 static void perf_ibs_resume(void)
1488 static struct syscore_ops perf_ibs_syscore_ops = {
1489 .resume = perf_ibs_resume,
1490 .suspend = perf_ibs_suspend,
1493 static void perf_ibs_pm_init(void)
1495 register_syscore_ops(&perf_ibs_syscore_ops);
1500 static inline void perf_ibs_pm_init(void) { }
1504 static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
1510 static __init int amd_ibs_init(void)
1514 caps = __get_ibs_caps();
1516 return -ENODEV; /* ibs not supported by the cpu */
1520 if (!ibs_eilvt_valid())
1526 /* make ibs_caps visible to other cpus: */
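/* presumably an smp_mb() here (elided in this listing), ordering the
 * ibs_caps store above before the cpuhp/PMU setup below: */
smp_mb();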
1529 * x86_pmu_amd_ibs_starting_cpu will be called from core on online cpus:
1532 cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
1533 "perf/x86/amd/ibs:starting",
1534 x86_pmu_amd_ibs_starting_cpu,
1535 x86_pmu_amd_ibs_dying_cpu);
1537 return perf_event_ibs_init();
1540 /* Since we need the pci subsystem to init ibs, we can't do this earlier: */
1541 device_initcall(amd_ibs_init);