1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/bitops.h>
3 #include <linux/types.h>
4 #include <linux/slab.h>
5 #include <linux/sched/clock.h>
7 #include <asm/cpu_entry_area.h>
8 #include <asm/perf_event.h>
9 #include <asm/tlbflush.h>
12 #include <asm/timer.h>
14 #include "../perf_event.h"
16 /* Waste a full page so it can be mapped into the cpu_entry_area */
17 DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
19 /* The size of a BTS record in bytes: */
20 #define BTS_RECORD_SIZE 24
22 #define PEBS_FIXUP_SIZE PAGE_SIZE
25 * pebs_record_32 for p4 and core not supported
27 struct pebs_record_32 {
35 union intel_x86_pebs_dse {
38 unsigned int ld_dse:4;
39 unsigned int ld_stlb_miss:1;
40 unsigned int ld_locked:1;
41 unsigned int ld_data_blk:1;
42 unsigned int ld_addr_blk:1;
43 unsigned int ld_reserved:24;
46 unsigned int st_l1d_hit:1;
47 unsigned int st_reserved1:3;
48 unsigned int st_stlb_miss:1;
49 unsigned int st_locked:1;
50 unsigned int st_reserved2:26;
53 unsigned int st_lat_dse:4;
54 unsigned int st_lat_stlb_miss:1;
55 unsigned int st_lat_locked:1;
56 unsigned int ld_reserved3:26;
62 * Map PEBS Load Latency Data Source encodings to generic
63 * memory data source information
65 #define P(a, b) PERF_MEM_S(a, b)
66 #define OP_LH (P(OP, LOAD) | P(LVL, HIT))
67 #define LEVEL(x) P(LVLNUM, x)
68 #define REM P(REMOTE, REMOTE)
69 #define SNOOP_NONE_MISS (P(SNOOP, NONE) | P(SNOOP, MISS))
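/*
 * Example (illustrative only): entry 0x01 in the table below expands to
 * PERF_MEM_S(OP, LOAD) | PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L1) |
 * PERF_MEM_S(LVLNUM, L1) | PERF_MEM_S(SNOOP, NONE), i.e. a load that hit
 * the local L1 with no snoop involved.
 */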
71 /* Version for Sandy Bridge and later */
72 static u64 pebs_data_source[] = {
73 P(OP, LOAD) | P(LVL, MISS) | LEVEL(L3) | P(SNOOP, NA),/* 0x00:ukn L3 */
74 OP_LH | P(LVL, L1) | LEVEL(L1) | P(SNOOP, NONE), /* 0x01: L1 local */
75 OP_LH | P(LVL, LFB) | LEVEL(LFB) | P(SNOOP, NONE), /* 0x02: LFB hit */
76 OP_LH | P(LVL, L2) | LEVEL(L2) | P(SNOOP, NONE), /* 0x03: L2 hit */
77 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, NONE), /* 0x04: L3 hit */
78 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, MISS), /* 0x05: L3 hit, snoop miss */
79 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT), /* 0x06: L3 hit, snoop hit */
80 OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM), /* 0x07: L3 hit, snoop hitm */
81 OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x08: L3 miss snoop hit */
82 OP_LH | P(LVL, REM_CCE1) | REM | LEVEL(L3) | P(SNOOP, HITM), /* 0x09: L3 miss snoop hitm*/
83 OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | P(SNOOP, HIT), /* 0x0a: L3 miss, shared */
84 OP_LH | P(LVL, REM_RAM1) | REM | LEVEL(L3) | P(SNOOP, HIT), /* 0x0b: L3 miss, shared */
85 OP_LH | P(LVL, LOC_RAM) | LEVEL(RAM) | SNOOP_NONE_MISS, /* 0x0c: L3 miss, excl */
86 OP_LH | P(LVL, REM_RAM1) | LEVEL(RAM) | REM | SNOOP_NONE_MISS, /* 0x0d: L3 miss, excl */
87 OP_LH | P(LVL, IO) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0e: I/O */
88 OP_LH | P(LVL, UNC) | LEVEL(NA) | P(SNOOP, NONE), /* 0x0f: uncached */
91 /* Patch up minor differences in the bits */
92 void __init intel_pmu_pebs_data_source_nhm(void)
94 pebs_data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
95 pebs_data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
96 pebs_data_source[0x07] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
99 static void __init __intel_pmu_pebs_data_source_skl(bool pmem, u64 *data_source)
101 u64 pmem_or_l4 = pmem ? LEVEL(PMEM) : LEVEL(L4);
103 data_source[0x08] = OP_LH | pmem_or_l4 | P(SNOOP, HIT);
104 data_source[0x09] = OP_LH | pmem_or_l4 | REM | P(SNOOP, HIT);
105 data_source[0x0b] = OP_LH | LEVEL(RAM) | REM | P(SNOOP, NONE);
106 data_source[0x0c] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOPX, FWD);
107 data_source[0x0d] = OP_LH | LEVEL(ANY_CACHE) | REM | P(SNOOP, HITM);
110 void __init intel_pmu_pebs_data_source_skl(bool pmem)
112 __intel_pmu_pebs_data_source_skl(pmem, pebs_data_source);
115 static void __init __intel_pmu_pebs_data_source_grt(u64 *data_source)
117 data_source[0x05] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
118 data_source[0x06] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOP, HITM);
119 data_source[0x08] = OP_LH | P(LVL, L3) | LEVEL(L3) | P(SNOOPX, FWD);
122 void __init intel_pmu_pebs_data_source_grt(void)
124 __intel_pmu_pebs_data_source_grt(pebs_data_source);
127 void __init intel_pmu_pebs_data_source_adl(void)
131 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX].pebs_data_source;
132 memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
133 __intel_pmu_pebs_data_source_skl(false, data_source);
135 data_source = x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX].pebs_data_source;
136 memcpy(data_source, pebs_data_source, sizeof(pebs_data_source));
137 __intel_pmu_pebs_data_source_grt(data_source);
140 static u64 precise_store_data(u64 status)
142 union intel_x86_pebs_dse dse;
143 u64 val = P(OP, STORE) | P(SNOOP, NA) | P(LVL, L1) | P(TLB, L2);
	 * 1 = the store missed the 2nd level TLB,
	 *     so it was serviced either by the page walker or the OS;
	 * 0 = the store hit the 2nd level TLB
154 if (dse.st_stlb_miss)
	 * bit 0: the store hit the L1 data cache;
	 * if not set, all we know is that it missed L1D
170 * bit 5: Locked prefix
173 val |= P(LOCK, LOCKED);
178 static u64 precise_datala_hsw(struct perf_event *event, u64 status)
180 union perf_mem_data_src dse;
182 dse.val = PERF_MEM_NA;
184 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW)
185 dse.mem_op = PERF_MEM_OP_STORE;
186 else if (event->hw.flags & PERF_X86_EVENT_PEBS_LD_HSW)
187 dse.mem_op = PERF_MEM_OP_LOAD;
	 * L1 info is only valid for the following events:
192 * MEM_UOPS_RETIRED.STLB_MISS_STORES
193 * MEM_UOPS_RETIRED.LOCK_STORES
194 * MEM_UOPS_RETIRED.SPLIT_STORES
195 * MEM_UOPS_RETIRED.ALL_STORES
197 if (event->hw.flags & PERF_X86_EVENT_PEBS_ST_HSW) {
199 dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_HIT;
201 dse.mem_lvl = PERF_MEM_LVL_L1 | PERF_MEM_LVL_MISS;
206 static inline void pebs_set_tlb_lock(u64 *val, bool tlb, bool lock)
210 * 0 = did not miss 2nd level TLB
211 * 1 = missed 2nd level TLB
214 *val |= P(TLB, MISS) | P(TLB, L2);
216 *val |= P(TLB, HIT) | P(TLB, L1) | P(TLB, L2);
220 *val |= P(LOCK, LOCKED);
223 /* Retrieve the latency data for e-core of ADL */
224 u64 adl_latency_data_small(struct perf_event *event, u64 status)
226 union intel_x86_pebs_dse dse;
229 WARN_ON_ONCE(hybrid_pmu(event->pmu)->cpu_type == hybrid_big);
233 val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse];
236 * For the atom core on ADL,
237 * bit 4: lock, bit 5: TLB access.
239 pebs_set_tlb_lock(&val, dse.ld_locked, dse.ld_stlb_miss);
249 static u64 load_latency_data(struct perf_event *event, u64 status)
251 union intel_x86_pebs_dse dse;
	 * use the mapping table for bits 0-3
259 val = hybrid_var(event->pmu, pebs_data_source)[dse.ld_dse];
	 * Nehalem models do not support TLB or lock info
264 if (x86_pmu.pebs_no_tlb) {
265 val |= P(TLB, NA) | P(LOCK, NA);
269 pebs_set_tlb_lock(&val, dse.ld_stlb_miss, dse.ld_locked);
272 * Ice Lake and earlier models do not support block infos.
274 if (!x86_pmu.pebs_block) {
279 * bit 6: load was blocked since its data could not be forwarded
280 * from a preceding store
286 * bit 7: load was blocked due to potential address conflict with
292 if (!dse.ld_data_blk && !dse.ld_addr_blk)
298 static u64 store_latency_data(struct perf_event *event, u64 status)
300 union intel_x86_pebs_dse dse;
301 union perf_mem_data_src src;
	 * use the mapping table for bits 0-3
309 val = hybrid_var(event->pmu, pebs_data_source)[dse.st_lat_dse];
311 pebs_set_tlb_lock(&val, dse.st_lat_stlb_miss, dse.st_lat_locked);
316 * the pebs_data_source table is only for loads
317 * so override the mem_op to say STORE instead
	src.mem_op = P(OP, STORE);
325 struct pebs_record_core {
329 u64 r8, r9, r10, r11;
330 u64 r12, r13, r14, r15;
333 struct pebs_record_nhm {
337 u64 r8, r9, r10, r11;
338 u64 r12, r13, r14, r15;
339 u64 status, dla, dse, lat;
343 * Same as pebs_record_nhm, with two additional fields.
345 struct pebs_record_hsw {
349 u64 r8, r9, r10, r11;
350 u64 r12, r13, r14, r15;
351 u64 status, dla, dse, lat;
352 u64 real_ip, tsx_tuning;
355 union hsw_tsx_tuning {
357 u32 cycles_last_block : 32,
360 instruction_abort : 1,
361 non_instruction_abort : 1,
370 #define PEBS_HSW_TSX_FLAGS 0xff00000000ULL
372 /* Same as HSW, plus TSC */
374 struct pebs_record_skl {
378 u64 r8, r9, r10, r11;
379 u64 r12, r13, r14, r15;
380 u64 status, dla, dse, lat;
381 u64 real_ip, tsx_tuning;
385 void init_debug_store_on_cpu(int cpu)
387 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
392 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
393 (u32)((u64)(unsigned long)ds),
394 (u32)((u64)(unsigned long)ds >> 32));
397 void fini_debug_store_on_cpu(int cpu)
399 if (!per_cpu(cpu_hw_events, cpu).ds)
402 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
405 static DEFINE_PER_CPU(void *, insn_buffer);
407 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
409 unsigned long start = (unsigned long)cea;
413 pa = virt_to_phys(addr);
416 for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
417 cea_set_pte(cea, pa, prot);
	 * This is a cross-CPU update of the cpu_entry_area; we must shoot
	 * down all TLB entries for it.
423 flush_tlb_kernel_range(start, start + size);
427 static void ds_clear_cea(void *cea, size_t size)
429 unsigned long start = (unsigned long)cea;
433 for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
434 cea_set_pte(cea, 0, PAGE_NONE);
436 flush_tlb_kernel_range(start, start + size);
440 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
442 unsigned int order = get_order(size);
443 int node = cpu_to_node(cpu);
446 page = __alloc_pages_node(node, flags | __GFP_ZERO, order);
447 return page ? page_address(page) : NULL;
450 static void dsfree_pages(const void *buffer, size_t size)
453 free_pages((unsigned long)buffer, get_order(size));
456 static int alloc_pebs_buffer(int cpu)
458 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
459 struct debug_store *ds = hwev->ds;
460 size_t bsiz = x86_pmu.pebs_buffer_size;
461 int max, node = cpu_to_node(cpu);
462 void *buffer, *insn_buff, *cea;
467 buffer = dsalloc_pages(bsiz, GFP_KERNEL, cpu);
468 if (unlikely(!buffer))
472 * HSW+ already provides us the eventing ip; no need to allocate this
475 if (x86_pmu.intel_cap.pebs_format < 2) {
476 insn_buff = kzalloc_node(PEBS_FIXUP_SIZE, GFP_KERNEL, node);
478 dsfree_pages(buffer, bsiz);
481 per_cpu(insn_buffer, cpu) = insn_buff;
483 hwev->ds_pebs_vaddr = buffer;
484 /* Update the cpu entry area mapping */
485 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
486 ds->pebs_buffer_base = (unsigned long) cea;
487 ds_update_cea(cea, buffer, bsiz, PAGE_KERNEL);
488 ds->pebs_index = ds->pebs_buffer_base;
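	/* Fit a whole number of PEBS records; any tail of the buffer is unused. */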
489 max = x86_pmu.pebs_record_size * (bsiz / x86_pmu.pebs_record_size);
490 ds->pebs_absolute_maximum = ds->pebs_buffer_base + max;
494 static void release_pebs_buffer(int cpu)
496 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
502 kfree(per_cpu(insn_buffer, cpu));
503 per_cpu(insn_buffer, cpu) = NULL;
505 /* Clear the fixmap */
506 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.pebs_buffer;
507 ds_clear_cea(cea, x86_pmu.pebs_buffer_size);
508 dsfree_pages(hwev->ds_pebs_vaddr, x86_pmu.pebs_buffer_size);
509 hwev->ds_pebs_vaddr = NULL;
512 static int alloc_bts_buffer(int cpu)
514 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
515 struct debug_store *ds = hwev->ds;
522 buffer = dsalloc_pages(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, cpu);
523 if (unlikely(!buffer)) {
524 WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
527 hwev->ds_bts_vaddr = buffer;
528 /* Update the fixmap */
529 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
530 ds->bts_buffer_base = (unsigned long) cea;
531 ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
532 ds->bts_index = ds->bts_buffer_base;
533 max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
534 ds->bts_absolute_maximum = ds->bts_buffer_base +
535 max * BTS_RECORD_SIZE;
536 ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
537 (max / 16) * BTS_RECORD_SIZE;
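	/*
	 * Illustrative sizing, assuming the 64KiB BTS_BUFFER_SIZE from
	 * perf_event.h: 24-byte records give max = 2730, and the interrupt
	 * threshold sits max/16 records (about one sixteenth of the buffer)
	 * below the absolute maximum.
	 */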
541 static void release_bts_buffer(int cpu)
543 struct cpu_hw_events *hwev = per_cpu_ptr(&cpu_hw_events, cpu);
549 /* Clear the fixmap */
550 cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;
551 ds_clear_cea(cea, BTS_BUFFER_SIZE);
552 dsfree_pages(hwev->ds_bts_vaddr, BTS_BUFFER_SIZE);
553 hwev->ds_bts_vaddr = NULL;
556 static int alloc_ds_buffer(int cpu)
558 struct debug_store *ds = &get_cpu_entry_area(cpu)->cpu_debug_store;
560 memset(ds, 0, sizeof(*ds));
561 per_cpu(cpu_hw_events, cpu).ds = ds;
565 static void release_ds_buffer(int cpu)
567 per_cpu(cpu_hw_events, cpu).ds = NULL;
570 void release_ds_buffers(void)
574 if (!x86_pmu.bts && !x86_pmu.pebs)
577 for_each_possible_cpu(cpu)
578 release_ds_buffer(cpu);
580 for_each_possible_cpu(cpu) {
		 * Again, ignore errors from offline CPUs; they no longer
		 * observe cpu_hw_events.ds and will not program the DS_AREA when
586 fini_debug_store_on_cpu(cpu);
589 for_each_possible_cpu(cpu) {
590 release_pebs_buffer(cpu);
591 release_bts_buffer(cpu);
595 void reserve_ds_buffers(void)
597 int bts_err = 0, pebs_err = 0;
600 x86_pmu.bts_active = 0;
601 x86_pmu.pebs_active = 0;
603 if (!x86_pmu.bts && !x86_pmu.pebs)
612 for_each_possible_cpu(cpu) {
613 if (alloc_ds_buffer(cpu)) {
618 if (!bts_err && alloc_bts_buffer(cpu))
621 if (!pebs_err && alloc_pebs_buffer(cpu))
624 if (bts_err && pebs_err)
629 for_each_possible_cpu(cpu)
630 release_bts_buffer(cpu);
634 for_each_possible_cpu(cpu)
635 release_pebs_buffer(cpu);
638 if (bts_err && pebs_err) {
639 for_each_possible_cpu(cpu)
640 release_ds_buffer(cpu);
642 if (x86_pmu.bts && !bts_err)
643 x86_pmu.bts_active = 1;
645 if (x86_pmu.pebs && !pebs_err)
646 x86_pmu.pebs_active = 1;
648 for_each_possible_cpu(cpu) {
		 * Ignore wrmsr_on_cpu() errors for offline CPUs; they
		 * will get this call through intel_pmu_cpu_starting().
653 init_debug_store_on_cpu(cpu);
662 struct event_constraint bts_constraint =
663 EVENT_CONSTRAINT(0, 1ULL << INTEL_PMC_IDX_FIXED_BTS, 0);
665 void intel_pmu_enable_bts(u64 config)
667 unsigned long debugctlmsr;
669 debugctlmsr = get_debugctlmsr();
671 debugctlmsr |= DEBUGCTLMSR_TR;
672 debugctlmsr |= DEBUGCTLMSR_BTS;
673 if (config & ARCH_PERFMON_EVENTSEL_INT)
674 debugctlmsr |= DEBUGCTLMSR_BTINT;
676 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
677 debugctlmsr |= DEBUGCTLMSR_BTS_OFF_OS;
679 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
680 debugctlmsr |= DEBUGCTLMSR_BTS_OFF_USR;
682 update_debugctlmsr(debugctlmsr);
685 void intel_pmu_disable_bts(void)
687 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
688 unsigned long debugctlmsr;
693 debugctlmsr = get_debugctlmsr();
696 ~(DEBUGCTLMSR_TR | DEBUGCTLMSR_BTS | DEBUGCTLMSR_BTINT |
697 DEBUGCTLMSR_BTS_OFF_OS | DEBUGCTLMSR_BTS_OFF_USR);
699 update_debugctlmsr(debugctlmsr);
702 int intel_pmu_drain_bts_buffer(void)
704 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
705 struct debug_store *ds = cpuc->ds;
711 struct perf_event *event = cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
712 struct bts_record *at, *base, *top;
713 struct perf_output_handle handle;
714 struct perf_event_header header;
715 struct perf_sample_data data;
716 unsigned long skip = 0;
722 if (!x86_pmu.bts_active)
725 base = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
726 top = (struct bts_record *)(unsigned long)ds->bts_index;
	memset(&regs, 0, sizeof(regs));
733 ds->bts_index = ds->bts_buffer_base;
735 perf_sample_data_init(&data, 0, event->hw.last_period);
738 * BTS leaks kernel addresses in branches across the cpl boundary,
739 * such as traps or system calls, so unless the user is asking for
740 * kernel tracing (and right now it's not possible), we'd need to
741 * filter them out. But first we need to count how many of those we
742 * have in the current batch. This is an extra O(n) pass, however,
743 * it's much faster than the other one especially considering that
744 * n <= 2560 (BTS_BUFFER_SIZE / BTS_RECORD_SIZE * 15/16; see the
745 * alloc_bts_buffer()).
747 for (at = base; at < top; at++) {
749 * Note that right now *this* BTS code only works if
750 * attr::exclude_kernel is set, but let's keep this extra
751 * check here in case that changes.
753 if (event->attr.exclude_kernel &&
754 (kernel_ip(at->from) || kernel_ip(at->to)))
759 * Prepare a generic sample, i.e. fill in the invariant fields.
760 * We will overwrite the from and to address before we output
	perf_prepare_sample(&header, &data, event, &regs);
766 if (perf_output_begin(&handle, &data, event,
767 header.size * (top - base - skip)))
770 for (at = base; at < top; at++) {
771 /* Filter out any records that contain kernel addresses. */
772 if (event->attr.exclude_kernel &&
773 (kernel_ip(at->from) || kernel_ip(at->to)))
779 perf_output_sample(&handle, &header, &data, event);
782 perf_output_end(&handle);
784 /* There's new data available. */
785 event->hw.interrupts++;
786 event->pending_kill = POLL_IN;
792 static inline void intel_pmu_drain_pebs_buffer(void)
794 struct perf_sample_data data;
796 x86_pmu.drain_pebs(NULL, &data);
802 struct event_constraint intel_core2_pebs_event_constraints[] = {
803 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
804 INTEL_FLAGS_UEVENT_CONSTRAINT(0xfec1, 0x1), /* X87_OPS_RETIRED.ANY */
805 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETIRED.ANY */
807 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
808 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
809 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
813 struct event_constraint intel_atom_pebs_event_constraints[] = {
814 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
815 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
816 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
817 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
818 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
819 /* Allow all events as PEBS with no flags */
820 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
824 struct event_constraint intel_slm_pebs_event_constraints[] = {
825 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
826 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
827 /* Allow all events as PEBS with no flags */
828 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
832 struct event_constraint intel_glm_pebs_event_constraints[] = {
833 /* Allow all events as PEBS with no flags */
834 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
838 struct event_constraint intel_grt_pebs_event_constraints[] = {
839 /* Allow all events as PEBS with no flags */
840 INTEL_HYBRID_LAT_CONSTRAINT(0x5d0, 0x3),
841 INTEL_HYBRID_LAT_CONSTRAINT(0x6d0, 0xf),
845 struct event_constraint intel_nehalem_pebs_event_constraints[] = {
846 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
847 INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
848 INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
849 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INST_RETIRED.ANY */
850 INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
851 INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
852 INTEL_FLAGS_UEVENT_CONSTRAINT(0x02c5, 0xf), /* BR_MISP_RETIRED.NEAR_CALL */
853 INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
854 INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
855 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
856 INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
857 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
858 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
862 struct event_constraint intel_westmere_pebs_event_constraints[] = {
863 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
864 INTEL_FLAGS_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
865 INTEL_FLAGS_UEVENT_CONSTRAINT(0x010c, 0xf), /* MEM_STORE_RETIRED.DTLB_MISS */
866 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xf), /* INSTR_RETIRED.* */
867 INTEL_EVENT_CONSTRAINT(0xc2, 0xf), /* UOPS_RETIRED.* */
868 INTEL_FLAGS_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
869 INTEL_FLAGS_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
870 INTEL_FLAGS_EVENT_CONSTRAINT(0xc7, 0xf), /* SSEX_UOPS_RETIRED.* */
871 INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
872 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
873 INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
874 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
875 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
879 struct event_constraint intel_snb_pebs_event_constraints[] = {
880 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
881 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
882 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
883 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
884 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
885 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
886 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
887 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
888 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
889 /* Allow all events as PEBS with no flags */
890 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
894 struct event_constraint intel_ivb_pebs_event_constraints[] = {
895 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
896 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
897 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
898 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
899 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
900 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
901 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
902 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
903 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
904 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
905 INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
906 /* Allow all events as PEBS with no flags */
907 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
911 struct event_constraint intel_hsw_pebs_event_constraints[] = {
912 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
913 INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
914 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
915 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
916 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
917 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
918 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
919 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
920 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
921 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
922 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
923 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
924 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
925 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
926 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
927 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
928 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_XLD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
929 /* Allow all events as PEBS with no flags */
930 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
934 struct event_constraint intel_bdw_pebs_event_constraints[] = {
935 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
936 INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
937 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
938 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
939 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
940 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
941 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
942 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
943 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
944 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_LOADS */
945 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_UOPS_RETIRED.ALL_LOADS */
946 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_STORES */
947 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_UOPS_RETIRED.SPLIT_STORES */
948 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_UOPS_RETIRED.ALL_STORES */
949 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
950 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_UOPS_L3_HIT_RETIRED.* */
951 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_UOPS_L3_MISS_RETIRED.* */
952 /* Allow all events as PEBS with no flags */
953 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
958 struct event_constraint intel_skl_pebs_event_constraints[] = {
959 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
960 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
961 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
962 /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
963 INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
964 INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
965 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
966 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
967 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
968 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x22d0, 0xf), /* MEM_INST_RETIRED.LOCK_STORES */
969 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
970 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
971 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
972 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
973 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd1, 0xf), /* MEM_LOAD_RETIRED.* */
974 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd2, 0xf), /* MEM_LOAD_L3_HIT_RETIRED.* */
975 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD(0xd3, 0xf), /* MEM_LOAD_L3_MISS_RETIRED.* */
976 /* Allow all events as PEBS with no flags */
977 INTEL_ALL_EVENT_CONSTRAINT(0, 0xf),
981 struct event_constraint intel_icl_pebs_event_constraints[] = {
982 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x100000000ULL), /* old INST_RETIRED.PREC_DIST */
983 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
984 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
986 INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
987 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
988 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
989 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
990 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
991 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
992 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
993 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
995 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf), /* MEM_LOAD_*_RETIRED.* */
997 INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_INST_RETIRED.* */
1000 * Everything else is handled by PMU_FL_PEBS_ALL, because we
1001 * need the full constraints from the main table.
1004 EVENT_CONSTRAINT_END
1007 struct event_constraint intel_spr_pebs_event_constraints[] = {
1008 INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
1009 INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),
1011 INTEL_FLAGS_EVENT_CONSTRAINT(0xc0, 0xfe),
1012 INTEL_PLD_CONSTRAINT(0x1cd, 0xfe),
1013 INTEL_PSD_CONSTRAINT(0x2cd, 0x1),
1014 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
1015 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
1016 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_INST_RETIRED.LOCK_LOADS */
1017 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf), /* MEM_INST_RETIRED.SPLIT_LOADS */
1018 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf), /* MEM_INST_RETIRED.SPLIT_STORES */
1019 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf), /* MEM_INST_RETIRED.ALL_LOADS */
1020 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf), /* MEM_INST_RETIRED.ALL_STORES */
1022 INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),
1024 INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),
1027 * Everything else is handled by PMU_FL_PEBS_ALL, because we
1028 * need the full constraints from the main table.
1031 EVENT_CONSTRAINT_END
1034 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
1036 struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);
1037 struct event_constraint *c;
1039 if (!event->attr.precise_ip)
1042 if (pebs_constraints) {
1043 for_each_event_constraint(c, pebs_constraints) {
1044 if (constraint_match(c, event->hw.config)) {
1045 event->hw.flags |= c->flags;
1052 * Extended PEBS support
1053 * Makes the PEBS code search the normal constraints.
1055 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
1058 return &emptyconstraint;
1062 * We need the sched_task callback even for per-cpu events when we use
1063 * the large interrupt threshold, such that we can provide PID and TID
1066 static inline bool pebs_needs_sched_cb(struct cpu_hw_events *cpuc)
1068 if (cpuc->n_pebs == cpuc->n_pebs_via_pt)
1071 return cpuc->n_pebs && (cpuc->n_pebs == cpuc->n_large_pebs);
1074 void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
1076 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1078 if (!sched_in && pebs_needs_sched_cb(cpuc))
1079 intel_pmu_drain_pebs_buffer();
1082 static inline void pebs_update_threshold(struct cpu_hw_events *cpuc)
1084 struct debug_store *ds = cpuc->ds;
1085 int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
1086 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
1090 if (cpuc->n_pebs_via_pt)
1093 if (x86_pmu.flags & PMU_FL_PEBS_ALL)
1094 reserved = max_pebs_events + num_counters_fixed;
1096 reserved = max_pebs_events;
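	/*
	 * For large PEBS the threshold is backed off from the end of the
	 * buffer by one record slot per potentially active counter; otherwise
	 * it is set one record past the base so every record raises a PMI.
	 */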
1098 if (cpuc->n_pebs == cpuc->n_large_pebs) {
1099 threshold = ds->pebs_absolute_maximum -
1100 reserved * cpuc->pebs_record_size;
1102 threshold = ds->pebs_buffer_base + cpuc->pebs_record_size;
1105 ds->pebs_interrupt_threshold = threshold;
1108 static void adaptive_pebs_record_size_update(void)
1110 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1111 u64 pebs_data_cfg = cpuc->pebs_data_cfg;
1112 int sz = sizeof(struct pebs_basic);
1114 if (pebs_data_cfg & PEBS_DATACFG_MEMINFO)
1115 sz += sizeof(struct pebs_meminfo);
1116 if (pebs_data_cfg & PEBS_DATACFG_GP)
1117 sz += sizeof(struct pebs_gprs);
1118 if (pebs_data_cfg & PEBS_DATACFG_XMMS)
1119 sz += sizeof(struct pebs_xmm);
1120 if (pebs_data_cfg & PEBS_DATACFG_LBRS)
1121 sz += x86_pmu.lbr_nr * sizeof(struct lbr_entry);
1123 cpuc->pebs_record_size = sz;
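	/*
	 * Illustrative example: a pebs_data_cfg with only MEMINFO and GP set
	 * yields sizeof(struct pebs_basic) + sizeof(struct pebs_meminfo) +
	 * sizeof(struct pebs_gprs) as the per-record size.
	 */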
1126 #define PERF_PEBS_MEMINFO_TYPE (PERF_SAMPLE_ADDR | PERF_SAMPLE_DATA_SRC | \
1127 PERF_SAMPLE_PHYS_ADDR | \
1128 PERF_SAMPLE_WEIGHT_TYPE | \
1129 PERF_SAMPLE_TRANSACTION | \
1130 PERF_SAMPLE_DATA_PAGE_SIZE)
1132 static u64 pebs_update_adaptive_cfg(struct perf_event *event)
1134 struct perf_event_attr *attr = &event->attr;
1135 u64 sample_type = attr->sample_type;
1136 u64 pebs_data_cfg = 0;
1137 bool gprs, tsx_weight;
1139 if (!(sample_type & ~(PERF_SAMPLE_IP|PERF_SAMPLE_TIME)) &&
1140 attr->precise_ip > 1)
1141 return pebs_data_cfg;
1143 if (sample_type & PERF_PEBS_MEMINFO_TYPE)
1144 pebs_data_cfg |= PEBS_DATACFG_MEMINFO;
	 * We need GPRs when:
	 * + the user requested them,
	 * + precise_ip < 2, since the reported IP is then not the event IP,
	 * + RTM TSX weight sampling, which needs GPRs for the abort code.
1152 gprs = (sample_type & PERF_SAMPLE_REGS_INTR) &&
1153 (attr->sample_regs_intr & PEBS_GP_REGS);
1155 tsx_weight = (sample_type & PERF_SAMPLE_WEIGHT_TYPE) &&
1156 ((attr->config & INTEL_ARCH_EVENT_MASK) ==
1157 x86_pmu.rtm_abort_event);
1159 if (gprs || (attr->precise_ip < 2) || tsx_weight)
1160 pebs_data_cfg |= PEBS_DATACFG_GP;
1162 if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
1163 (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
1164 pebs_data_cfg |= PEBS_DATACFG_XMMS;
1166 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
1168 * For now always log all LBRs. Could configure this
1171 pebs_data_cfg |= PEBS_DATACFG_LBRS |
1172 ((x86_pmu.lbr_nr-1) << PEBS_DATACFG_LBR_SHIFT);
1175 return pebs_data_cfg;
1179 pebs_update_state(bool needed_cb, struct cpu_hw_events *cpuc,
1180 struct perf_event *event, bool add)
1182 struct pmu *pmu = event->ctx->pmu;
1184 * Make sure we get updated with the first PEBS
	 * event. It will also trigger during removal, but
1186 * that does not hurt:
1188 bool update = cpuc->n_pebs == 1;
1190 if (needed_cb != pebs_needs_sched_cb(cpuc)) {
1192 perf_sched_cb_inc(pmu);
1194 perf_sched_cb_dec(pmu);
1200 * The PEBS record doesn't shrink on pmu::del(). Doing so would require
1201 * iterating all remaining PEBS events to reconstruct the config.
1203 if (x86_pmu.intel_cap.pebs_baseline && add) {
1206 /* Clear pebs_data_cfg and pebs_record_size for first PEBS. */
1207 if (cpuc->n_pebs == 1) {
1208 cpuc->pebs_data_cfg = 0;
1209 cpuc->pebs_record_size = sizeof(struct pebs_basic);
1212 pebs_data_cfg = pebs_update_adaptive_cfg(event);
1214 /* Update pebs_record_size if new event requires more data. */
1215 if (pebs_data_cfg & ~cpuc->pebs_data_cfg) {
1216 cpuc->pebs_data_cfg |= pebs_data_cfg;
1217 adaptive_pebs_record_size_update();
1223 pebs_update_threshold(cpuc);
1226 void intel_pmu_pebs_add(struct perf_event *event)
1228 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1229 struct hw_perf_event *hwc = &event->hw;
1230 bool needed_cb = pebs_needs_sched_cb(cpuc);
1233 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
1234 cpuc->n_large_pebs++;
1235 if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
1236 cpuc->n_pebs_via_pt++;
1238 pebs_update_state(needed_cb, cpuc, event, true);
1241 static void intel_pmu_pebs_via_pt_disable(struct perf_event *event)
1243 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1245 if (!is_pebs_pt(event))
1248 if (!(cpuc->pebs_enabled & ~PEBS_VIA_PT_MASK))
1249 cpuc->pebs_enabled &= ~PEBS_VIA_PT_MASK;
1252 static void intel_pmu_pebs_via_pt_enable(struct perf_event *event)
1254 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1255 struct hw_perf_event *hwc = &event->hw;
1256 struct debug_store *ds = cpuc->ds;
1257 u64 value = ds->pebs_event_reset[hwc->idx];
1258 u32 base = MSR_RELOAD_PMC0;
1259 unsigned int idx = hwc->idx;
1261 if (!is_pebs_pt(event))
1264 if (!(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS))
1265 cpuc->pebs_enabled |= PEBS_PMI_AFTER_EACH_RECORD;
1267 cpuc->pebs_enabled |= PEBS_OUTPUT_PT;
1269 if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
1270 base = MSR_RELOAD_FIXED_CTR0;
1271 idx = hwc->idx - INTEL_PMC_IDX_FIXED;
1272 if (x86_pmu.intel_cap.pebs_format < 5)
1273 value = ds->pebs_event_reset[MAX_PEBS_EVENTS_FMT4 + idx];
1275 value = ds->pebs_event_reset[MAX_PEBS_EVENTS + idx];
1277 wrmsrl(base + idx, value);
1280 void intel_pmu_pebs_enable(struct perf_event *event)
1282 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1283 struct hw_perf_event *hwc = &event->hw;
1284 struct debug_store *ds = cpuc->ds;
1285 unsigned int idx = hwc->idx;
1287 hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
1289 cpuc->pebs_enabled |= 1ULL << hwc->idx;
1291 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) && (x86_pmu.version < 5))
1292 cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
1293 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
1294 cpuc->pebs_enabled |= 1ULL << 63;
1296 if (x86_pmu.intel_cap.pebs_baseline) {
1297 hwc->config |= ICL_EVENTSEL_ADAPTIVE;
1298 if (cpuc->pebs_data_cfg != cpuc->active_pebs_data_cfg) {
1299 wrmsrl(MSR_PEBS_DATA_CFG, cpuc->pebs_data_cfg);
1300 cpuc->active_pebs_data_cfg = cpuc->pebs_data_cfg;
1304 if (idx >= INTEL_PMC_IDX_FIXED) {
1305 if (x86_pmu.intel_cap.pebs_format < 5)
1306 idx = MAX_PEBS_EVENTS_FMT4 + (idx - INTEL_PMC_IDX_FIXED);
1308 idx = MAX_PEBS_EVENTS + (idx - INTEL_PMC_IDX_FIXED);
	 * Use auto-reload if possible to save an MSR write in the PMI.
1313 * This must be done in pmu::start(), because PERF_EVENT_IOC_PERIOD.
1315 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
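		/*
		 * The reset value is the negated sample period truncated to
		 * the counter width, so the counter overflows again after
		 * exactly hwc->sample_period increments.
		 */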
1316 ds->pebs_event_reset[idx] =
1317 (u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
1319 ds->pebs_event_reset[idx] = 0;
1322 intel_pmu_pebs_via_pt_enable(event);
1325 void intel_pmu_pebs_del(struct perf_event *event)
1327 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1328 struct hw_perf_event *hwc = &event->hw;
1329 bool needed_cb = pebs_needs_sched_cb(cpuc);
1332 if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS)
1333 cpuc->n_large_pebs--;
1334 if (hwc->flags & PERF_X86_EVENT_PEBS_VIA_PT)
1335 cpuc->n_pebs_via_pt--;
1337 pebs_update_state(needed_cb, cpuc, event, false);
1340 void intel_pmu_pebs_disable(struct perf_event *event)
1342 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1343 struct hw_perf_event *hwc = &event->hw;
1345 if (cpuc->n_pebs == cpuc->n_large_pebs &&
1346 cpuc->n_pebs != cpuc->n_pebs_via_pt)
1347 intel_pmu_drain_pebs_buffer();
1349 cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
1351 if ((event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT) &&
1352 (x86_pmu.version < 5))
1353 cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
1354 else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
1355 cpuc->pebs_enabled &= ~(1ULL << 63);
1357 intel_pmu_pebs_via_pt_disable(event);
1360 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1362 hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
1365 void intel_pmu_pebs_enable_all(void)
1367 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1369 if (cpuc->pebs_enabled)
1370 wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
1373 void intel_pmu_pebs_disable_all(void)
1375 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1377 if (cpuc->pebs_enabled)
1378 __intel_pmu_pebs_disable_all();
1381 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
1383 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1384 unsigned long from = cpuc->lbr_entries[0].from;
1385 unsigned long old_to, to = cpuc->lbr_entries[0].to;
1386 unsigned long ip = regs->ip;
	 * We don't need to fix up anything if the PEBS assist is fault-like
1394 if (!x86_pmu.intel_cap.pebs_trap)
1398 * No LBR entry, no basic block, no rewinding
1400 if (!cpuc->lbr_stack.nr || !from || !to)
1404 * Basic blocks should never cross user/kernel boundaries
1406 if (kernel_ip(ip) != kernel_ip(to))
1410 * unsigned math, either ip is before the start (impossible) or
1411 * the basic block is larger than 1 page (sanity)
1413 if ((ip - to) > PEBS_FIXUP_SIZE)
1417 * We sampled a branch insn, rewind using the LBR stack
1420 set_linear_ip(regs, from);
1425 if (!kernel_ip(ip)) {
1427 u8 *buf = this_cpu_read(insn_buffer);
1429 /* 'size' must fit our buffer, see above */
1430 bytes = copy_from_user_nmi(buf, (void __user *)to, size);
1444 #ifdef CONFIG_X86_64
1445 is_64bit = kernel_ip(to) || any_64bit_mode(regs);
1447 insn_init(&insn, kaddr, size, is_64bit);
1450 * Make sure there was not a problem decoding the instruction.
1451 * This is doubly important because we have an infinite loop if
1454 if (insn_get_length(&insn))
1458 kaddr += insn.length;
1459 size -= insn.length;
1463 set_linear_ip(regs, old_to);
1468 * Even though we decoded the basic block, the instruction stream
1469 * never matched the given IP, either the TO or the IP got corrupted.
1474 static inline u64 intel_get_tsx_weight(u64 tsx_tuning)
1477 union hsw_tsx_tuning tsx = { .value = tsx_tuning };
1478 return tsx.cycles_last_block;
1483 static inline u64 intel_get_tsx_transaction(u64 tsx_tuning, u64 ax)
1485 u64 txn = (tsx_tuning & PEBS_HSW_TSX_FLAGS) >> 32;
1487 /* For RTM XABORTs also log the abort code from AX */
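	/*
	 * XABORT sets bit 0 of EAX and places its imm8 argument in EAX bits
	 * 31:24; that byte is copied into the PERF_TXN abort-code field below.
	 */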
1488 if ((txn & PERF_TXN_TRANSACTION) && (ax & 1))
1489 txn |= ((ax >> 24) & 0xff) << PERF_TXN_ABORT_SHIFT;
1493 static inline u64 get_pebs_status(void *n)
1495 if (x86_pmu.intel_cap.pebs_format < 4)
1496 return ((struct pebs_record_nhm *)n)->status;
1497 return ((struct pebs_basic *)n)->applicable_counters;
1500 #define PERF_X86_EVENT_PEBS_HSW_PREC \
1501 (PERF_X86_EVENT_PEBS_ST_HSW | \
1502 PERF_X86_EVENT_PEBS_LD_HSW | \
1503 PERF_X86_EVENT_PEBS_NA_HSW)
1505 static u64 get_data_src(struct perf_event *event, u64 aux)
1507 u64 val = PERF_MEM_NA;
1508 int fl = event->hw.flags;
1509 bool fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
1511 if (fl & PERF_X86_EVENT_PEBS_LDLAT)
1512 val = load_latency_data(event, aux);
1513 else if (fl & PERF_X86_EVENT_PEBS_STLAT)
1514 val = store_latency_data(event, aux);
1515 else if (fl & PERF_X86_EVENT_PEBS_LAT_HYBRID)
1516 val = x86_pmu.pebs_latency_data(event, aux);
1517 else if (fst && (fl & PERF_X86_EVENT_PEBS_HSW_PREC))
1518 val = precise_datala_hsw(event, aux);
1520 val = precise_store_data(aux);
1524 static void setup_pebs_time(struct perf_event *event,
1525 struct perf_sample_data *data,
1528 /* Converting to a user-defined clock is not supported yet. */
1529 if (event->attr.use_clockid != 0)
	 * The conversion is not supported when the TSC is unstable, which is
	 * a corner case and very unlikely to happen. If it does happen, the
	 * TSC in the PEBS record is dropped and we fall back to
	 * perf_event_clock().
1538 if (!using_native_sched_clock() || !sched_clock_stable())
1541 data->time = native_sched_clock_from_tsc(tsc) + __sched_clock_offset;
1542 data->sample_flags |= PERF_SAMPLE_TIME;
1545 #define PERF_SAMPLE_ADDR_TYPE (PERF_SAMPLE_ADDR | \
1546 PERF_SAMPLE_PHYS_ADDR | \
1547 PERF_SAMPLE_DATA_PAGE_SIZE)
1549 static void setup_pebs_fixed_sample_data(struct perf_event *event,
1550 struct pt_regs *iregs, void *__pebs,
1551 struct perf_sample_data *data,
1552 struct pt_regs *regs)
1555 * We cast to the biggest pebs_record but are careful not to
1556 * unconditionally access the 'extra' entries.
1558 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1559 struct pebs_record_skl *pebs = __pebs;
1566 sample_type = event->attr.sample_type;
1567 fll = event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT;
1569 perf_sample_data_init(data, 0, event->hw.last_period);
1571 data->period = event->hw.last_period;
	 * Use latency for weight (only available with PEBS-LL)
1576 if (fll && (sample_type & PERF_SAMPLE_WEIGHT_TYPE)) {
1577 data->weight.full = pebs->lat;
1578 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
1582 * data.data_src encodes the data source
1584 if (sample_type & PERF_SAMPLE_DATA_SRC) {
1585 data->data_src.val = get_data_src(event, pebs->dse);
1586 data->sample_flags |= PERF_SAMPLE_DATA_SRC;
1590 * We must however always use iregs for the unwinder to stay sane; the
1591 * record BP,SP,IP can point into thin air when the record is from a
1592 * previous PMI context or an (I)RET happened between the record and
1595 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
1596 data->callchain = perf_callchain(event, iregs);
1597 data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
1601 * We use the interrupt regs as a base because the PEBS record does not
1602 * contain a full regs set, specifically it seems to lack segment
1603 * descriptors, which get used by things like user_mode().
1605 * In the simple case fix up only the IP for PERF_SAMPLE_IP.
	 * Initialize regs->flags from PEBS,
1611 * Clear exact bit (which uses x86 EFLAGS Reserved bit 3),
1612 * i.e., do not rely on it being zero:
1614 regs->flags = pebs->flags & ~PERF_EFLAGS_EXACT;
1616 if (sample_type & PERF_SAMPLE_REGS_INTR) {
1617 regs->ax = pebs->ax;
1618 regs->bx = pebs->bx;
1619 regs->cx = pebs->cx;
1620 regs->dx = pebs->dx;
1621 regs->si = pebs->si;
1622 regs->di = pebs->di;
1624 regs->bp = pebs->bp;
1625 regs->sp = pebs->sp;
1627 #ifndef CONFIG_X86_32
1628 regs->r8 = pebs->r8;
1629 regs->r9 = pebs->r9;
1630 regs->r10 = pebs->r10;
1631 regs->r11 = pebs->r11;
1632 regs->r12 = pebs->r12;
1633 regs->r13 = pebs->r13;
1634 regs->r14 = pebs->r14;
1635 regs->r15 = pebs->r15;
1639 if (event->attr.precise_ip > 1) {
1641 * Haswell and later processors have an 'eventing IP'
1642 * (real IP) which fixes the off-by-1 skid in hardware.
1643 * Use it when precise_ip >= 2 :
1645 if (x86_pmu.intel_cap.pebs_format >= 2) {
1646 set_linear_ip(regs, pebs->real_ip);
1647 regs->flags |= PERF_EFLAGS_EXACT;
1649 /* Otherwise, use PEBS off-by-1 IP: */
1650 set_linear_ip(regs, pebs->ip);
1653 * With precise_ip >= 2, try to fix up the off-by-1 IP
1654 * using the LBR. If successful, the fixup function
1655 * corrects regs->ip and calls set_linear_ip() on regs:
1657 if (intel_pmu_pebs_fixup_ip(regs))
1658 regs->flags |= PERF_EFLAGS_EXACT;
1662 * When precise_ip == 1, return the PEBS off-by-1 IP,
1663 * no fixup attempted:
1665 set_linear_ip(regs, pebs->ip);
1669 if ((sample_type & PERF_SAMPLE_ADDR_TYPE) &&
1670 x86_pmu.intel_cap.pebs_format >= 1) {
1671 data->addr = pebs->dla;
1672 data->sample_flags |= PERF_SAMPLE_ADDR;
1675 if (x86_pmu.intel_cap.pebs_format >= 2) {
1676 /* Only set the TSX weight when no memory weight. */
1677 if ((sample_type & PERF_SAMPLE_WEIGHT_TYPE) && !fll) {
1678 data->weight.full = intel_get_tsx_weight(pebs->tsx_tuning);
1679 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
1681 if (sample_type & PERF_SAMPLE_TRANSACTION) {
1682 data->txn = intel_get_tsx_transaction(pebs->tsx_tuning,
1684 data->sample_flags |= PERF_SAMPLE_TRANSACTION;
	 * PEBS v3 supplies an accurate time stamp, so use it as the sample
	 * time stamp.
	 *
	 * We can only do this for the default trace clock.
1694 if (x86_pmu.intel_cap.pebs_format >= 3)
1695 setup_pebs_time(event, data, pebs->tsc);
1697 if (has_branch_stack(event)) {
1698 data->br_stack = &cpuc->lbr_stack;
1699 data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
1703 static void adaptive_pebs_save_regs(struct pt_regs *regs,
1704 struct pebs_gprs *gprs)
1706 regs->ax = gprs->ax;
1707 regs->bx = gprs->bx;
1708 regs->cx = gprs->cx;
1709 regs->dx = gprs->dx;
1710 regs->si = gprs->si;
1711 regs->di = gprs->di;
1712 regs->bp = gprs->bp;
1713 regs->sp = gprs->sp;
1714 #ifndef CONFIG_X86_32
1715 regs->r8 = gprs->r8;
1716 regs->r9 = gprs->r9;
1717 regs->r10 = gprs->r10;
1718 regs->r11 = gprs->r11;
1719 regs->r12 = gprs->r12;
1720 regs->r13 = gprs->r13;
1721 regs->r14 = gprs->r14;
1722 regs->r15 = gprs->r15;
1726 #define PEBS_LATENCY_MASK 0xffff
1727 #define PEBS_CACHE_LATENCY_OFFSET 32
1730 * With adaptive PEBS the layout depends on what fields are configured.
1733 static void setup_pebs_adaptive_sample_data(struct perf_event *event,
1734 struct pt_regs *iregs, void *__pebs,
1735 struct perf_sample_data *data,
1736 struct pt_regs *regs)
1738 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1739 struct pebs_basic *basic = __pebs;
1740 void *next_record = basic + 1;
1743 struct pebs_meminfo *meminfo = NULL;
1744 struct pebs_gprs *gprs = NULL;
1745 struct x86_perf_regs *perf_regs;
1750 perf_regs = container_of(regs, struct x86_perf_regs, regs);
1751 perf_regs->xmm_regs = NULL;
1753 sample_type = event->attr.sample_type;
1754 format_size = basic->format_size;
1755 perf_sample_data_init(data, 0, event->hw.last_period);
1756 data->period = event->hw.last_period;
1758 setup_pebs_time(event, data, basic->tsc);
1761 * We must however always use iregs for the unwinder to stay sane; the
1762 * record BP,SP,IP can point into thin air when the record is from a
1763 * previous PMI context or an (I)RET happened between the record and
1766 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
1767 data->callchain = perf_callchain(event, iregs);
1768 data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
1772 /* The ip in basic is EventingIP */
1773 set_linear_ip(regs, basic->ip);
1774 regs->flags = PERF_EFLAGS_EXACT;
	 * The MEMINFO record comes before the GP record, but
	 * PERF_SAMPLE_TRANSACTION needs gprs->ax.
	 * Save the pointer here and process it later.
1781 if (format_size & PEBS_DATACFG_MEMINFO) {
1782 meminfo = next_record;
1783 next_record = meminfo + 1;
1786 if (format_size & PEBS_DATACFG_GP) {
1788 next_record = gprs + 1;
1790 if (event->attr.precise_ip < 2) {
1791 set_linear_ip(regs, gprs->ip);
1792 regs->flags &= ~PERF_EFLAGS_EXACT;
1795 if (sample_type & PERF_SAMPLE_REGS_INTR)
1796 adaptive_pebs_save_regs(regs, gprs);
1799 if (format_size & PEBS_DATACFG_MEMINFO) {
1800 if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
1801 u64 weight = meminfo->latency;
1803 if (x86_pmu.flags & PMU_FL_INSTR_LATENCY) {
1804 data->weight.var2_w = weight & PEBS_LATENCY_MASK;
1805 weight >>= PEBS_CACHE_LATENCY_OFFSET;
1809 * Although meminfo::latency is defined as a u64,
1810 * only the lower 32 bits include the valid data
1811 * in practice on Ice Lake and earlier platforms.
1813 if (sample_type & PERF_SAMPLE_WEIGHT) {
1814 data->weight.full = weight ?:
1815 intel_get_tsx_weight(meminfo->tsx_tuning);
1817 data->weight.var1_dw = (u32)(weight & PEBS_LATENCY_MASK) ?:
1818 intel_get_tsx_weight(meminfo->tsx_tuning);
1820 data->sample_flags |= PERF_SAMPLE_WEIGHT_TYPE;
1823 if (sample_type & PERF_SAMPLE_DATA_SRC) {
1824 data->data_src.val = get_data_src(event, meminfo->aux);
1825 data->sample_flags |= PERF_SAMPLE_DATA_SRC;
1828 if (sample_type & PERF_SAMPLE_ADDR_TYPE) {
1829 data->addr = meminfo->address;
1830 data->sample_flags |= PERF_SAMPLE_ADDR;
1833 if (sample_type & PERF_SAMPLE_TRANSACTION) {
1834 data->txn = intel_get_tsx_transaction(meminfo->tsx_tuning,
1835 gprs ? gprs->ax : 0);
1836 data->sample_flags |= PERF_SAMPLE_TRANSACTION;
1840 if (format_size & PEBS_DATACFG_XMMS) {
1841 struct pebs_xmm *xmm = next_record;
1843 next_record = xmm + 1;
1844 perf_regs->xmm_regs = xmm->xmm;
1847 if (format_size & PEBS_DATACFG_LBRS) {
1848 struct lbr_entry *lbr = next_record;
1849 int num_lbr = ((format_size >> PEBS_DATACFG_LBR_SHIFT)
1851 next_record = next_record + num_lbr * sizeof(struct lbr_entry);
1853 if (has_branch_stack(event)) {
1854 intel_pmu_store_pebs_lbrs(lbr);
1855 data->br_stack = &cpuc->lbr_stack;
1856 data->sample_flags |= PERF_SAMPLE_BRANCH_STACK;
1860 WARN_ONCE(next_record != __pebs + (format_size >> 48),
1861 "PEBS record size %llu, expected %llu, config %llx\n",
1863 (u64)(next_record - __pebs),
1864 basic->format_size);
1867 static inline void *
1868 get_next_pebs_record_by_bit(void *base, void *top, int bit)
1870 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1875 * fmt0 does not have a status bitfield (does not use
	 * pebs_record_nhm format)
1878 if (x86_pmu.intel_cap.pebs_format < 1)
1884 for (at = base; at < top; at += cpuc->pebs_record_size) {
1885 unsigned long status = get_pebs_status(at);
1887 if (test_bit(bit, (unsigned long *)&status)) {
1888 /* PEBS v3 has accurate status bits */
1889 if (x86_pmu.intel_cap.pebs_format >= 3)
1892 if (status == (1 << bit))
1895 /* clear non-PEBS bit and re-check */
1896 pebs_status = status & cpuc->pebs_enabled;
1897 pebs_status &= PEBS_COUNTER_MASK;
1898 if (pebs_status == (1 << bit))
1905 void intel_pmu_auto_reload_read(struct perf_event *event)
1907 WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD));
1909 perf_pmu_disable(event->pmu);
1910 intel_pmu_drain_pebs_buffer();
1911 perf_pmu_enable(event->pmu);
1915 * Special variant of intel_pmu_save_and_restart() for auto-reload.
1918 intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
1920 struct hw_perf_event *hwc = &event->hw;
1921 int shift = 64 - x86_pmu.cntval_bits;
1922 u64 period = hwc->sample_period;
1923 u64 prev_raw_count, new_raw_count;
1929 * drain_pebs() only happens when the PMU is disabled.
1931 WARN_ON(this_cpu_read(cpu_hw_events.enabled));
1933 prev_raw_count = local64_read(&hwc->prev_count);
1934 rdpmcl(hwc->event_base_rdpmc, new_raw_count);
1935 local64_set(&hwc->prev_count, new_raw_count);
1938 * Since the counter increments a negative counter value and
1939 * overflows on the sign switch, giving the interval:
1943 * the difference between two consecutive reads is:
1945 * A) value2 - value1;
1946 * when no overflows have happened in between,
1948 * B) (0 - value1) + (value2 - (-period));
1949 * when one overflow happened in between,
1951 * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period));
1952 * when @n overflows happened in between.
	 * Here A) is the obvious difference, B) is the extension to the
	 * discrete interval, where the first term is the distance to the top
	 * of the current interval and the second term is from the bottom of
	 * the next interval, and C) is the extension to multiple intervals,
	 * where the middle term covers the whole intervals in between.
1960 * An equivalent of C, by reduction, is:
1962 * value2 - value1 + n * period
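	 *
	 * For example (illustrative numbers only): with period = 100,
	 * value1 = -30, value2 = -80 and n = 2 overflows in between, the
	 * delta is (-80) - (-30) + 2 * 100 = 150 counted events.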
1964 new = ((s64)(new_raw_count << shift) >> shift);
1965 old = ((s64)(prev_raw_count << shift) >> shift);
1966 local64_add(new - old + count * period, &event->count);
1968 local64_set(&hwc->period_left, -new);
1970 perf_event_update_userpage(event);
1975 static __always_inline void
1976 __intel_pmu_pebs_event(struct perf_event *event,
1977 struct pt_regs *iregs,
1978 struct perf_sample_data *data,
1979 void *base, void *top,
1981 void (*setup_sample)(struct perf_event *,
1984 struct perf_sample_data *,
1987 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1988 struct hw_perf_event *hwc = &event->hw;
1989 struct x86_perf_regs perf_regs;
1990 struct pt_regs *regs = &perf_regs.regs;
1991 void *at = get_next_pebs_record_by_bit(base, top, bit);
1992 static struct pt_regs dummy_iregs;
1994 if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
1996 * Now, auto-reload is only enabled in fixed period mode.
1997 * The reload value is always hwc->sample_period.
1998 * May need to change it, if auto-reload is enabled in
2001 intel_pmu_save_and_restart_reload(event, count);
2002 } else if (!intel_pmu_save_and_restart(event))
2006 iregs = &dummy_iregs;
2009 setup_sample(event, iregs, at, data, regs);
2010 perf_event_output(event, data, regs);
2011 at += cpuc->pebs_record_size;
2012 at = get_next_pebs_record_by_bit(at, top, bit);
2016 setup_sample(event, iregs, at, data, regs);
2017 if (iregs == &dummy_iregs) {
2019 * The PEBS records may be drained in the non-overflow context,
2020 * e.g., large PEBS + context switch. Perf should treat the
		 * last record the same as other PEBS records, and should not
2022 * invoke the generic overflow handler.
2024 perf_event_output(event, data, regs);
2027 * All but the last records are processed.
2028 * The last one is left to be able to call the overflow handler.
2030 if (perf_event_overflow(event, data, regs))
2031 x86_pmu_stop(event, 0);
2035 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sample_data *data)
2037 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2038 struct debug_store *ds = cpuc->ds;
2039 struct perf_event *event = cpuc->events[0]; /* PMC0 only */
2040 struct pebs_record_core *at, *top;
2043 if (!x86_pmu.pebs_active)
2046 at = (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base;
2047 top = (struct pebs_record_core *)(unsigned long)ds->pebs_index;
2050 * Whatever else happens, drain the thing
2052 ds->pebs_index = ds->pebs_buffer_base;
2054 if (!test_bit(0, cpuc->active_mask))
2057 WARN_ON_ONCE(!event);
2059 if (!event->attr.precise_ip)
2064 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2065 intel_pmu_save_and_restart_reload(event, 0);
2069 __intel_pmu_pebs_event(event, iregs, data, at, top, 0, n,
2070 setup_pebs_fixed_sample_data);
2073 static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpuc, int size)
2075 struct perf_event *event;
	 * drain_pebs() can be called twice in a short period for an
	 * auto-reload event in pmu::read(), with no overflows having
	 * happened in between. intel_pmu_save_and_restart_reload() still
	 * needs to be called to update event->count in that case.
2085 for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, size) {
2086 event = cpuc->events[bit];
2087 if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2088 intel_pmu_save_and_restart_reload(event, 0);
2092 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sample_data *data)
2094 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2095 struct debug_store *ds = cpuc->ds;
2096 struct perf_event *event;
2097 void *base, *at, *top;
2098 short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2099 short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2103 if (!x86_pmu.pebs_active)
2106 base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
2107 top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
2109 ds->pebs_index = ds->pebs_buffer_base;
2111 mask = (1ULL << x86_pmu.max_pebs_events) - 1;
2112 size = x86_pmu.max_pebs_events;
2113 if (x86_pmu.flags & PMU_FL_PEBS_ALL) {
2114 mask |= ((1ULL << x86_pmu.num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED;
2115 size = INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed;
2118 if (unlikely(base >= top)) {
2119 intel_pmu_pebs_event_update_no_drain(cpuc, size);
2123 for (at = base; at < top; at += x86_pmu.pebs_record_size) {
2124 struct pebs_record_nhm *p = at;
2127 pebs_status = p->status & cpuc->pebs_enabled;
2128 pebs_status &= mask;
2130 /* PEBS v3 has more accurate status bits */
2131 if (x86_pmu.intel_cap.pebs_format >= 3) {
2132 for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
2139 * On some CPUs the PEBS status can be zero when PEBS is
2140 * racing with clearing of GLOBAL_STATUS.
2142 * Normally we would drop that record, but in the
2143 * case when there is only a single active PEBS event
2144 * we can assume it's for that event.
2146 if (!pebs_status && cpuc->pebs_enabled &&
2147 !(cpuc->pebs_enabled & (cpuc->pebs_enabled-1)))
2148 pebs_status = p->status = cpuc->pebs_enabled;
2150 bit = find_first_bit((unsigned long *)&pebs_status,
2151 x86_pmu.max_pebs_events);
2152 if (bit >= x86_pmu.max_pebs_events)
2156 * The PEBS hardware does not deal well with the situation
		 * when events happen close to each other and multiple status
		 * bits are set. But this should happen rarely.
2160 * If these events include one PEBS and multiple non-PEBS
2161 * events, it doesn't impact PEBS record. The record will
2162 * be handled normally. (slow path)
2164 * If these events include two or more PEBS events, the
2165 * records for the events can be collapsed into a single
2166 * one, and it's not possible to reconstruct all events
2167 * that caused the PEBS record. It's called collision.
2168 * If collision happened, the record will be dropped.
2170 if (pebs_status != (1ULL << bit)) {
2171 for_each_set_bit(i, (unsigned long *)&pebs_status, size)
2179 for_each_set_bit(bit, (unsigned long *)&mask, size) {
2180 if ((counts[bit] == 0) && (error[bit] == 0))
2183 event = cpuc->events[bit];
2184 if (WARN_ON_ONCE(!event))
2187 if (WARN_ON_ONCE(!event->attr.precise_ip))
			/* Log the number of dropped samples. */
2192 perf_log_lost_samples(event, error[bit]);
2194 if (iregs && perf_event_account_interrupt(event))
2195 x86_pmu_stop(event, 0);
2199 __intel_pmu_pebs_event(event, iregs, data, base,
2200 top, bit, counts[bit],
2201 setup_pebs_fixed_sample_data);
2206 static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sample_data *data)
2208 short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] = {};
2209 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2210 int max_pebs_events = hybrid(cpuc->pmu, max_pebs_events);
2211 int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2212 struct debug_store *ds = cpuc->ds;
2213 struct perf_event *event;
2214 void *base, *at, *top;
2218 if (!x86_pmu.pebs_active)
2221 base = (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base;
2222 top = (struct pebs_basic *)(unsigned long)ds->pebs_index;
2224 ds->pebs_index = ds->pebs_buffer_base;
2226 mask = ((1ULL << max_pebs_events) - 1) |
2227 (((1ULL << num_counters_fixed) - 1) << INTEL_PMC_IDX_FIXED);
2228 size = INTEL_PMC_IDX_FIXED + num_counters_fixed;
2230 if (unlikely(base >= top)) {
2231 intel_pmu_pebs_event_update_no_drain(cpuc, size);
2235 for (at = base; at < top; at += cpuc->pebs_record_size) {
2238 pebs_status = get_pebs_status(at) & cpuc->pebs_enabled;
2239 pebs_status &= mask;
2241 for_each_set_bit(bit, (unsigned long *)&pebs_status, size)
2245 for_each_set_bit(bit, (unsigned long *)&mask, size) {
2246 if (counts[bit] == 0)
2249 event = cpuc->events[bit];
2250 if (WARN_ON_ONCE(!event))
2253 if (WARN_ON_ONCE(!event->attr.precise_ip))
2256 __intel_pmu_pebs_event(event, iregs, data, base,
2257 top, bit, counts[bit],
2258 setup_pebs_adaptive_sample_data);
2263 * BTS, PEBS probe and setup
2266 void __init intel_ds_init(void)
	 * No support for 32-bit formats
2271 if (!boot_cpu_has(X86_FEATURE_DTES64))
2274 x86_pmu.bts = boot_cpu_has(X86_FEATURE_BTS);
2275 x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
2276 x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
2277 if (x86_pmu.version <= 4)
2278 x86_pmu.pebs_no_isolation = 1;
2281 char pebs_type = x86_pmu.intel_cap.pebs_trap ? '+' : '-';
2282 char *pebs_qual = "";
2283 int format = x86_pmu.intel_cap.pebs_format;
2286 x86_pmu.intel_cap.pebs_baseline = 0;
2290 pr_cont("PEBS fmt0%c, ", pebs_type);
2291 x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
2293 * Using >PAGE_SIZE buffers makes the WRMSR to
2294 * PERF_GLOBAL_CTRL in intel_pmu_enable_all()
2295 * mysteriously hang on Core2.
2297 * As a workaround, we don't do this.
2299 x86_pmu.pebs_buffer_size = PAGE_SIZE;
2300 x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
2304 pr_cont("PEBS fmt1%c, ", pebs_type);
2305 x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
2306 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2310 pr_cont("PEBS fmt2%c, ", pebs_type);
2311 x86_pmu.pebs_record_size = sizeof(struct pebs_record_hsw);
2312 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2316 pr_cont("PEBS fmt3%c, ", pebs_type);
2317 x86_pmu.pebs_record_size =
2318 sizeof(struct pebs_record_skl);
2319 x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
2320 x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME;
2325 x86_pmu.drain_pebs = intel_pmu_drain_pebs_icl;
2326 x86_pmu.pebs_record_size = sizeof(struct pebs_basic);
2327 if (x86_pmu.intel_cap.pebs_baseline) {
2328 x86_pmu.large_pebs_flags |=
2329 PERF_SAMPLE_BRANCH_STACK |
2331 x86_pmu.flags |= PMU_FL_PEBS_ALL;
2332 x86_pmu.pebs_capable = ~0ULL;
2333 pebs_qual = "-baseline";
2334 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
2336 /* Only basic record supported */
2337 x86_pmu.large_pebs_flags &=
2338 ~(PERF_SAMPLE_ADDR |
2340 PERF_SAMPLE_DATA_SRC |
2341 PERF_SAMPLE_TRANSACTION |
2342 PERF_SAMPLE_REGS_USER |
2343 PERF_SAMPLE_REGS_INTR);
2345 pr_cont("PEBS fmt4%c%s, ", pebs_type, pebs_qual);
2347 if (!is_hybrid() && x86_pmu.intel_cap.pebs_output_pt_available) {
2348 pr_cont("PEBS-via-PT, ");
2349 x86_get_pmu(smp_processor_id())->capabilities |= PERF_PMU_CAP_AUX_OUTPUT;
2355 pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
2361 void perf_restore_debug_store(void)
2363 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2365 if (!x86_pmu.bts && !x86_pmu.pebs)
2368 wrmsrl(MSR_IA32_DS_AREA, (unsigned long)ds);