#include <linux/perf_event.h>
#include <linux/types.h>

#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/insn.h>

#include "../perf_event.h"
enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
	LBR_FORMAT_EIP_FLAGS2	= 0x04,
	LBR_FORMAT_INFO		= 0x05,
	LBR_FORMAT_TIME		= 0x06,
	LBR_FORMAT_MAX_KNOWN	= LBR_FORMAT_TIME,
};

static const enum {
	LBR_EIP_FLAGS		= 1,
	LBR_TSX			= 2,
} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
	[LBR_FORMAT_EIP_FLAGS]  = LBR_EIP_FLAGS,
	[LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
};
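
/*
 * Only the two EIP+flags formats need describing here: LBR_FORMAT_INFO
 * keeps its flags in the separate MSR_LBR_INFO_x registers and
 * LBR_FORMAT_TIME is handled explicitly in intel_pmu_lbr_read_64(), so
 * neither gets an lbr_desc entry.
 */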

/*
 * Intel LBR_SELECT bits
 * Intel Vol3a, April 2011, Section 16.7 Table 16-10
 *
 * Hardware branch filter (not available on all CPUs)
 */
#define LBR_KERNEL_BIT		0 /* do not capture at ring0 */
#define LBR_USER_BIT		1 /* do not capture at ring > 0 */
#define LBR_JCC_BIT		2 /* do not capture conditional branches */
#define LBR_REL_CALL_BIT	3 /* do not capture relative calls */
#define LBR_IND_CALL_BIT	4 /* do not capture indirect calls */
#define LBR_RETURN_BIT		5 /* do not capture near returns */
#define LBR_IND_JMP_BIT		6 /* do not capture indirect jumps */
#define LBR_REL_JMP_BIT		7 /* do not capture relative jumps */
#define LBR_FAR_BIT		8 /* do not capture far branches */
#define LBR_CALL_STACK_BIT	9 /* enable call stack */

/*
 * Following bit only exists in Linux; we mask it out before writing it to
 * the actual MSR. But it helps the constraint perf code to understand
 * that this is a separate configuration.
 */
#define LBR_NO_INFO_BIT	       63 /* don't read LBR_INFO. */

#define LBR_KERNEL	(1 << LBR_KERNEL_BIT)
#define LBR_USER	(1 << LBR_USER_BIT)
#define LBR_JCC		(1 << LBR_JCC_BIT)
#define LBR_REL_CALL	(1 << LBR_REL_CALL_BIT)
#define LBR_IND_CALL	(1 << LBR_IND_CALL_BIT)
#define LBR_RETURN	(1 << LBR_RETURN_BIT)
#define LBR_REL_JMP	(1 << LBR_REL_JMP_BIT)
#define LBR_IND_JMP	(1 << LBR_IND_JMP_BIT)
#define LBR_FAR		(1 << LBR_FAR_BIT)
#define LBR_CALL_STACK	(1 << LBR_CALL_STACK_BIT)
#define LBR_NO_INFO	(1ULL << LBR_NO_INFO_BIT)

#define LBR_PLM (LBR_KERNEL | LBR_USER)

#define LBR_SEL_MASK	0x3ff	/* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP	-1	/* LBR filter not supported */
#define LBR_IGN		0	/* ignored */

#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)

#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))
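
/*
 * For the flag-carrying formats the FROM MSR thus packs: bit 63 =
 * mispredict, bit 62 = in transaction, bit 61 = transaction abort, with
 * the branch source address sign extended below that.  Bits 60:59 are
 * always part of that sign extension, which is what the quirk code
 * further down relies on.
 */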

/*
 * x86 control flow change classification
 * x86 control flow changes include branches, interrupts, traps, faults
 */
enum {
	X86_BR_NONE		= 0,      /* unknown */

	X86_BR_USER		= 1 << 0, /* branch target is user */
	X86_BR_KERNEL		= 1 << 1, /* branch target is kernel */

	X86_BR_CALL		= 1 << 2, /* call */
	X86_BR_RET		= 1 << 3, /* return */
	X86_BR_SYSCALL		= 1 << 4, /* syscall */
	X86_BR_SYSRET		= 1 << 5, /* syscall return */
	X86_BR_INT		= 1 << 6, /* sw interrupt */
	X86_BR_IRET		= 1 << 7, /* return from interrupt */
	X86_BR_JCC		= 1 << 8, /* conditional */
	X86_BR_JMP		= 1 << 9, /* jump */
	X86_BR_IRQ		= 1 << 10,/* hw interrupt or trap or fault */
	X86_BR_IND_CALL		= 1 << 11,/* indirect calls */
	X86_BR_ABORT		= 1 << 12,/* transaction abort */
	X86_BR_IN_TX		= 1 << 13,/* in transaction */
	X86_BR_NO_TX		= 1 << 14,/* not in transaction */
	X86_BR_ZERO_CALL	= 1 << 15,/* zero length call */
	X86_BR_CALL_STACK	= 1 << 16,/* call stack */
	X86_BR_IND_JMP		= 1 << 17,/* indirect jump */
};

#define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
#define X86_BR_ANYTX (X86_BR_NO_TX | X86_BR_IN_TX)

#define X86_BR_ANY		 \
	(X86_BR_CALL		|\
	 X86_BR_RET		|\
	 X86_BR_SYSCALL		|\
	 X86_BR_SYSRET		|\
	 X86_BR_INT		|\
	 X86_BR_IRET		|\
	 X86_BR_JCC		|\
	 X86_BR_JMP		|\
	 X86_BR_IRQ		|\
	 X86_BR_ABORT		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_IND_JMP		|\
	 X86_BR_ZERO_CALL)

#define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)

#define X86_BR_ANY_CALL		 \
	(X86_BR_CALL		|\
	 X86_BR_IND_CALL	|\
	 X86_BR_ZERO_CALL	|\
	 X86_BR_SYSCALL		|\
	 X86_BR_IRQ		|\
	 X86_BR_INT)

static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);

/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
 * otherwise it becomes near impossible to get a reliable stack.
 */
static void __intel_pmu_lbr_enable(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 debugctl, lbr_select = 0, orig_debugctl;

	/*
	 * No need to unfreeze manually, as v4 can do that as part
	 * of the GLOBAL_STATUS ack.
	 */
	if (pmi && x86_pmu.version >= 4)
		return;

	/*
	 * No need to reprogram LBR_SELECT in a PMI, as it
	 * did not change.
	 */
	if (cpuc->lbr_sel)
		lbr_select = cpuc->lbr_sel->config & x86_pmu.lbr_sel_mask;
	if (!pmi && cpuc->lbr_sel)
		wrmsrl(MSR_LBR_SELECT, lbr_select);

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	orig_debugctl = debugctl;
	debugctl |= DEBUGCTLMSR_LBR;
	/*
	 * LBR callstack does not work well with FREEZE_LBRS_ON_PMI.
	 * If FREEZE_LBRS_ON_PMI is set, PMI near call/return instructions
	 * may cause superfluous increase/decrease of LBR_TOS.
	 */
	if (!(lbr_select & LBR_CALL_STACK))
		debugctl |= DEBUGCTLMSR_FREEZE_LBRS_ON_PMI;
	if (orig_debugctl != debugctl)
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
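
/*
 * DEBUGCTL is only written back if one of the bits above actually
 * changed; the orig_debugctl comparison spares a redundant MSR write on
 * this path.
 */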

static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}

static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}

static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + i, 0);
	}
}

void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}

/*
 * TOS = most recently recorded branch
 */
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}

/*
 * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
 * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
 * TSX is not supported they have no consistent behavior:
 *
 *  - For wrmsr(), bits 61:62 are considered part of the sign extension.
 *  - For HW updates (branch captures) bits 61:62 are always OFF and are not
 *    part of the sign extension.
 *
 * Therefore, if:
 *
 *   1) LBR has TSX format
 *   2) CPU has no TSX support enabled
 *
 * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
 * value from rdmsr() must be converted to have a 61-bit sign extension,
 * ignoring the TSX flags.
 */
static inline bool lbr_from_signext_quirk_needed(void)
{
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
			   boot_cpu_has(X86_FEATURE_RTM);

	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
}

DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);

/* If quirk is enabled, ensure sign extension is 63 bits: */
inline u64 lbr_from_signext_quirk_wr(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Sign extend into bits 61:62 while preserving bit 63.
		 *
		 * Quirk is enabled when TSX is disabled. Therefore TSX bits
		 * in val are always OFF and must be changed to be sign
		 * extension bits. Since bits 59:60 are guaranteed to be
		 * part of the sign extension bits, we can just copy them
		 * to 61:62.
		 */
		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
	}
	return val;
}
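
/*
 * Worked example: for a value whose sign extension reaches bits 60:59
 * (e.g. a kernel address such as 0xffffffffa0001000), the masked copy
 * shifted left by 2 lands exactly in bits 62:61, so the OR above turns
 * the 61-bit sign extension into a 63-bit one without touching bit 63.
 */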

/*
 * If quirk is needed, ensure sign extension is 61 bits:
 */
u64 lbr_from_signext_quirk_rd(u64 val)
{
	if (static_branch_unlikely(&lbr_from_quirk_key)) {
		/*
		 * Quirk is on when TSX is not enabled. Therefore TSX
		 * flags must be read as OFF.
		 */
		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
	}
	return val;
}

static inline void wrlbr_from(unsigned int idx, u64 val)
{
	val = lbr_from_signext_quirk_wr(val);
	wrmsrl(x86_pmu.lbr_from + idx, val);
}

static inline void wrlbr_to(unsigned int idx, u64 val)
{
	wrmsrl(x86_pmu.lbr_to + idx, val);
}

static inline u64 rdlbr_from(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_from + idx, val);

	return lbr_from_signext_quirk_rd(val);
}

static inline u64 rdlbr_to(unsigned int idx)
{
	u64 val;

	rdmsrl(x86_pmu.lbr_to + idx, val);

	return val;
}

static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos;

	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = task_ctx->tos;
	for (i = 0; i < task_ctx->valid_lbrs; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);

		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}

	for (; i < x86_pmu.lbr_nr; i++) {
		lbr_idx = (tos - i) & mask;
		wrlbr_from(lbr_idx, 0);
		wrlbr_to(lbr_idx, 0);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
	}

	wrmsrl(x86_pmu.lbr_tos, tos);
	task_ctx->lbr_stack_state = LBR_NONE;
}

static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
	int i;
	unsigned lbr_idx, mask;
	u64 tos, from;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;
		return;
	}

	mask = x86_pmu.lbr_nr - 1;
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		lbr_idx = (tos - i) & mask;
		from = rdlbr_from(lbr_idx);
		if (!from)
			break;
		task_ctx->lbr_from[i] = from;
		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}
	task_ctx->valid_lbrs = i;
	task_ctx->tos = tos;
	task_ctx->lbr_stack_state = LBR_VALID;
}
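
/*
 * Both save and restore walk the LBR stack newest-to-oldest from TOS,
 * using (tos - i) & mask as a ring-buffer index; this relies on
 * x86_pmu.lbr_nr being a power of two, as set up by the
 * intel_pmu_lbr_init_*() functions below.  Save stops at the first zero
 * FROM value and remembers how many entries were valid, so restore only
 * rewrites that many and zeroes the rest.
 */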

void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct x86_perf_task_context *task_ctx;

	/*
	 * If the LBR callstack feature is enabled and the stack was saved when
	 * the task was scheduled out, restore the stack. Otherwise flush
	 * the LBR stack.
	 */
	task_ctx = ctx ? ctx->task_ctx_data : NULL;
	if (task_ctx) {
		if (sched_in)
			__intel_pmu_lbr_restore(task_ctx);
		else
			__intel_pmu_lbr_save(task_ctx);
		return;
	}

	/*
	 * Since a context switch can flip the address space and LBR entries
	 * are not tagged with an identifier, we need to wipe the LBR, even for
	 * per-cpu events. You simply cannot resolve the branches from the old
	 * address space.
	 */
	if (sched_in)
		intel_pmu_lbr_reset();
}

static inline bool branch_user_callstack(unsigned br_sel)
{
	return (br_sel & X86_BR_USER) && (br_sel & X86_BR_CALL_STACK);
}

void intel_pmu_lbr_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->br_sel = event->hw.branch_reg.reg;

	if (branch_user_callstack(cpuc->br_sel) && event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users++;
	}

	/*
	 * Request pmu::sched_task() callback, which will fire inside the
	 * regular perf event scheduling, so that call will:
	 *
	 *  - restore or wipe; when LBR-callstack,
	 *  - wipe; otherwise,
	 *
	 * when this is from __perf_event_task_sched_in().
	 *
	 * However, if this is from perf_install_in_context(), no such callback
	 * will follow and we'll need to reset the LBR here if this is the
	 * first LBR event.
	 *
	 * The problem is, we cannot tell these cases apart... but we can
	 * exclude the biggest chunk of cases by looking at
	 * event->total_time_running. An event that has accrued runtime cannot
	 * be 'new'. Conversely, a new event can get installed through the
	 * context switch path for the first time.
	 */
	perf_sched_cb_inc(event->ctx->pmu);
	if (!cpuc->lbr_users++ && !event->total_time_running)
		intel_pmu_lbr_reset();
}

void intel_pmu_lbr_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct x86_perf_task_context *task_ctx;

	if (!x86_pmu.lbr_nr)
		return;

	if (branch_user_callstack(cpuc->br_sel) &&
	    event->ctx->task_ctx_data) {
		task_ctx = event->ctx->task_ctx_data;
		task_ctx->lbr_callstack_users--;
	}

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->ctx->pmu);
}

void intel_pmu_lbr_enable_all(bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable(pmi);
}

void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}

static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from	= msr_lastbranch.from;
		cpuc->lbr_entries[i].to		= msr_lastbranch.to;
		cpuc->lbr_entries[i].mispred	= 0;
		cpuc->lbr_entries[i].predicted	= 0;
		cpuc->lbr_entries[i].in_tx	= 0;
		cpuc->lbr_entries[i].abort	= 0;
		cpuc->lbr_entries[i].cycles	= 0;
		cpuc->lbr_entries[i].reserved	= 0;
	}
	cpuc->lbr_stack.nr = i;
}
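
/*
 * In the 32-bit format one MSR packs both addresses: the union above
 * unpacks the low half as the branch source and the high half as the
 * target.  As in the 64-bit path below, the walk starts at TOS, so
 * lbr_entries[0] holds the most recent branch.
 */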

/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	bool need_info = false, call_stack = false;
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;
	int out = 0;
	int num = x86_pmu.lbr_nr;

	if (cpuc->lbr_sel) {
		need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
		if (cpuc->lbr_sel->config & LBR_CALL_STACK)
			call_stack = true;
	}

	for (i = 0; i < num; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
		int skip = 0;
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

		from = rdlbr_from(lbr_idx);
		to   = rdlbr_to(lbr_idx);

		/*
		 * Read LBR call stack entries
		 * until invalid entry (0s) is detected.
		 */
		if (call_stack && !from)
			break;

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info);
			mis = !!(info & LBR_INFO_MISPRED);
			pred = !mis;
			in_tx = !!(info & LBR_INFO_IN_TX);
			abort = !!(info & LBR_INFO_ABORT);
			cycles = (info & LBR_INFO_CYCLES);
		}

		if (lbr_format == LBR_FORMAT_TIME) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
			cycles = ((to >> 48) & LBR_INFO_CYCLES);

			to = (u64)((((s64)to) << 16) >> 16);
		}

		if (lbr_flags & LBR_EIP_FLAGS) {
			mis = !!(from & LBR_FROM_FLAG_MISPRED);
			pred = !mis;
			skip = 1;
		}
		if (lbr_flags & LBR_TSX) {
			in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
			abort = !!(from & LBR_FROM_FLAG_ABORT);
			skip = 3;
		}
		from = (u64)((((s64)from) << skip) >> skip);

		/*
		 * Some CPUs report duplicated abort records,
		 * with the second entry not having an abort bit set.
		 * Skip them here. This loop runs backwards,
		 * so we need to undo the previous record.
		 * If the abort just happened outside the window
		 * the extra entry cannot be removed.
		 */
		if (abort && x86_pmu.lbr_double_abort && out > 0)
			out--;

		cpuc->lbr_entries[out].from	 = from;
		cpuc->lbr_entries[out].to	 = to;
		cpuc->lbr_entries[out].mispred	 = mis;
		cpuc->lbr_entries[out].predicted = pred;
		cpuc->lbr_entries[out].in_tx	 = in_tx;
		cpuc->lbr_entries[out].abort	 = abort;
		cpuc->lbr_entries[out].cycles	 = cycles;
		cpuc->lbr_entries[out].reserved	 = 0;
		out++;
	}
	cpuc->lbr_stack.nr = out;
}
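
/*
 * The skip/sign-extension step above strips the flag bits back out of
 * the FROM address: skip = 1 drops the MISPRED bit (63) for the plain
 * flags formats, skip = 3 also drops the IN_TX/ABORT bits (62:61) for
 * the TSX formats, and ((s64)from << skip) >> skip re-extends the sign
 * so that kernel addresses stay canonical.
 */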

void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);

	intel_pmu_lbr_filter(cpuc);
}

/*
 * SW filter is used:
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
{
	u64 br_type = event->attr.branch_sample_type;
	int mask = 0;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;

	/* we ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_ABORT_TX)
		mask |= X86_BR_ABORT;

	if (br_type & PERF_SAMPLE_BRANCH_IN_TX)
		mask |= X86_BR_IN_TX;

	if (br_type & PERF_SAMPLE_BRANCH_NO_TX)
		mask |= X86_BR_NO_TX;

	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_CALL_STACK) {
		if (!x86_pmu_has_lbr_callstack())
			return -EOPNOTSUPP;
		if (mask & ~(X86_BR_USER | X86_BR_KERNEL))
			return -EINVAL;
		mask |= X86_BR_CALL | X86_BR_IND_CALL | X86_BR_RET |
			X86_BR_CALL_STACK;
	}

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

	/*
	 * stash actual user request into reg, it may
	 * be used by fixup code for some CPU
	 */
	event->hw.branch_reg.reg = mask;

	return 0;
}

/*
 * setup the HW LBR filter
 * Used only when available, may not be enough to disambiguate
 * all branches, may need the help of the SW filter
 */
static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & (1ULL << i)))
			continue;

		v = x86_pmu.lbr_sel_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGN)
			mask |= v;
	}

	reg = &event->hw.branch_reg;
	reg->idx = EXTRA_REG_LBR;

	/*
	 * The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
	 * in suppress mode. So LBR_SELECT should be set to
	 * (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
	 * But the 10th bit LBR_CALL_STACK does not operate
	 * in suppress mode.
	 */
	reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);

	if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
	    (br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
	    (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
		reg->config |= LBR_NO_INFO;

	return 0;
}
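
/*
 * Worked example: a request for kernel-only conditional branches maps to
 * mask = LBR_KERNEL | LBR_JCC.  XOR-ing with the suppress-mode bits
 * (LBR_SEL_MASK & ~LBR_CALL_STACK == 0x1ff) clears the "do not capture"
 * bits for ring 0 and for conditional branches and sets them for
 * everything else, which is the encoding MSR_LBR_SELECT expects.
 */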

int intel_pmu_setup_lbr_filter(struct perf_event *event)
{
	int ret = 0;

	/*
	 * no LBR on this PMU
	 */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	/*
	 * setup SW LBR filter
	 */
	ret = intel_pmu_setup_sw_lbr_filter(event);
	if (ret)
		return ret;

	/*
	 * setup HW LBR filter, if any
	 */
	if (x86_pmu.lbr_sel_map)
		ret = intel_pmu_setup_hw_lbr_filter(event);

	return ret;
}

/*
 * return the type of control flow change at address "from"
 * the instruction is not necessarily a branch (in case of interrupt).
 *
 * The branch type returned also includes the priv level of the
 * target of the control flow change (X86_BR_USER, X86_BR_KERNEL).
 *
 * If a branch type is unknown OR the instruction cannot be
 * decoded (e.g., text page not present), then X86_BR_NONE is
 * returned.
 */
static int branch_type(unsigned long from, unsigned long to, int abort)
{
	struct insn insn;
	void *addr;
	int bytes_read, bytes_left;
	int ret = X86_BR_NONE;
	int ext, to_plm, from_plm;
	u8 buf[MAX_INSN_SIZE];
	int is64 = 0;

	to_plm = kernel_ip(to) ? X86_BR_KERNEL : X86_BR_USER;
	from_plm = kernel_ip(from) ? X86_BR_KERNEL : X86_BR_USER;

	/*
	 * maybe zero if lbr did not fill up after a reset by the time
	 * we get a PMU interrupt
	 */
	if (from == 0 || to == 0)
		return X86_BR_NONE;

	if (abort)
		return X86_BR_ABORT | to_plm;

	if (from_plm == X86_BR_USER) {
		/*
		 * can happen if measuring at the user level only
		 * and we interrupt in a kernel thread, e.g., idle.
		 */
		if (!current->mm)
			return X86_BR_NONE;

		/* may fail if text not present */
		bytes_left = copy_from_user_nmi(buf, (void __user *)from,
						MAX_INSN_SIZE);
		bytes_read = MAX_INSN_SIZE - bytes_left;
		if (!bytes_read)
			return X86_BR_NONE;

		addr = buf;
	} else {
		/*
		 * The LBR logs any address in the IP, even if the IP just
		 * faulted. This means userspace can control the from address.
		 * Ensure we don't blindly read any address by validating it is
		 * a known text address.
		 */
		if (kernel_text_address(from)) {
			addr = (void *)from;
			/*
			 * Assume we can get the maximum possible size
			 * when grabbing kernel data. This is not
			 * _strictly_ true since we could possibly be
			 * executing up next to a memory hole, but
			 * it is very unlikely to be a problem.
			 */
			bytes_read = MAX_INSN_SIZE;
		} else {
			return X86_BR_NONE;
		}
	}

	/*
	 * decoder needs to know the ABI especially
	 * on 64-bit systems running 32-bit apps
	 */
#ifdef CONFIG_X86_64
	is64 = kernel_ip((unsigned long)addr) || !test_thread_flag(TIF_IA32);
#endif
	insn_init(&insn, addr, bytes_read, is64);
	insn_get_opcode(&insn);
	if (!insn.opcode.got)
		return X86_BR_ABORT;

	switch (insn.opcode.bytes[0]) {
	case 0xf:
		switch (insn.opcode.bytes[1]) {
		case 0x05: /* syscall */
		case 0x34: /* sysenter */
			ret = X86_BR_SYSCALL;
			break;
		case 0x07: /* sysret */
		case 0x35: /* sysexit */
			ret = X86_BR_SYSRET;
			break;
		case 0x80 ... 0x8f: /* conditional */
			ret = X86_BR_JCC;
			break;
		default:
			ret = X86_BR_NONE;
		}
		break;
	case 0x70 ... 0x7f: /* conditional */
		ret = X86_BR_JCC;
		break;
	case 0xc2: /* near ret */
	case 0xc3: /* near ret */
	case 0xca: /* far ret */
	case 0xcb: /* far ret */
		ret = X86_BR_RET;
		break;
	case 0xcf: /* iret */
		ret = X86_BR_IRET;
		break;
	case 0xcc ... 0xce: /* int */
		ret = X86_BR_INT;
		break;
	case 0xe8: /* call near rel */
		insn_get_immediate(&insn);
		if (insn.immediate1.value == 0) {
			/* zero length call */
			ret = X86_BR_ZERO_CALL;
			break;
		}
	case 0x9a: /* call far absolute */
		ret = X86_BR_CALL;
		break;
	case 0xe0 ... 0xe3: /* loop jmp */
		ret = X86_BR_JCC;
		break;
	case 0xe9 ... 0xeb: /* jmp */
		ret = X86_BR_JMP;
		break;
	case 0xff: /* call near absolute, call far absolute ind */
		insn_get_modrm(&insn);
		ext = (insn.modrm.bytes[0] >> 3) & 0x7;
		switch (ext) {
		case 2: /* near ind call */
		case 3: /* far ind call */
			ret = X86_BR_IND_CALL;
			break;
		case 4:
		case 5:
			ret = X86_BR_IND_JMP;
			break;
		}
		break;
	default:
		ret = X86_BR_NONE;
	}

	/*
	 * interrupts, traps, faults (and thus ring transition) may
	 * occur on any instructions. Thus, to classify them correctly,
	 * we need to first look at the from and to priv levels. If they
	 * are different and to is in the kernel, then it indicates
	 * a ring transition. If the from instruction is not a ring
	 * transition instr (syscall, sysenter, int), then it means
	 * it was an irq, trap or fault.
	 *
	 * we have no way of detecting kernel to kernel faults.
	 */
	if (from_plm == X86_BR_USER && to_plm == X86_BR_KERNEL
	    && ret != X86_BR_SYSCALL && ret != X86_BR_INT)
		ret = X86_BR_IRQ;

	/*
	 * branch priv level determined by target as
	 * is done by HW when LBR_SELECT is implemented
	 */
	if (ret != X86_BR_NONE)
		ret |= to_plm;

	return ret;
}
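
/*
 * Two illustrative cases: a 0xe8 call with a zero immediate only targets
 * the next instruction (commonly used by 32-bit PIC code to obtain its
 * own IP), which is why it gets its own X86_BR_ZERO_CALL class; and a
 * user->kernel transition whose source instruction is neither a syscall
 * nor a software interrupt must have been a hardware interrupt, trap or
 * fault, hence X86_BR_IRQ.
 */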

/*
 * implement actual branch filter based on user demand.
 * Hardware may not exactly satisfy that request, thus
 * we need to inspect opcodes. Mismatched branches are
 * discarded. Therefore, the number of branches returned
 * in PERF_SAMPLE_BRANCH_STACK sample may vary.
 */
static void
intel_pmu_lbr_filter(struct cpu_hw_events *cpuc)
{
	u64 from, to;
	int br_sel = cpuc->br_sel;
	int i, j, type;
	bool compress = false;

	/* if sampling all branches, then nothing to filter */
	if ((br_sel & X86_BR_ALL) == X86_BR_ALL)
		return;

	for (i = 0; i < cpuc->lbr_stack.nr; i++) {

		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;

		type = branch_type(from, to, cpuc->lbr_entries[i].abort);
		if (type != X86_BR_NONE && (br_sel & X86_BR_ANYTX)) {
			if (cpuc->lbr_entries[i].in_tx)
				type |= X86_BR_IN_TX;
			else
				type |= X86_BR_NO_TX;
		}

		/* if type does not correspond, then discard */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;
			compress = true;
		}
	}

	if (!compress)
		return;

	/* remove all entries with from=0 */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}

/*
 * Map interface branch filters onto LBR filters
 */
static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_REL_JMP
						| LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include REL_JMP+IND_JMP to get CALL branches
	 */
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT] =
	 LBR_REL_CALL | LBR_IND_CALL | LBR_REL_JMP | LBR_IND_JMP | LBR_FAR,
	/*
	 * NHM/WSM erratum: must include IND_JMP to capture IND_CALL
	 */
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
};

static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGN,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
						| LBR_RETURN | LBR_CALL_STACK,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
};

void __init intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to     = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
}

/* nehalem/westmere */
void __init intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - workaround LBR_SEL errata (see above)
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmps need to be filtered out
	 */
}

void __init intel_pmu_lbr_init_snb(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmps need to be filtered out
	 */
}

void intel_pmu_lbr_init_hsw(void)
{
	x86_pmu.lbr_nr	 = 16;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	if (lbr_from_signext_quirk_needed())
		static_branch_enable(&lbr_from_quirk_key);
}

__init void intel_pmu_lbr_init_skl(void)
{
	x86_pmu.lbr_nr	 = 32;
	x86_pmu.lbr_tos	 = MSR_LBR_TOS;
	x86_pmu.lbr_from = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to   = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - support syscall, sysret capture.
	 *   That requires LBR_FAR but that means far
	 *   jmps need to be filtered out
	 */
}

void __init intel_pmu_lbr_init_atom(void)
{
	/*
	 * only models starting at stepping 10 seem
	 * to have an operational LBR which can freeze
	 * on PMU interrupt
	 */
	if (boot_cpu_data.x86_model == 28
	    && boot_cpu_data.x86_stepping < 10) {
		pr_cont("LBR disabled due to erratum");
		return;
	}

	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}

void __init intel_pmu_lbr_init_slm(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_CORE_FROM;
	x86_pmu.lbr_to	   = MSR_LBR_CORE_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = nhm_lbr_sel_map;

	/*
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
	pr_cont("8-deep LBR, ");
}

/* Knights Landing */
void intel_pmu_lbr_init_knl(void)
{
	x86_pmu.lbr_nr	   = 8;
	x86_pmu.lbr_tos    = MSR_LBR_TOS;
	x86_pmu.lbr_from   = MSR_LBR_NHM_FROM;
	x86_pmu.lbr_to     = MSR_LBR_NHM_TO;

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;

	/* Knights Landing does have MISPREDICT bit */
	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
		x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}