// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <asm/perf_event.h>

#include "../perf_event.h"
/* LBR Branch Select valid bits */
#define LBR_SELECT_MASK		0x1ff
/*
 * LBR Branch Select filter bits which, when set, ensure that the
 * corresponding types of branches are not recorded
 */
#define LBR_SELECT_KERNEL		0	/* Branches ending in CPL = 0 */
#define LBR_SELECT_USER			1	/* Branches ending in CPL > 0 */
#define LBR_SELECT_JCC			2	/* Conditional branches */
#define LBR_SELECT_CALL_NEAR_REL	3	/* Near relative calls */
#define LBR_SELECT_CALL_NEAR_IND	4	/* Near indirect calls */
#define LBR_SELECT_RET_NEAR		5	/* Near returns */
#define LBR_SELECT_JMP_NEAR_IND		6	/* Near indirect jumps (excl. calls and returns) */
#define LBR_SELECT_JMP_NEAR_REL		7	/* Near relative jumps (excl. calls) */
#define LBR_SELECT_FAR_BRANCH		8	/* Far branches */
#define LBR_KERNEL	BIT(LBR_SELECT_KERNEL)
#define LBR_USER	BIT(LBR_SELECT_USER)
#define LBR_JCC		BIT(LBR_SELECT_JCC)
#define LBR_REL_CALL	BIT(LBR_SELECT_CALL_NEAR_REL)
#define LBR_IND_CALL	BIT(LBR_SELECT_CALL_NEAR_IND)
#define LBR_RETURN	BIT(LBR_SELECT_RET_NEAR)
#define LBR_REL_JMP	BIT(LBR_SELECT_JMP_NEAR_REL)
#define LBR_IND_JMP	BIT(LBR_SELECT_JMP_NEAR_IND)
#define LBR_FAR		BIT(LBR_SELECT_FAR_BRANCH)
#define LBR_NOT_SUPP	-1	/* unsupported filter */
#define LBR_IGNORE	0
#define LBR_ANY		\
	(LBR_JCC | LBR_REL_CALL | LBR_IND_CALL | LBR_RETURN |	\
	 LBR_REL_JMP | LBR_IND_JMP | LBR_FAR)

/* LBRv2 From/To branch record format */
struct branch_entry {
	union {
		struct {
			u64	ip:58;
			u64	ip_sign_ext:5;
			u64	mispredict:1;
		} split;
		u64	full;
	} from;

	union {
		struct {
			u64	ip:58;
			u64	ip_sign_ext:3;
			u64	reserved:1;
			u64	spec:1;
			u64	valid:1;
		} split;
		u64	full;
	} to;
};
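/*
 * Each LBR stack entry is backed by a consecutive pair of MSRs: entry
 * idx uses MSR_AMD_SAMP_BR_FROM + idx * 2 for the branch source and
 * the immediately following MSR for the branch target
 */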
static __always_inline void amd_pmu_lbr_set_from(unsigned int idx, u64 val)
{
	wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);
}
static __always_inline void amd_pmu_lbr_set_to(unsigned int idx, u64 val)
{
	wrmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);
}
static __always_inline u64 amd_pmu_lbr_get_from(unsigned int idx)
{
	u64 val;

	rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2, val);

	return val;
}
static __always_inline u64 amd_pmu_lbr_get_to(unsigned int idx)
{
	u64 val;

	rdmsrl(MSR_AMD_SAMP_BR_FROM + idx * 2 + 1, val);

	return val;
}
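/*
 * The hardware stores branch addresses truncated to the implemented
 * virtual address width, so sign-extend them to recover canonical
 * 64-bit instruction pointers
 */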
static __always_inline u64 sign_ext_branch_ip(u64 ip)
{
	u32 shift = 64 - boot_cpu_data.x86_virt_bits;

	return (u64)(((s64)ip << shift) >> shift);
}
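/*
 * Software filtering runs in two passes: classify each recorded branch
 * and invalidate entries that do not match the requested branch types,
 * then compress the stack by removing the invalidated entries
 */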
static void amd_pmu_lbr_filter(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int br_sel = cpuc->br_sel, offset, type, i, j;
	bool compress = false;
	bool fused_only = false;
	u64 from, to;
	/* If sampling all branches, there is nothing to filter */
	if (((br_sel & X86_BR_ALL) == X86_BR_ALL) &&
	    ((br_sel & X86_BR_TYPE_SAVE) != X86_BR_TYPE_SAVE))
		fused_only = true;
	for (i = 0; i < cpuc->lbr_stack.nr; i++) {
		from = cpuc->lbr_entries[i].from;
		to = cpuc->lbr_entries[i].to;
		type = branch_type_fused(from, to, 0, &offset);

		/*
		 * Adjust the branch from address in case of instruction
		 * fusion where it points to an instruction preceding the
		 * actual branch
		 */
		if (offset) {
			cpuc->lbr_entries[i].from += offset;
			if (fused_only)
				continue;
		}
		/* If the type does not correspond, discard the entry */
		if (type == X86_BR_NONE || (br_sel & type) != type) {
			cpuc->lbr_entries[i].from = 0;	/* mark invalid */
			compress = true;
		}

		if ((br_sel & X86_BR_TYPE_SAVE) == X86_BR_TYPE_SAVE)
			cpuc->lbr_entries[i].type = common_branch_type(type);
	}

	if (!compress)
		return;
	/* Remove all invalid entries */
	for (i = 0; i < cpuc->lbr_stack.nr; ) {
		if (!cpuc->lbr_entries[i].from) {
			j = i;
			while (++j < cpuc->lbr_stack.nr)
				cpuc->lbr_entries[j - 1] = cpuc->lbr_entries[j];
			cpuc->lbr_stack.nr--;
			if (!cpuc->lbr_entries[i].from)
				continue;
		}
		i++;
	}
}
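/*
 * Branch speculation outcome, indexed by (valid << 1) | spec from the
 * branch-to record; see amd_pmu_lbr_read()
 */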
static const int lbr_spec_map[PERF_BR_SPEC_MAX] = {
	PERF_BR_SPEC_NA,
	PERF_BR_SPEC_WRONG_PATH,
	PERF_BR_NON_SPEC_CORRECT_PATH,
	PERF_BR_SPEC_CORRECT_PATH,
};
void amd_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_branch_entry *br = cpuc->lbr_entries;
	struct branch_entry entry;
	int out = 0, idx, i;

	if (!cpuc->lbr_users)
		return;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		entry.from.full = amd_pmu_lbr_get_from(i);
		entry.to.full = amd_pmu_lbr_get_to(i);

		/*
		 * Check if a branch has been logged; if valid = 0 and
		 * spec = 0, no branch was recorded
		 */
		if (!entry.to.split.valid && !entry.to.split.spec)
			continue;
		perf_clear_branch_entry_bitfields(br + out);

		br[out].from = sign_ext_branch_ip(entry.from.split.ip);
		br[out].to = sign_ext_branch_ip(entry.to.split.ip);
		br[out].mispred = entry.from.split.mispredict;
		br[out].predicted = !br[out].mispred;
		/*
		 * Set branch speculation information using the status of
		 * the valid and spec bits.
		 *
		 * When valid = 0 and spec = 0, no branch was recorded and
		 * the entry is discarded as seen above.
		 *
		 * When valid = 0 and spec = 1, the recorded branch was
		 * speculative but took the wrong path.
		 *
		 * When valid = 1 and spec = 0, the recorded branch was
		 * non-speculative and took the correct path.
		 *
		 * When valid = 1 and spec = 1, the recorded branch was
		 * speculative and took the correct path.
		 */
		idx = (entry.to.split.valid << 1) | entry.to.split.spec;
		br[out].spec = lbr_spec_map[idx];
		out++;
	}
	cpuc->lbr_stack.nr = out;

	/*
	 * Internal register renaming ensures that LBR From[0] and LBR To[0]
	 * always represent the TOS
	 */
	cpuc->lbr_stack.hw_idx = 0;

	/* Perform further software filtering */
	amd_pmu_lbr_filter();
}
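/*
 * Maps perf branch sample type shifts to hardware LBR_SELECT bits;
 * shifts that are not listed default to LBR_IGNORE (0)
 */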
static const int lbr_select_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
	[PERF_SAMPLE_BRANCH_USER_SHIFT]		= LBR_USER,
	[PERF_SAMPLE_BRANCH_KERNEL_SHIFT]	= LBR_KERNEL,
	[PERF_SAMPLE_BRANCH_HV_SHIFT]		= LBR_IGNORE,

	[PERF_SAMPLE_BRANCH_ANY_SHIFT]		= LBR_ANY,
	[PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL | LBR_FAR,
	[PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT]	= LBR_RETURN | LBR_FAR,
	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
	[PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT]	= LBR_NOT_SUPP,
	[PERF_SAMPLE_BRANCH_IN_TX_SHIFT]	= LBR_NOT_SUPP,
	[PERF_SAMPLE_BRANCH_NO_TX_SHIFT]	= LBR_NOT_SUPP,
	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,

	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_NOT_SUPP,
	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,

	[PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT]	= LBR_NOT_SUPP,
	[PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT]	= LBR_NOT_SUPP,
};
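/*
 * Derive two masks from the requested branch_sample_type: a software
 * filter (X86_BR_* bits, saved in reg->reg) consumed by
 * amd_pmu_lbr_filter(), and a hardware filter (LBR_SELECT bits, saved
 * in reg->config) programmed into MSR_AMD64_LBR_SELECT
 */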
static int amd_pmu_lbr_setup_filter(struct perf_event *event)
{
	struct hw_perf_event_extra *reg = &event->hw.branch_reg;
	u64 br_type = event->attr.branch_sample_type;
	u64 mask = 0, v;
	int i;

	/* No LBR support */
	if (!x86_pmu.lbr_nr)
		return -EOPNOTSUPP;

	if (br_type & PERF_SAMPLE_BRANCH_USER)
		mask |= X86_BR_USER;

	if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
		mask |= X86_BR_KERNEL;
	/* Ignore BRANCH_HV here */

	if (br_type & PERF_SAMPLE_BRANCH_ANY)
		mask |= X86_BR_ANY;

	if (br_type & PERF_SAMPLE_BRANCH_ANY_CALL)
		mask |= X86_BR_ANY_CALL;
	if (br_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		mask |= X86_BR_RET | X86_BR_IRET | X86_BR_SYSRET;

	if (br_type & PERF_SAMPLE_BRANCH_IND_CALL)
		mask |= X86_BR_IND_CALL;
	if (br_type & PERF_SAMPLE_BRANCH_COND)
		mask |= X86_BR_JCC;

	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
		mask |= X86_BR_IND_JMP;

	if (br_type & PERF_SAMPLE_BRANCH_CALL)
		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;

	if (br_type & PERF_SAMPLE_BRANCH_TYPE_SAVE)
		mask |= X86_BR_TYPE_SAVE;
	reg->reg = mask;
	mask = 0;

	for (i = 0; i < PERF_SAMPLE_BRANCH_MAX_SHIFT; i++) {
		if (!(br_type & BIT_ULL(i)))
			continue;

		v = lbr_select_map[i];
		if (v == LBR_NOT_SUPP)
			return -EOPNOTSUPP;

		if (v != LBR_IGNORE)
			mask |= v;
	}
	/*
	 * Filter bits operate in suppress mode: a set bit prevents the
	 * corresponding branch type from being recorded, so the selection
	 * must be inverted
	 */
	reg->config = mask ^ LBR_SELECT_MASK;

	return 0;
}
int amd_pmu_lbr_hw_config(struct perf_event *event)
{
	int ret = 0;

	/* LBR is not recommended in counting mode */
	if (!is_sampling_event(event))
		return -EINVAL;

	ret = amd_pmu_lbr_setup_filter(event);
	if (!ret)
		event->attach_state |= PERF_ATTACH_SCHED_CB;

	return ret;
}
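/*
 * Wipe all branch records and clear the hardware filter; called when
 * the first LBR user is added and across context switches
 */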
void amd_pmu_lbr_reset(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int i;

	if (!x86_pmu.lbr_nr)
		return;

	/* Reset all branch records individually */
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		amd_pmu_lbr_set_from(i, 0);
		amd_pmu_lbr_set_to(i, 0);
	}
	cpuc->last_task_ctx = NULL;
	cpuc->last_log_id = 0;
	wrmsrl(MSR_AMD64_LBR_SELECT, 0);
}
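/*
 * lbr_users counts the events using the LBR stack on this CPU; the
 * stack is reset for the first user unless the event has already
 * accumulated runtime from an earlier schedule-in
 */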
void amd_pmu_lbr_add(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event_extra *reg = &event->hw.branch_reg;

	if (!x86_pmu.lbr_nr)
		return;

	if (has_branch_stack(event)) {
		cpuc->lbr_select = 1;
		cpuc->lbr_sel->config = reg->config;
		cpuc->br_sel = reg->reg;
	}

	perf_sched_cb_inc(event->pmu);

	if (!cpuc->lbr_users++ && !event->total_time_running)
		amd_pmu_lbr_reset();
}
void amd_pmu_lbr_del(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	if (has_branch_stack(event))
		cpuc->lbr_select = 0;

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);
	perf_sched_cb_dec(event->pmu);
}
void amd_pmu_lbr_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	/*
	 * A context switch can flip the address space and LBR entries are
	 * not tagged with an identifier. Hence, branches cannot be resolved
	 * from the old address space and the LBR records should be wiped.
	 */
	if (cpuc->lbr_users && sched_in)
		amd_pmu_lbr_reset();
}
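/*
 * Branch recording is enabled via DBG_EXTN_CFG.LBRV2EN; freezing the
 * LBR stack on PMI keeps records from being overwritten before the
 * handler has read them
 */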
void amd_pmu_lbr_enable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 lbr_select, dbg_ctl, dbg_extn_cfg;

	if (!cpuc->lbr_users || !x86_pmu.lbr_nr)
		return;

	/* Set hardware branch filter */
	if (cpuc->lbr_select) {
		lbr_select = cpuc->lbr_sel->config & LBR_SELECT_MASK;
		wrmsrl(MSR_AMD64_LBR_SELECT, lbr_select);
	}

	rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);
	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);

	wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg | DBG_EXTN_CFG_LBRV2EN);
}
void amd_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	u64 dbg_ctl, dbg_extn_cfg;

	if (!cpuc->lbr_users || !x86_pmu.lbr_nr)
		return;

	rdmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg);
	rdmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl);

	wrmsrl(MSR_AMD_DBG_EXTN_CFG, dbg_extn_cfg & ~DBG_EXTN_CFG_LBRV2EN);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, dbg_ctl & ~DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
}
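/*
 * The LBR stack depth is enumerated by CPUID leaf 0x80000022 EBX on
 * processors that have the LBRV2 feature
 */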
__init int amd_pmu_lbr_init(void)
{
	union cpuid_0x80000022_ebx ebx;

	if (x86_pmu.version < 2 || !boot_cpu_has(X86_FEATURE_AMD_LBR_V2))
		return -EOPNOTSUPP;

	/* Set number of entries */
	ebx.full = cpuid_ebx(EXT_PERFMON_DEBUG_FEATURES);
	x86_pmu.lbr_nr = ebx.split.lbr_v2_stack_sz;

	pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);

	return 0;
}