/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC                                  32
#define INTEL_PMC_MAX_FIXED                                     3
#define INTEL_PMC_IDX_FIXED                                    32

#define X86_PMC_IDX_MAX                                        64

#define MSR_ARCH_PERFMON_PERFCTR0                             0xc1
#define MSR_ARCH_PERFMON_PERFCTR1                             0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0                           0x186
#define MSR_ARCH_PERFMON_EVENTSEL1                           0x187

#define ARCH_PERFMON_EVENTSEL_EVENT                     0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK                     0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR                       (1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS                        (1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE                      (1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL               (1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT                       (1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY                       (1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE                    (1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV                       (1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK                     0xFF000000ULL

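/*
 * Illustrative sketch, not part of the original file: the EVENTSEL
 * bits above compose into one 64-bit event-select value. 0x3c/0x00 is
 * the architectural "unhalted core cycles" event/umask pair defined
 * later in this file; the helper name is hypothetical.
 */
static inline u64 example_eventsel_core_cycles(void)
{
        u64 config = 0;

        config |= 0x3c & ARCH_PERFMON_EVENTSEL_EVENT;        /* event code */
        config |= (0x00 << 8) & ARCH_PERFMON_EVENTSEL_UMASK; /* unit mask  */
        config |= ARCH_PERFMON_EVENTSEL_USR;    /* count in user mode      */
        config |= ARCH_PERFMON_EVENTSEL_OS;     /* count in kernel mode    */
        config |= ARCH_PERFMON_EVENTSEL_ENABLE; /* turn the counter on     */

        return config;
}
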
#define HSW_IN_TX                                       (1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED                          (1ULL << 33)

#define AMD64_EVENTSEL_INT_CORE_ENABLE                  (1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY                        (1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY                         (1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT               37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK                \
        (0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT    \
        (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK   \
        (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT                            48
#define AMD64_L3_SLICE_MASK                             \
        ((0xFULL) << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT                           56
#define AMD64_L3_THREAD_MASK                            \
        ((0xFFULL) << AMD64_L3_THREAD_SHIFT)

#define X86_RAW_EVENT_MASK              \
        (ARCH_PERFMON_EVENTSEL_EVENT |  \
         ARCH_PERFMON_EVENTSEL_UMASK |  \
         ARCH_PERFMON_EVENTSEL_EDGE  |  \
         ARCH_PERFMON_EVENTSEL_INV   |  \
         ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS                     \
        (ARCH_PERFMON_EVENTSEL_EDGE |           \
         ARCH_PERFMON_EVENTSEL_INV |            \
         ARCH_PERFMON_EVENTSEL_CMASK |          \
         ARCH_PERFMON_EVENTSEL_ANY |            \
         ARCH_PERFMON_EVENTSEL_PIN_CONTROL |    \
         HSW_IN_TX |                            \
         HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK            \
        (X86_RAW_EVENT_MASK          |  \
         AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB         \
        (AMD64_EVENTSEL_EVENT        |  \
         ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS                              4
#define AMD64_NUM_COUNTERS_CORE                         6
#define AMD64_NUM_COUNTERS_NB                           4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL           0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK         (0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX         0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
                (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED              6
#define ARCH_PERFMON_EVENTS_COUNT                       7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
        struct {
                unsigned int version_id:8;
                unsigned int num_counters:8;
                unsigned int bit_width:8;
                unsigned int mask_length:8;
        } split;
        unsigned int full;
};

union cpuid10_ebx {
        struct {
                unsigned int no_unhalted_core_cycles:1;
                unsigned int no_instructions_retired:1;
                unsigned int no_unhalted_reference_cycles:1;
                unsigned int no_llc_reference:1;
                unsigned int no_llc_misses:1;
                unsigned int no_branch_instruction_retired:1;
                unsigned int no_branch_misses_retired:1;
        } split;
        unsigned int full;
};

union cpuid10_edx {
        struct {
                unsigned int num_counters_fixed:5;
                unsigned int bit_width_fixed:8;
                unsigned int reserved:19;
        } split;
        unsigned int full;
};

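/*
 * Illustrative sketch, not part of the original file: these unions
 * overlay the raw register values of CPUID leaf 0xA. cpuid() is the
 * helper from <asm/processor.h>; the function name is hypothetical.
 */
static inline void example_read_arch_perfmon_cpuid(void)
{
        union cpuid10_eax eax;
        union cpuid10_ebx ebx;
        union cpuid10_edx edx;
        unsigned int unused;

        cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);

        /*
         * eax.split.version_id         - architectural PMU version
         * eax.split.num_counters       - general-purpose counters
         * edx.split.num_counters_fixed - fixed-function counters
         * ebx.split.no_*               - set when an event is NOT available
         */
}
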
struct x86_pmu_capability {
        int             version;
        int             num_counters_gp;
        int             num_counters_fixed;
        int             bit_width_gp;
        int             bit_width_fixed;
        unsigned int    events_mask;
        int             events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0     0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS        (INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1     0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES  (INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2     0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES  (INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES  (1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

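/*
 * Illustrative sketch, not part of the original file: each fixed
 * counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL. Per
 * the Intel SDM, bit 0 of a field enables ring-0 counting, bit 1
 * ring-3 counting, and bit 3 the PMI on overflow. wrmsrl() comes from
 * <asm/msr.h>; the helper name is hypothetical.
 */
static inline void example_enable_fixed_ctr0(void)
{
        /* count Instr_Retired.Any in both kernel and user mode */
        wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, 0x3ULL << (4 * 0));
}
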
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS                         (INTEL_PMC_IDX_FIXED + 16)

#define GLOBAL_STATUS_COND_CHG                          BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF                        BIT_ULL(62)
#define GLOBAL_STATUS_UNC_OVF                           BIT_ULL(61)
#define GLOBAL_STATUS_ASIF                              BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN                   BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN                       BIT_ULL(58)
#define GLOBAL_STATUS_TRACE_TOPAPMI                     BIT_ULL(55)

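/*
 * Illustrative sketch, not part of the original file: a PMI handler
 * reads these condition bits from PERF_GLOBAL_STATUS
 * (MSR_CORE_PERF_GLOBAL_STATUS in <asm/msr-index.h>). rdmsrl() comes
 * from <asm/msr.h>; the helper name is hypothetical.
 */
static inline bool example_pebs_buffer_overflowed(void)
{
        u64 status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        return status & GLOBAL_STATUS_BUFFER_OVF;
}
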
/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES              0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL                  (1U<<0)
#define IBS_CAPS_FETCHSAM               (1U<<1)
#define IBS_CAPS_OPSAM                  (1U<<2)
#define IBS_CAPS_RDWROPCNT              (1U<<3)
#define IBS_CAPS_OPCNT                  (1U<<4)
#define IBS_CAPS_BRNTRGT                (1U<<5)
#define IBS_CAPS_OPCNTEXT               (1U<<6)
#define IBS_CAPS_RIPINVALIDCHK          (1U<<7)
#define IBS_CAPS_OPBRNFUSE              (1U<<8)
#define IBS_CAPS_FETCHCTLEXTD           (1U<<9)
#define IBS_CAPS_OPDATA4                (1U<<10)

#define IBS_CAPS_DEFAULT                (IBS_CAPS_AVAIL         \
                                         | IBS_CAPS_FETCHSAM    \
                                         | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL                          0x1cc
#define IBSCTL_LVT_OFFSET_VALID         (1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK          0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN       (1ULL<<57)
#define IBS_FETCH_VAL           (1ULL<<49)
#define IBS_FETCH_ENABLE        (1ULL<<48)
#define IBS_FETCH_CNT           0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT       0x0000FFFFULL

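/*
 * Illustrative sketch, not part of the original file: given a raw IBS
 * fetch control register value, the current periodic count sits in
 * bits 31:16 (IBS_FETCH_CNT) and the programmed maximum in bits 15:0
 * (IBS_FETCH_MAX_CNT); hardware scales the max-count field by 16
 * fetches. The helper name is hypothetical.
 */
static inline u16 example_ibs_fetch_count(u64 fetch_ctl)
{
        return (fetch_ctl & IBS_FETCH_CNT) >> 16;
}
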
/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT          (0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND     (0x0007FULL<<32)
#define IBS_OP_CNT_CTL          (1ULL<<19)
#define IBS_OP_VAL              (1ULL<<18)
#define IBS_OP_ENABLE           (1ULL<<17)
#define IBS_OP_MAX_CNT          0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT      0x007FFFFFULL   /* not a register bit mask */
#define IBS_RIP_INVALID         (1ULL<<38)

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
#else
static inline u32 get_ibs_caps(void) { return 0; }
#endif

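/*
 * Illustrative sketch, not part of the original file: callers gate IBS
 * setup on get_ibs_caps(), which returns 0 (no IBS_CAPS_AVAIL) when
 * IBS is not supported. The helper name is hypothetical.
 */
static inline bool example_ibs_op_sampling_supported(void)
{
        u32 caps = get_ibs_caps();

        return (caps & IBS_CAPS_AVAIL) && (caps & IBS_CAPS_OPSAM);
}
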
#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT       (1UL << 3)
#define PERF_EFLAGS_VM          (1UL << 5)

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)   perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)         {       \
        (regs)->ip = (__ip);                                    \
        (regs)->bp = caller_frame_pointer();                    \
        (regs)->cs = __KERNEL_CS;                               \
        (regs)->flags = 0;                                      \
        asm volatile(                                           \
                _ASM_MOV "%%"_ASM_SP ", %0\n"                   \
                : "=m" ((regs)->sp)                             \
                :: "memory"                                     \
        );                                                      \
}

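/*
 * Illustrative sketch, not part of the original file: the generic
 * perf_fetch_caller_regs() in <linux/perf_event.h> uses the macro
 * above to synthesize a pt_regs snapshot at the current call site
 * (_THIS_IP_ comes from <linux/kernel.h>). The wrapper name is
 * hypothetical.
 */
static inline void example_capture_caller_regs(struct pt_regs *regs)
{
        perf_arch_fetch_caller_regs(regs, _THIS_IP_);
}
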
struct perf_guest_switch_msr {
        unsigned msr;
        u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
        *nr = 0;
        return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
        memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif

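/*
 * Illustrative sketch, not part of the original file: a consumer such
 * as KVM fills struct x86_pmu_capability through
 * perf_get_x86_pmu_capability(), declared (or stubbed) above. Fixed
 * counters exist only from architectural PMU version 2 onward. The
 * helper name is hypothetical.
 */
static inline bool example_pmu_has_fixed_counters(void)
{
        struct x86_pmu_capability cap;

        perf_get_x86_pmu_capability(&cap);

        return cap.version >= 2 && cap.num_counters_fixed > 0;
}
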
#ifdef CONFIG_CPU_SUP_INTEL
 extern void intel_pt_handle_vmx(int on);
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
 extern void amd_pmu_enable_virt(void);
 extern void amd_pmu_disable_virt(void);
#else
 static inline void amd_pmu_enable_virt(void) { }
 static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */