// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/sysfs.h>
#include <linux/nospec.h>
#include <asm/intel-family.h>
#include "probe.h"
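
/*
 * Event indices into the msr[] probe table below.  The values match the
 * "event=0x00".."0x07" encodings exported through sysfs, and
 * PERF_MSR_EVENT_MAX bounds event->attr.config in msr_event_init().
 */
enum perf_msr_id {
	PERF_MSR_TSC		= 0,
	PERF_MSR_APERF		= 1,
	PERF_MSR_MPERF		= 2,
	PERF_MSR_PPERF		= 3,
	PERF_MSR_SMI		= 4,
	PERF_MSR_PTSC		= 5,
	PERF_MSR_IRPERF		= 6,
	PERF_MSR_THERM		= 7,
	PERF_MSR_EVENT_MAX,
};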
static bool test_aperfmperf(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_therm_status(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_DTHERM);
}
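
/*
 * SMI_COUNT and PPERF have no CPUID feature flag, so availability is decided
 * by whitelisting the Intel Family 6 models known to implement them.
 */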
static bool test_intel(int idx, void *data)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:

	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:

	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_SAPPHIRERAPIDS_X:

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_D:
	case INTEL_FAM6_ATOM_AIRMONT:

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_D:
	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
	case INTEL_FAM6_ATOM_TREMONT_D:
	case INTEL_FAM6_ATOM_TREMONT:
	case INTEL_FAM6_ATOM_TREMONT_L:

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
	case INTEL_FAM6_COMETLAKE_L:
	case INTEL_FAM6_COMETLAKE:
	case INTEL_FAM6_ICELAKE_L:
	case INTEL_FAM6_ICELAKE:
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
	case INTEL_FAM6_TIGERLAKE_L:
	case INTEL_FAM6_TIGERLAKE:
	case INTEL_FAM6_ROCKETLAKE:
	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
	case INTEL_FAM6_ALDERLAKE_N:
	case INTEL_FAM6_RAPTORLAKE:
	case INTEL_FAM6_RAPTORLAKE_P:
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}
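
/*
 * sysfs event strings; each entry below becomes an msr/<name>/ event,
 * e.g. "perf stat -e msr/tsc/ -e msr/smi/ ...".
 */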
PMU_EVENT_ATTR_STRING(tsc,				attr_tsc,		"event=0x00"	);
PMU_EVENT_ATTR_STRING(aperf,				attr_aperf,		"event=0x01"	);
PMU_EVENT_ATTR_STRING(mperf,				attr_mperf,		"event=0x02"	);
PMU_EVENT_ATTR_STRING(pperf,				attr_pperf,		"event=0x03"	);
PMU_EVENT_ATTR_STRING(smi,				attr_smi,		"event=0x04"	);
PMU_EVENT_ATTR_STRING(ptsc,				attr_ptsc,		"event=0x05"	);
PMU_EVENT_ATTR_STRING(irperf,				attr_irperf,		"event=0x06"	);
PMU_EVENT_ATTR_STRING(cpu_thermal_margin,		attr_therm,		"event=0x07"	);
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot,	attr_therm_snap,	"1"		);
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit,		attr_therm_unit,	"C"		);
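
/*
 * Bitmask of events found usable by perf_msr_probe(); msr_event_init()
 * rejects any event whose bit is clear.
 */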
static unsigned long msr_mask;

PMU_EVENT_GROUP(events, aperf);
PMU_EVENT_GROUP(events, mperf);
PMU_EVENT_GROUP(events, pperf);
PMU_EVENT_GROUP(events, smi);
PMU_EVENT_GROUP(events, ptsc);
PMU_EVENT_GROUP(events, irperf);
static struct attribute *attrs_therm[] = {
	&attr_therm.attr.attr,
	&attr_therm_snap.attr.attr,
	&attr_therm_unit.attr.attr,
	NULL,
};

static struct attribute_group group_therm = {
	.name  = "events",
	.attrs = attrs_therm,
};
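
/*
 * Probe table: { MSR address, sysfs group, availability test }.  The TSC
 * entry has no MSR and is never probed (.no_check); everything else is
 * validated by perf_msr_probe() at init time.
 */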
static struct perf_msr msr[] = {
	[PERF_MSR_TSC]		= { .no_check = true,							},
	[PERF_MSR_APERF]	= { MSR_IA32_APERF,		&group_aperf,	test_aperfmperf,	},
	[PERF_MSR_MPERF]	= { MSR_IA32_MPERF,		&group_mperf,	test_aperfmperf,	},
	[PERF_MSR_PPERF]	= { MSR_PPERF,			&group_pperf,	test_intel,		},
	[PERF_MSR_SMI]		= { MSR_SMI_COUNT,		&group_smi,	test_intel,		},
	[PERF_MSR_PTSC]		= { MSR_F15H_PTSC,		&group_ptsc,	test_ptsc,		},
	[PERF_MSR_IRPERF]	= { MSR_F17H_IRPERF,		&group_irperf,	test_irperf,		},
	[PERF_MSR_THERM]	= { MSR_IA32_THERM_STATUS,	&group_therm,	test_therm_status,	},
};
static struct attribute *events_attrs[] = {
	&attr_tsc.attr.attr,
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-63");
static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};
static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};
static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};
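
/*
 * Per-event groups merged in via attr_update; perf_msr_probe() marks the
 * groups of unavailable events as not visible, so only probed events show
 * up under events/ in sysfs.
 */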
static const struct attribute_group *attr_update[] = {
	&group_aperf,
	&group_mperf,
	&group_pperf,
	&group_smi,
	&group_ptsc,
	&group_irperf,
	&group_therm,
	NULL,
};
static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);

	if (!(msr_mask & (1 << cfg)))
		return -EINVAL;

	event->hw.idx		= -1;
	event->hw.event_base	= msr[cfg].msr;
	event->hw.config	= cfg;

	return 0;
}
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc_ordered();

	return now;
}
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value: */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		/* The SMI count is a 32-bit MSR; sign-extend the delta to handle wrap: */
		delta = sign_extend64(delta, 31);
		local64_add(delta, &event->count);
	} else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
		/* If valid, extract digital readout, otherwise set to -1: */
		now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;
		local64_set(&event->count, now);
	} else {
		local64_add(delta, &event->count);
	}
}
static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now = msr_read_counter(event);

	local64_set(&event->hw.prev_count, now);
}
static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}
static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}
static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}
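
/*
 * Free-running counters: no sampling interrupt (PERF_PMU_CAP_NO_INTERRUPT)
 * and no exclude_* filtering (PERF_PMU_CAP_NO_EXCLUDE); values are read on
 * demand via msr_event_update().
 */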
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.attr_update	= attr_update,
};
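
/*
 * The TSC event is the baseline; without X86_FEATURE_TSC there is nothing
 * useful to expose, so registration is skipped entirely.
 */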
static int __init msr_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	msr_mask = perf_msr_probe(msr, PERF_MSR_EVENT_MAX, true, NULL);

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);