// SPDX-License-Identifier: GPL-2.0
#include <linux/perf_event.h>
#include <linux/sysfs.h>
#include <linux/nospec.h>
#include <asm/intel-family.h>
#include "probe.h"

enum perf_msr_id {
	PERF_MSR_TSC		= 0,
	PERF_MSR_APERF		= 1,
	PERF_MSR_MPERF		= 2,
	PERF_MSR_PPERF		= 3,
	PERF_MSR_SMI		= 4,
	PERF_MSR_PTSC		= 5,
	PERF_MSR_IRPERF		= 6,
	PERF_MSR_THERM		= 7,
	PERF_MSR_EVENT_MAX,
};

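/*
 * Availability tests, one per probed MSR. perf_msr_probe() invokes these
 * to decide which events to expose; each simply checks the CPUID feature
 * bit that advertises the underlying MSR.
 */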
static bool test_aperfmperf(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_therm_status(int idx, void *data)
{
	return boot_cpu_has(X86_FEATURE_DTHERM);
}

static bool test_intel(int idx, void *data)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

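	/*
	 * Every family 6 model listed below has a working MSR_SMI_COUNT;
	 * only the Skylake-and-later group at the end of the switch also
	 * has MSR_PPERF.
	 */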
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:

	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_WESTMERE_EX:

	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_SANDYBRIDGE_X:

	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE_X:

	case INTEL_FAM6_HASWELL:
	case INTEL_FAM6_HASWELL_X:
	case INTEL_FAM6_HASWELL_L:
	case INTEL_FAM6_HASWELL_G:

	case INTEL_FAM6_BROADWELL:
	case INTEL_FAM6_BROADWELL_D:
	case INTEL_FAM6_BROADWELL_G:
	case INTEL_FAM6_BROADWELL_X:
	case INTEL_FAM6_SAPPHIRERAPIDS_X:
	case INTEL_FAM6_EMERALDRAPIDS_X:

	case INTEL_FAM6_ATOM_SILVERMONT:
	case INTEL_FAM6_ATOM_SILVERMONT_D:
	case INTEL_FAM6_ATOM_AIRMONT:

	case INTEL_FAM6_ATOM_GOLDMONT:
	case INTEL_FAM6_ATOM_GOLDMONT_D:
	case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
	case INTEL_FAM6_ATOM_TREMONT_D:
	case INTEL_FAM6_ATOM_TREMONT:
	case INTEL_FAM6_ATOM_TREMONT_L:

	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case INTEL_FAM6_SKYLAKE_L:
	case INTEL_FAM6_SKYLAKE:
	case INTEL_FAM6_SKYLAKE_X:
	case INTEL_FAM6_KABYLAKE_L:
	case INTEL_FAM6_KABYLAKE:
	case INTEL_FAM6_COMETLAKE_L:
	case INTEL_FAM6_COMETLAKE:
	case INTEL_FAM6_ICELAKE_L:
	case INTEL_FAM6_ICELAKE:
	case INTEL_FAM6_ICELAKE_X:
	case INTEL_FAM6_ICELAKE_D:
	case INTEL_FAM6_TIGERLAKE_L:
	case INTEL_FAM6_TIGERLAKE:
	case INTEL_FAM6_ROCKETLAKE:
	case INTEL_FAM6_ALDERLAKE:
	case INTEL_FAM6_ALDERLAKE_L:
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}

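/*
 * sysfs event descriptions: each event appears under
 * /sys/bus/event_source/devices/msr/events/, and the "event=0xNN" config
 * value indexes the msr[] table below. cpu_thermal_margin is a
 * point-in-time reading (.snapshot = 1), reported in degrees Celsius.
 */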
PMU_EVENT_ATTR_STRING(tsc,			attr_tsc,		"event=0x00");
PMU_EVENT_ATTR_STRING(aperf,			attr_aperf,		"event=0x01");
PMU_EVENT_ATTR_STRING(mperf,			attr_mperf,		"event=0x02");
PMU_EVENT_ATTR_STRING(pperf,			attr_pperf,		"event=0x03");
PMU_EVENT_ATTR_STRING(smi,			attr_smi,		"event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,			attr_ptsc,		"event=0x05");
PMU_EVENT_ATTR_STRING(irperf,			attr_irperf,		"event=0x06");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin,	attr_therm,		"event=0x07");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.snapshot, attr_therm_snap,	"1");
PMU_EVENT_ATTR_STRING(cpu_thermal_margin.unit,	attr_therm_unit,	"C");

/* Bitmask of events that survived perf_msr_probe(). */
static unsigned long msr_mask;

PMU_EVENT_GROUP(events, aperf);
PMU_EVENT_GROUP(events, mperf);
PMU_EVENT_GROUP(events, pperf);
PMU_EVENT_GROUP(events, smi);
PMU_EVENT_GROUP(events, ptsc);
PMU_EVENT_GROUP(events, irperf);

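/*
 * The thermal event carries the extra .snapshot/.unit attributes, so its
 * attribute group is spelled out by hand rather than via PMU_EVENT_GROUP().
 */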
static struct attribute *attrs_therm[] = {
	&attr_therm.attr.attr,
	&attr_therm_snap.attr.attr,
	&attr_therm_unit.attr.attr,
	NULL,
};

static struct attribute_group group_therm = {
	.name  = "events",
	.attrs = attrs_therm,
};

static struct perf_msr msr[] = {
	[PERF_MSR_TSC]		= { .no_check = true,						},
	[PERF_MSR_APERF]	= { MSR_IA32_APERF,	  &group_aperf,	 test_aperfmperf,	},
	[PERF_MSR_MPERF]	= { MSR_IA32_MPERF,	  &group_mperf,	 test_aperfmperf,	},
	[PERF_MSR_PPERF]	= { MSR_PPERF,		  &group_pperf,	 test_intel,		},
	[PERF_MSR_SMI]		= { MSR_SMI_COUNT,	  &group_smi,	 test_intel,		},
	[PERF_MSR_PTSC]		= { MSR_F15H_PTSC,	  &group_ptsc,	 test_ptsc,		},
	[PERF_MSR_IRPERF]	= { MSR_F17H_IRPERF,	  &group_irperf, test_irperf,		},
	[PERF_MSR_THERM]	= { MSR_IA32_THERM_STATUS, &group_therm, test_therm_status,	},
};

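/*
 * Once the PMU is registered, the probed events can be counted from
 * userspace with standard perf tooling, e.g.:
 *
 *	perf stat -e msr/tsc/,msr/smi/ -a sleep 1
 */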

static struct attribute *events_attrs[] = {
	/* Only "tsc" is unconditional; the rest arrive via attr_update. */
	&attr_tsc.attr.attr,
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

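/*
 * The "event" format consumes the entire config word; msr_event_init()
 * range-checks it against PERF_MSR_EVENT_MAX.
 */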
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};

static const struct attribute_group *attr_update[] = {
	&group_aperf,
	&group_mperf,
	&group_pperf,
	&group_smi,
	&group_ptsc,
	&group_irperf,
	&group_therm,
	NULL,
};

static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* Clamp the index under speculation (Spectre v1). */
	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);

	if (!(msr_mask & (1 << cfg)))
		return -EINVAL;

	event->hw.idx		= -1;
	event->hw.event_base	= msr[cfg].msr;
	event->hw.config	= cfg;

	return 0;
}

static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		now = rdtsc_ordered(); /* the TSC event has no event_base */

	return now;
}

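/*
 * The update path is lock-free: an NMI can land in the middle and update
 * the same event, so prev_count is claimed with a cmpxchg loop before
 * the delta is folded into the count.
 */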
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value: */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		/* MSR_SMI_COUNT is only 32 bits wide; sign-extend the delta. */
		delta = sign_extend64(delta, 31);
		local64_add(delta, &event->count);
	} else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
		/* If valid, extract digital readout, otherwise set to -1: */
		now = now & (1ULL << 31) ? (now >> 16) & 0x3f : -1;
		local64_set(&event->count, now);
	} else {
		local64_add(delta, &event->count);
	}
}

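/*
 * There is no hardware to program: add/start only snapshot the running
 * counter so later reads can compute a delta, and del/stop fold in the
 * final delta.
 */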
static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now = msr_read_counter(event);

	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}

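/*
 * PERF_PMU_CAP_NO_INTERRUPT: these counters cannot raise an overflow
 * interrupt, which is why msr_event_init() rejects any sample_period.
 * PERF_PMU_CAP_NO_EXCLUDE: the counts cannot be filtered by privilege
 * level.
 */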
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE,
	.attr_update	= attr_update,
};

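/*
 * The TSC event is the one entry with no feature test of its own
 * (.no_check above), so init insists on X86_FEATURE_TSC up front; the
 * remaining events are discovered by perf_msr_probe().
 */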
static int __init msr_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	msr_mask = perf_msr_probe(msr, PERF_MSR_EVENT_MAX, true, NULL);

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);