/*
 * perf_event_intel_cstate.c: support cstate residency counters
 *
 * Copyright (C) 2015, Intel Corp.
 * Author: Kan Liang (kan.liang@intel.com)
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 */

/*
 * This file exports cstate-related free-running (read-only) counters
 * for perf. These counters may be used simultaneously by other tools,
 * such as turbostat. However, it still makes sense to implement them
 * in perf, because we can conveniently collect them together with
 * other events, and they become usable from tools without special MSR
 * support.
 *
 * The events only support system-wide counting. There is no sampling
 * support because the hardware does not provide it.
 *
 * According to the counters' scope and category, two PMUs are
 * registered with the perf_event core subsystem.
 *  - 'cstate_core': The counter is available for each physical core.
 *    The counters include CORE_C*_RESIDENCY.
 *  - 'cstate_pkg': The counter is available for each physical package.
 *    The counters include PKG_C*_RESIDENCY.
 *
 * All of these counters are specified in the Intel® 64 and IA-32
 * Architectures Software Developer's Manual Vol3b.
 *
 * Model specific counters:
 *	MSR_CORE_C1_RES: CORE C1 Residency Counter
 *			 perf code: 0x00
 *			 Available model: SLM,AMT
 *			 Scope: Core (each processor core has a MSR)
 *	MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
 *			       perf code: 0x01
 *			       Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
 *			       perf code: 0x02
 *			       Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
 *			       perf code: 0x03
 *			       Available model: SNB,IVB,HSW,BDW,SKL
 *			       Scope: Core
 *	MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
 *			      perf code: 0x00
 *			      Available model: SNB,IVB,HSW,BDW,SKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
 *			      perf code: 0x01
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
 *			      perf code: 0x02
 *			      Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
 *			      perf code: 0x03
 *			      Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL
 *			      Scope: Package (physical package)
 *	MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter.
 *			      perf code: 0x04
 *			      Available model: HSW ULT only
 *			      Scope: Package (physical package)
 *	MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter.
 *			      perf code: 0x05
 *			      Available model: HSW ULT only
 *			      Scope: Package (physical package)
 *	MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
 *			       perf code: 0x06
 *			       Available model: HSW ULT only
 *			       Scope: Package (physical package)
 */
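
/*
 * Illustrative usage (an assumption, not part of the original source):
 * after registration, the events below are expected to appear under
 * /sys/bus/event_source/devices/cstate_core/events/ and
 * /sys/bus/event_source/devices/cstate_pkg/events/, so a system-wide
 * count of, e.g., core C6 residency would look like:
 *
 *	perf stat -a -e cstate_core/c6-residency/ -- sleep 1
 *
 * Which event files actually show up depends on which MSRs probe
 * successfully on the running model (see cstate_probe_msr() below).
 */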

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include "perf_event.h"

#define DEFINE_CSTATE_FORMAT_ATTR(_var, _name, _format)		\
static ssize_t __cstate_##_var##_show(struct kobject *kobj,	\
				struct kobj_attribute *attr,	\
				char *page)			\
{								\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);		\
	return sprintf(page, _format "\n");			\
}								\
static struct kobj_attribute format_attr_##_var =		\
	__ATTR(_name, 0444, __cstate_##_var##_show, NULL)
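
/*
 * Illustration (assumption, not in the original file): the expansion
 * DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63") defines
 * __cstate_core_event_show(), which prints "config:0-63", plus a
 * read-only kobj attribute named "event"; via the "format" attribute
 * group below, that is expected to surface as the sysfs file
 * /sys/bus/event_source/devices/cstate_core/format/event.
 */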

static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf);

struct perf_cstate_msr {
	u64	msr;
	struct	perf_pmu_events_attr *attr;
	bool	(*test)(int idx);
};


/* cstate_core PMU */

static struct pmu cstate_core_pmu;
static bool has_cstate_core;

enum perf_cstate_core_id {
	/*
	 * cstate_core events
	 */
	PERF_CSTATE_CORE_C1_RES = 0,
	PERF_CSTATE_CORE_C3_RES,
	PERF_CSTATE_CORE_C6_RES,
	PERF_CSTATE_CORE_C7_RES,

	PERF_CSTATE_CORE_EVENT_MAX,
};

bool test_core(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */

	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */
		if (idx == PERF_CSTATE_CORE_C3_RES ||
		    idx == PERF_CSTATE_CORE_C6_RES)
			return true;
		break;
	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */

	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */

	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */

	case 78: /* 14nm Skylake Mobile */
	case 94: /* 14nm Skylake Desktop */
		if (idx == PERF_CSTATE_CORE_C3_RES ||
		    idx == PERF_CSTATE_CORE_C6_RES ||
		    idx == PERF_CSTATE_CORE_C7_RES)
			return true;
		break;
	case 55: /* 22nm Atom "Silvermont"                */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
	case 76: /* 14nm Atom "Airmont"                   */
		if (idx == PERF_CSTATE_CORE_C1_RES ||
		    idx == PERF_CSTATE_CORE_C6_RES)
			return true;
		break;
	}

	return false;
}

PMU_EVENT_ATTR_STRING(c1-residency, evattr_cstate_core_c1, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_core_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_core_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_core_c7, "event=0x03");

static struct perf_cstate_msr core_msr[] = {
	[PERF_CSTATE_CORE_C1_RES] = { MSR_CORE_C1_RES,		&evattr_cstate_core_c1,	test_core, },
	[PERF_CSTATE_CORE_C3_RES] = { MSR_CORE_C3_RESIDENCY,	&evattr_cstate_core_c3,	test_core, },
	[PERF_CSTATE_CORE_C6_RES] = { MSR_CORE_C6_RESIDENCY,	&evattr_cstate_core_c6,	test_core, },
	[PERF_CSTATE_CORE_C7_RES] = { MSR_CORE_C7_RESIDENCY,	&evattr_cstate_core_c7,	test_core, },
};

static struct attribute *core_events_attrs[PERF_CSTATE_CORE_EVENT_MAX + 1] = {
	NULL, /* filled at probe time by cstate_probe_msr() */
};

static struct attribute_group core_events_attr_group = {
	.name = "events",
	.attrs = core_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63");
static struct attribute *core_format_attrs[] = {
	&format_attr_core_event.attr,
	NULL,
};

static struct attribute_group core_format_attr_group = {
	.name = "format",
	.attrs = core_format_attrs,
};

static cpumask_t cstate_core_cpu_mask;
static DEVICE_ATTR(cpumask, S_IRUGO, cstate_get_attr_cpumask, NULL);

static struct attribute *cstate_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group cpumask_attr_group = {
	.attrs = cstate_cpumask_attrs,
};

static const struct attribute_group *core_attr_groups[] = {
	&core_events_attr_group,
	&core_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_core PMU end */


/* cstate_pkg PMU */

static struct pmu cstate_pkg_pmu;
static bool has_cstate_pkg;

enum perf_cstate_pkg_id {
	/*
	 * cstate_pkg events
	 */
	PERF_CSTATE_PKG_C2_RES = 0,
	PERF_CSTATE_PKG_C3_RES,
	PERF_CSTATE_PKG_C6_RES,
	PERF_CSTATE_PKG_C7_RES,
	PERF_CSTATE_PKG_C8_RES,
	PERF_CSTATE_PKG_C9_RES,
	PERF_CSTATE_PKG_C10_RES,

	PERF_CSTATE_PKG_EVENT_MAX,
};

bool test_pkg(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */

	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */
		if (idx == PERF_CSTATE_PKG_C3_RES ||
		    idx == PERF_CSTATE_PKG_C6_RES ||
		    idx == PERF_CSTATE_PKG_C7_RES)
			return true;
		break;
	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */

	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */

	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */

	case 78: /* 14nm Skylake Mobile */
	case 94: /* 14nm Skylake Desktop */
		if (idx == PERF_CSTATE_PKG_C2_RES ||
		    idx == PERF_CSTATE_PKG_C3_RES ||
		    idx == PERF_CSTATE_PKG_C6_RES ||
		    idx == PERF_CSTATE_PKG_C7_RES)
			return true;
		break;
	case 55: /* 22nm Atom "Silvermont"                */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
	case 76: /* 14nm Atom "Airmont"                   */
		if (idx == PERF_CSTATE_PKG_C6_RES)
			return true;
		break;
	case 69: /* 22nm Haswell ULT */
		if (idx == PERF_CSTATE_PKG_C2_RES ||
		    idx == PERF_CSTATE_PKG_C3_RES ||
		    idx == PERF_CSTATE_PKG_C6_RES ||
		    idx == PERF_CSTATE_PKG_C7_RES ||
		    idx == PERF_CSTATE_PKG_C8_RES ||
		    idx == PERF_CSTATE_PKG_C9_RES ||
		    idx == PERF_CSTATE_PKG_C10_RES)
			return true;
		break;
	}

	return false;
}

PMU_EVENT_ATTR_STRING(c2-residency, evattr_cstate_pkg_c2, "event=0x00");
PMU_EVENT_ATTR_STRING(c3-residency, evattr_cstate_pkg_c3, "event=0x01");
PMU_EVENT_ATTR_STRING(c6-residency, evattr_cstate_pkg_c6, "event=0x02");
PMU_EVENT_ATTR_STRING(c7-residency, evattr_cstate_pkg_c7, "event=0x03");
PMU_EVENT_ATTR_STRING(c8-residency, evattr_cstate_pkg_c8, "event=0x04");
PMU_EVENT_ATTR_STRING(c9-residency, evattr_cstate_pkg_c9, "event=0x05");
PMU_EVENT_ATTR_STRING(c10-residency, evattr_cstate_pkg_c10, "event=0x06");

static struct perf_cstate_msr pkg_msr[] = {
	[PERF_CSTATE_PKG_C2_RES] = { MSR_PKG_C2_RESIDENCY,	&evattr_cstate_pkg_c2,	test_pkg, },
	[PERF_CSTATE_PKG_C3_RES] = { MSR_PKG_C3_RESIDENCY,	&evattr_cstate_pkg_c3,	test_pkg, },
	[PERF_CSTATE_PKG_C6_RES] = { MSR_PKG_C6_RESIDENCY,	&evattr_cstate_pkg_c6,	test_pkg, },
	[PERF_CSTATE_PKG_C7_RES] = { MSR_PKG_C7_RESIDENCY,	&evattr_cstate_pkg_c7,	test_pkg, },
	[PERF_CSTATE_PKG_C8_RES] = { MSR_PKG_C8_RESIDENCY,	&evattr_cstate_pkg_c8,	test_pkg, },
	[PERF_CSTATE_PKG_C9_RES] = { MSR_PKG_C9_RESIDENCY,	&evattr_cstate_pkg_c9,	test_pkg, },
	[PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY,	&evattr_cstate_pkg_c10,	test_pkg, },
};

static struct attribute *pkg_events_attrs[PERF_CSTATE_PKG_EVENT_MAX + 1] = {
	NULL, /* filled at probe time by cstate_probe_msr() */
};

static struct attribute_group pkg_events_attr_group = {
	.name = "events",
	.attrs = pkg_events_attrs,
};

DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63");
static struct attribute *pkg_format_attrs[] = {
	&format_attr_pkg_event.attr,
	NULL,
};

static struct attribute_group pkg_format_attr_group = {
	.name = "format",
	.attrs = pkg_format_attrs,
};

static cpumask_t cstate_pkg_cpu_mask;

static const struct attribute_group *pkg_attr_groups[] = {
	&pkg_events_attr_group,
	&pkg_format_attr_group,
	&cpumask_attr_group,
	NULL,
};

/* cstate_pkg PMU end */


static ssize_t cstate_get_attr_cpumask(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu == &cstate_core_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask);
	else if (pmu == &cstate_pkg_pmu)
		return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask);
	else
		return 0;
}
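
/*
 * Consumers are expected to read the "cpumask" file to learn which
 * CPUs accept these events: one representative CPU per core for
 * cstate_core, one per package for cstate_pkg (see cstate_cpu_init()).
 */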

static int cstate_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;
	int ret = 0;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (event->pmu == &cstate_core_pmu) {
		if (cfg >= PERF_CSTATE_CORE_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_CORE_EVENT_MAX);
		if (!core_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = core_msr[cfg].msr;
	} else if (event->pmu == &cstate_pkg_pmu) {
		if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
			return -EINVAL;
		cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
		if (!pkg_msr[cfg].attr)
			return -EINVAL;
		event->hw.event_base = pkg_msr[cfg].msr;
	} else {
		return -ENOENT;
	}

	/* must be done before validate_group */
	event->hw.config = cfg;
	event->hw.idx = -1;

	return ret;
}

static inline u64 cstate_pmu_read_counter(struct perf_event *event)
{
	u64 val;

	rdmsrl(event->hw.event_base, val);
	return val;
}
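
/*
 * Free-running counters can't be started or stopped, so deltas are
 * accumulated optimistically: snapshot prev_count, read the MSR, and
 * retry if a concurrent update to prev_count won the cmpxchg.
 */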
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}

static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}

static void cstate_pmu_event_stop(struct perf_event *event, int mode)
{
	cstate_pmu_event_update(event);
}

static void cstate_pmu_event_del(struct perf_event *event, int mode)
{
	cstate_pmu_event_stop(event, PERF_EF_UPDATE);
}

static int cstate_pmu_event_add(struct perf_event *event, int mode)
{
	if (mode & PERF_EF_START)
		cstate_pmu_event_start(event, mode);

	return 0;
}
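
/*
 * The cpumask files advertise one reader CPU per core/package. When
 * that CPU goes offline, nominate an online sibling sharing the same
 * core/package (if any) and migrate the events to it so counting
 * continues uninterrupted.
 */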
static void cstate_cpu_exit(int cpu)
{
	int i, id, target;

	/* cpu exit for cstate core */
	if (has_cstate_core) {
		id = topology_core_id(cpu);
		target = -1;

		for_each_online_cpu(i) {
			if (i == cpu)
				continue;
			if (id == topology_core_id(i)) {
				target = i;
				break;
			}
		}
		if (cpumask_test_and_clear_cpu(cpu, &cstate_core_cpu_mask) && target >= 0)
			cpumask_set_cpu(target, &cstate_core_cpu_mask);
		WARN_ON(cpumask_empty(&cstate_core_cpu_mask));
		if (target >= 0)
			perf_pmu_migrate_context(&cstate_core_pmu, cpu, target);
	}

	/* cpu exit for cstate pkg */
	if (has_cstate_pkg) {
		id = topology_physical_package_id(cpu);
		target = -1;

		for_each_online_cpu(i) {
			if (i == cpu)
				continue;
			if (id == topology_physical_package_id(i)) {
				target = i;
				break;
			}
		}
		if (cpumask_test_and_clear_cpu(cpu, &cstate_pkg_cpu_mask) && target >= 0)
			cpumask_set_cpu(target, &cstate_pkg_cpu_mask);
		WARN_ON(cpumask_empty(&cstate_pkg_cpu_mask));
		if (target >= 0)
			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
	}
}

static void cstate_cpu_init(int cpu)
{
	int i, id;

	/* cpu init for cstate core */
	if (has_cstate_core) {
		id = topology_core_id(cpu);
		for_each_cpu(i, &cstate_core_cpu_mask) {
			if (id == topology_core_id(i))
				break;
		}
		/* No reader nominated for this core yet; claim it. */
		if (i >= nr_cpu_ids)
			cpumask_set_cpu(cpu, &cstate_core_cpu_mask);
	}

	/* cpu init for cstate pkg */
	if (has_cstate_pkg) {
		id = topology_physical_package_id(cpu);
		for_each_cpu(i, &cstate_pkg_cpu_mask) {
			if (id == topology_physical_package_id(i))
				break;
		}
		if (i >= nr_cpu_ids)
			cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
	}
}
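
/*
 * Hotplug ordering, as wired up below: a reader is nominated in
 * CPU_STARTING while the CPU comes up, and handed off in
 * CPU_DOWN_PREPARE, before the outgoing CPU stops running, so the
 * perf context can still be migrated.
 */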
static int cstate_cpu_notifier(struct notifier_block *self,
			       unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		break;
	case CPU_STARTING:
		cstate_cpu_init(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DYING:
		break;
	case CPU_ONLINE:
	case CPU_DEAD:
		break;
	case CPU_DOWN_PREPARE:
		cstate_cpu_exit(cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

/*
 * Probe the cstate events and insert the available ones into the sysfs
 * attrs. Return false if there are no available events.
 */
static bool cstate_probe_msr(struct perf_cstate_msr *msr,
			     struct attribute **events_attrs,
			     int max_event_nr)
{
	int i, j = 0;
	u64 val;

	/* Probe the cstate events. */
	for (i = 0; i < max_event_nr; i++) {
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining events in the sysfs attrs. */
	for (i = 0; i < max_event_nr; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	return j > 0;
}

static int __init cstate_init(void)
{
	/* SLM has a different MSR for PKG C6 */
	switch (boot_cpu_data.x86_model) {
	case 55:
	case 76:
	case 77:
		pkg_msr[PERF_CSTATE_PKG_C6_RES].msr = MSR_PKG_C7_RESIDENCY;
	}

	if (cstate_probe_msr(core_msr, core_events_attrs, PERF_CSTATE_CORE_EVENT_MAX))
		has_cstate_core = true;

	if (cstate_probe_msr(pkg_msr, pkg_events_attrs, PERF_CSTATE_PKG_EVENT_MAX))
		has_cstate_pkg = true;

	return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV;
}

static void __init cstate_cpumask_init(void)
{
	int cpu;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu)
		cstate_cpu_init(cpu);

	__perf_cpu_notifier(cstate_cpu_notifier);

	cpu_notifier_register_done();
}
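
/*
 * Both PMUs are counting-only: perf_invalid_context restricts them to
 * system-wide (CPU context) events, and PERF_PMU_CAP_NO_INTERRUPT
 * records that the hardware cannot raise a sampling interrupt.
 */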
static struct pmu cstate_core_pmu = {
	.attr_groups	= core_attr_groups,
	.name		= "cstate_core",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add, /* must have */
	.del		= cstate_pmu_event_del, /* must have */
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu cstate_pkg_pmu = {
	.attr_groups	= pkg_attr_groups,
	.name		= "cstate_pkg",
	.task_ctx_nr	= perf_invalid_context,
	.event_init	= cstate_pmu_event_init,
	.add		= cstate_pmu_event_add, /* must have */
	.del		= cstate_pmu_event_del, /* must have */
	.start		= cstate_pmu_event_start,
	.stop		= cstate_pmu_event_stop,
	.read		= cstate_pmu_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static void __init cstate_pmus_register(void)
{
	int err;

	if (has_cstate_core) {
		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
		if (WARN_ON(err))
			pr_info("Failed to register PMU %s error %d\n",
				cstate_core_pmu.name, err);
	}

	if (has_cstate_pkg) {
		err = perf_pmu_register(&cstate_pkg_pmu, cstate_pkg_pmu.name, -1);
		if (WARN_ON(err))
			pr_info("Failed to register PMU %s error %d\n",
				cstate_pkg_pmu.name, err);
	}
}

static int __init cstate_pmu_init(void)
{
	int err;

	if (cpu_has_hypervisor)
		return -ENODEV;

	err = cstate_init();
	if (err)
		return err;

	cstate_cpumask_init();

	cstate_pmus_register();

	return 0;
}

device_initcall(cstate_pmu_init);