/*
 * Support Intel RAPL energy consumption counters
 * Copyright (C) 2013 Google, Inc., Stephane Eranian
 *
 * The Intel RAPL interface is specified in the IA-32 Manual, Vol. 3b,
 * section 14.7.1 (September 2013).
 *
 * RAPL provides more controls than just reporting energy consumption;
 * however, here we only expose the free-running energy consumption
 * counters (pp0, pkg, dram, pp1/gpu, psys).
 *
 * Each of those counters increments in an energy unit defined by the
 * MSR_RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules.
 *
 * Counter to RAPL event mappings:
 *
 *  pp0 counter: consumption of all physical cores (power plane 0)
 *	  event: rapl_energy_cores
 *
 *  pkg counter: consumption of the whole processor package
 *	  event: rapl_energy_pkg
 *
 *  dram counter: consumption of the dram domain (servers only)
 *	  event: rapl_energy_dram
 *
 *  gpu counter: consumption of the built-in GPU domain (clients only)
 *	  event: rapl_energy_gpu
 *
 *  psys counter: consumption of the built-in psys domain (clients only)
 *	  event: rapl_energy_psys
 *
 * We manage those counters as free running (read-only). They may be
 * used simultaneously by other tools, such as turbostat.
 *
 * The events only support system-wide mode counting. There is no
 * sampling support because it does not make sense and is not
 * supported by the RAPL hardware.
 *
 * Because we want to avoid floating-point operations in the kernel,
 * the events are all reported in fixed-point arithmetic (32.32).
 * Tools must adjust the counts to convert them to Watts using
 * the duration of the measurement. Tools may use a function such as
 * ldexp(raw_count, -32);
 */
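/*
 * For example, a hypothetical userspace consumer (a sketch, not part of
 * this driver) could convert a raw 32.32 count into Joules and Watts:
 *
 *	double joules = ldexp((double)raw_count, -32);
 *	double watts  = joules / elapsed_seconds;
 */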
#define pr_fmt(fmt) "RAPL PMU: " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include "../perf_event.h"

MODULE_LICENSE("GPL");
/*
 * RAPL energy status counters
 */
#define RAPL_IDX_PP0_NRG_STAT	0	/* all cores */
#define INTEL_RAPL_PP0		0x1	/* pseudo-encoding */
#define RAPL_IDX_PKG_NRG_STAT	1	/* entire package */
#define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
#define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
#define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
#define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
#define RAPL_IDX_PSYS_NRG_STAT	4	/* psys */
#define INTEL_RAPL_PSYS		0x5	/* pseudo-encoding */

#define NR_RAPL_DOMAINS		0x5
static const char *const rapl_domain_names[NR_RAPL_DOMAINS] __initconst = {
/* Clients have PP0, PKG, PP1 */
#define RAPL_IDX_CLN	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* Servers have PP0, PKG, RAM */
#define RAPL_IDX_SRV	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)

/* Haswell clients have PP0, PKG, RAM, PP1 */
#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
			 1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT|\
			 1<<RAPL_IDX_PP1_NRG_STAT)

/* SKL clients have PP0, PKG, RAM, PP1, PSYS */
#define RAPL_IDX_SKL_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
			  1<<RAPL_IDX_PKG_NRG_STAT|\
			  1<<RAPL_IDX_RAM_NRG_STAT|\
			  1<<RAPL_IDX_PP1_NRG_STAT|\
			  1<<RAPL_IDX_PSYS_NRG_STAT)

/* Knights Landing has PKG, RAM */
#define RAPL_IDX_KNL	(1<<RAPL_IDX_PKG_NRG_STAT|\
			 1<<RAPL_IDX_RAM_NRG_STAT)
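/*
 * Illustrative check of the encodings above: RAPL_IDX_KNL is
 * (1 << 1) | (1 << 2) == 0x6, so on Knights Landing only the pkg and
 * ram events pass the rapl_cntr_mask test in rapl_pmu_event_init().
 */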
/*
 * event code: LSB 8 bits, passed in attr->config
 * any other bit is reserved
 */
#define RAPL_EVENT_MASK	0xFFULL
#define RAPL_CNTR_WIDTH 32
#define RAPL_EVENT_ATTR_STR(_name, v, str)					\
static struct perf_pmu_events_attr event_attr_##v = {				\
	.attr		= __ATTR(_name, 0444, perf_event_sysfs_show, NULL),	\
	.id		= 0,							\
	.event_str	= str,							\
};

struct rapl_pmu {
	raw_spinlock_t		lock;
	int			n_active;
	int			cpu;
	struct list_head	active_list;
	struct pmu		*pmu;
	ktime_t			timer_interval;
	struct hrtimer		hrtimer;
};

struct rapl_pmus {
	struct pmu		pmu;
	unsigned int		maxpkg;
	struct rapl_pmu		*pmus[];
};
/* 1/2^hw_unit Joule */
static int rapl_hw_unit[NR_RAPL_DOMAINS] __read_mostly;
static struct rapl_pmus *rapl_pmus;
static cpumask_t rapl_cpu_mask;
static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
	unsigned int pkgid = topology_logical_package_id(cpu);

	/*
	 * The unsigned check also catches the '-1' return value for
	 * non-existent mappings in the topology map.
	 */
	return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}
static inline u64 rapl_read_counter(struct perf_event *event)
{
	u64 raw;

	rdmsrl(event->hw.event_base, raw);

	return raw;
}
static inline u64 rapl_scale(u64 v, int cfg)
{
	if (cfg > NR_RAPL_DOMAINS) {
		pr_warn("Invalid domain %d, failed to scale data\n", cfg);
		return v;
	}
	/*
	 * Scale delta to the smallest unit (1/2^32).
	 * Users must then scale back: count * 2^-32 to get Joules,
	 * or use ldexp(count, -32).
	 * Watts = Joules/Time delta
	 */
	return v << (32 - rapl_hw_unit[cfg - 1]);
}
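/*
 * For example (illustrative): on SandyBridge, rapl_hw_unit is 16, so a
 * raw delta of 1 (i.e. 2^-16 Joules) is returned as 1 << (32 - 16) =
 * 2^16 units of 2^-32 Joules, the same energy expressed on the common
 * 32.32 scale.
 */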
static u64 rapl_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;
	s64 delta, sdelta;
	int shift = RAPL_CNTR_WIDTH;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(event->hw.event_base, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count) {
		cpu_relax();
		goto again;
	}

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;
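	/*
	 * Illustrative example of the shift trick above: if the 32-bit
	 * counter wrapped from 0xffffffff to 1, then
	 * (1 << 32) - (0xffffffffULL << 32) == 2 << 32 (mod 2^64), and the
	 * arithmetic right shift by 32 yields delta = 2, the true distance
	 * travelled by the counter.
	 */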
	sdelta = rapl_scale(delta, event->hw.config);

	local64_add(sdelta, &event->count);

	return new_raw_count;
}
static void rapl_start_hrtimer(struct rapl_pmu *pmu)
{
	hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
		      HRTIMER_MODE_REL_PINNED);
}
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
	struct rapl_pmu *pmu = container_of(hrtimer, struct rapl_pmu, hrtimer);
	struct perf_event *event;
	unsigned long flags;

	if (!pmu->n_active)
		return HRTIMER_NORESTART;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	list_for_each_entry(event, &pmu->active_list, active_entry)
		rapl_event_update(event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	hrtimer_forward_now(hrtimer, pmu->timer_interval);

	return HRTIMER_RESTART;
}
static void rapl_hrtimer_init(struct rapl_pmu *pmu)
{
	struct hrtimer *hr = &pmu->hrtimer;

	hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hr->function = rapl_hrtimer_handle;
}
static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
				   struct perf_event *event)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;

	list_add_tail(&event->active_entry, &pmu->active_list);

	local64_set(&event->hw.prev_count, rapl_read_counter(event));

	pmu->n_active++;
	if (pmu->n_active == 1)
		rapl_start_hrtimer(pmu);
}
static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);
	__rapl_pmu_event_start(pmu, event);
	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	/* mark event as deactivated and stopped */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		WARN_ON_ONCE(pmu->n_active <= 0);
		pmu->n_active--;
		if (pmu->n_active == 0)
			hrtimer_cancel(&pmu->hrtimer);

		list_del(&event->active_entry);

		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	/* check if update of sw counter is necessary */
	if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		rapl_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}

	raw_spin_unlock_irqrestore(&pmu->lock, flags);
}
static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
	struct rapl_pmu *pmu = event->pmu_private;
	struct hw_perf_event *hwc = &event->hw;
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu->lock, flags);

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (mode & PERF_EF_START)
		__rapl_pmu_event_start(pmu, event);

	raw_spin_unlock_irqrestore(&pmu->lock, flags);

	return 0;
}
static void rapl_pmu_event_del(struct perf_event *event, int flags)
{
	rapl_pmu_event_stop(event, PERF_EF_UPDATE);
}
static int rapl_pmu_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config & RAPL_EVENT_MASK;
	int bit, msr, ret = 0;
	struct rapl_pmu *pmu;

	/* only look at RAPL events */
	if (event->attr.type != rapl_pmus->pmu.type)
		return -ENOENT;

	/* check only supported bits are set */
	if (event->attr.config & ~RAPL_EVENT_MASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;

	/*
	 * check event is known (determines counter)
	 */
	switch (cfg) {
	case INTEL_RAPL_PP0:
		bit = RAPL_IDX_PP0_NRG_STAT;
		msr = MSR_PP0_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PKG:
		bit = RAPL_IDX_PKG_NRG_STAT;
		msr = MSR_PKG_ENERGY_STATUS;
		break;
	case INTEL_RAPL_RAM:
		bit = RAPL_IDX_RAM_NRG_STAT;
		msr = MSR_DRAM_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PP1:
		bit = RAPL_IDX_PP1_NRG_STAT;
		msr = MSR_PP1_ENERGY_STATUS;
		break;
	case INTEL_RAPL_PSYS:
		bit = RAPL_IDX_PSYS_NRG_STAT;
		msr = MSR_PLATFORM_ENERGY_STATUS;
		break;
	default:
		return -EINVAL;
	}

	/* check event supported */
	if (!(rapl_cntr_mask & (1 << bit)))
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	/* must be done before validate_group */
	pmu = cpu_to_rapl_pmu(event->cpu);
	if (!pmu)
		return -EINVAL;
	event->cpu = pmu->cpu;
	event->pmu_private = pmu;
	event->hw.event_base = msr;
	event->hw.config = cfg;
	event->hw.idx = bit;

	return ret;
}
static void rapl_pmu_event_read(struct perf_event *event)
{
	rapl_event_update(event);
}
static ssize_t rapl_get_attr_cpumask(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &rapl_cpu_mask);
}

static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
static struct attribute *rapl_pmu_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group rapl_pmu_attr_group = {
	.attrs = rapl_pmu_attrs,
};
RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
RAPL_EVENT_ATTR_STR(energy-pkg,   rapl_pkg,   "event=0x02");
RAPL_EVENT_ATTR_STR(energy-ram,   rapl_ram,   "event=0x03");
RAPL_EVENT_ATTR_STR(energy-gpu,   rapl_gpu,   "event=0x04");
RAPL_EVENT_ATTR_STR(energy-psys,  rapl_psys,  "event=0x05");

RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
RAPL_EVENT_ATTR_STR(energy-pkg.unit,   rapl_pkg_unit,   "Joules");
RAPL_EVENT_ATTR_STR(energy-ram.unit,   rapl_ram_unit,   "Joules");
RAPL_EVENT_ATTR_STR(energy-gpu.unit,   rapl_gpu_unit,   "Joules");
RAPL_EVENT_ATTR_STR(energy-psys.unit,  rapl_psys_unit,  "Joules");
/*
 * we compute in ~0.23 nJ increments (2^-32 Joules) regardless of the MSR
 * unit; 2.3283064365386962890625e-10 is exactly 2^-32.
 */
RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-pkg.scale,   rapl_pkg_scale,   "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-ram.scale,   rapl_ram_scale,   "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-gpu.scale,   rapl_gpu_scale,   "2.3283064365386962890625e-10");
RAPL_EVENT_ATTR_STR(energy-psys.scale,  rapl_psys_scale,  "2.3283064365386962890625e-10");
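/*
 * With the PMU registered under the name "power" (see rapl_pmu_init()
 * below), the .unit and .scale attributes above are what let a command
 * like the following (an expected-usage sketch, assuming a supported
 * CPU) report energy directly in Joules:
 *
 *	perf stat -a -e power/energy-pkg/ -- sleep 1
 */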
static struct attribute *rapl_events_srv_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};
static struct attribute *rapl_events_cln_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	NULL,
};
static struct attribute *rapl_events_hsw_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};
static struct attribute *rapl_events_skl_attr[] = {
	EVENT_PTR(rapl_cores),
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_gpu),
	EVENT_PTR(rapl_ram),
	EVENT_PTR(rapl_psys),

	EVENT_PTR(rapl_cores_unit),
	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_gpu_unit),
	EVENT_PTR(rapl_ram_unit),
	EVENT_PTR(rapl_psys_unit),

	EVENT_PTR(rapl_cores_scale),
	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_gpu_scale),
	EVENT_PTR(rapl_ram_scale),
	EVENT_PTR(rapl_psys_scale),
	NULL,
};
static struct attribute *rapl_events_knl_attr[] = {
	EVENT_PTR(rapl_pkg),
	EVENT_PTR(rapl_ram),

	EVENT_PTR(rapl_pkg_unit),
	EVENT_PTR(rapl_ram_unit),

	EVENT_PTR(rapl_pkg_scale),
	EVENT_PTR(rapl_ram_scale),
	NULL,
};
static struct attribute_group rapl_pmu_events_group = {
	.name = "events",
	.attrs = NULL, /* patched at runtime */
};
PMU_FORMAT_ATTR(event, "config:0-7");
static struct attribute *rapl_formats_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group rapl_pmu_format_group = {
	.name = "format",
	.attrs = rapl_formats_attr,
};
static const struct attribute_group *rapl_attr_groups[] = {
	&rapl_pmu_attr_group,
	&rapl_pmu_format_group,
	&rapl_pmu_events_group,
	NULL,
};
static int rapl_cpu_offline(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	/* Check if exiting cpu is used for collecting rapl events */
	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
		return 0;

	pmu->cpu = -1;
	/* Find a new cpu to collect rapl events */
	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

	/* Migrate rapl events to the new target */
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &rapl_cpu_mask);
		pmu->cpu = target;
		perf_pmu_migrate_context(pmu->pmu, cpu, target);
	}
	return 0;
}
static int rapl_cpu_online(unsigned int cpu)
{
	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
	int target;

	if (!pmu) {
		pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
		if (!pmu)
			return -ENOMEM;

		raw_spin_lock_init(&pmu->lock);
		INIT_LIST_HEAD(&pmu->active_list);
		pmu->pmu = &rapl_pmus->pmu;
		pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
		rapl_hrtimer_init(pmu);

		rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
	}

	/*
	 * Check if there is an online cpu in the package which collects rapl
	 * events already.
	 */
	target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
	if (target < nr_cpu_ids)
		return 0;

	cpumask_set_cpu(cpu, &rapl_cpu_mask);
	pmu->cpu = cpu;
	return 0;
}
static int rapl_check_hw_unit(bool apply_quirk)
{
	u64 msr_rapl_power_unit_bits;
	int i;

	/* protect rdmsrl() to handle virtualization */
	if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
		return -1;
	for (i = 0; i < NR_RAPL_DOMAINS; i++)
		rapl_hw_unit[i] = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
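	/*
	 * Bits 12:8 of MSR_RAPL_POWER_UNIT hold the energy status unit as
	 * a power of two. For example (illustrative), a field value of 16
	 * means each count is 1/2^16 Joules, the SandyBridge unit noted at
	 * the top of this file.
	 */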
	/*
	 * DRAM domain on HSW server and KNL has a fixed energy unit which
	 * can be different from the unit of the power unit MSR. See
	 * "Intel Xeon Processor E5-1600 and E5-2600 v3 Product Families, V2
	 * of 2. Datasheet, September 2014, Reference Number: 330784-001"
	 */
	if (apply_quirk)
		rapl_hw_unit[RAPL_IDX_RAM_NRG_STAT] = 16;

	/*
	 * Calculate the timer rate:
	 * Use a reference of 200W for scaling the timeout to avoid counter
	 * overflows. 200W = 200 Joules/sec.
	 * Divide the interval by 2 to avoid lockstep (2 * 100).
	 * If the hw unit is 32, then we use 2 ms: 1/200/2.
	 */
	rapl_timer_ms = 2;
	if (rapl_hw_unit[0] < 32) {
		rapl_timer_ms = (1000 / (2 * 100));
		rapl_timer_ms *= (1ULL << (32 - rapl_hw_unit[0] - 1));
	}
	return 0;
}
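/*
 * Worked example for the timer rate above (illustrative): with
 * rapl_hw_unit[0] == 16, the 32-bit counter spans 2^32 * 2^-16 J =
 * 65536 J, which at the 200 W reference lasts ~328 s. The code arrives
 * at half of that: 5 ms * 2^(32 - 16 - 1) = 163840 ms ~= 164 s.
 */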
static void __init rapl_advertise(void)
{
	int i;

	pr_info("API unit is 2^-32 Joules, %d fixed counters, %llu ms ovfl timer\n",
		hweight32(rapl_cntr_mask), rapl_timer_ms);

	for (i = 0; i < NR_RAPL_DOMAINS; i++) {
		if (rapl_cntr_mask & (1 << i)) {
			pr_info("hw unit of domain %s 2^-%d Joules\n",
				rapl_domain_names[i], rapl_hw_unit[i]);
		}
	}
}
static void cleanup_rapl_pmus(void)
{
	int i;

	for (i = 0; i < rapl_pmus->maxpkg; i++)
		kfree(rapl_pmus->pmus[i]);
	kfree(rapl_pmus);
}
static int __init init_rapl_pmus(void)
{
	int maxpkg = topology_max_packages();
	size_t size;

	size = sizeof(*rapl_pmus) + maxpkg * sizeof(struct rapl_pmu *);
	rapl_pmus = kzalloc(size, GFP_KERNEL);
	if (!rapl_pmus)
		return -ENOMEM;

	rapl_pmus->maxpkg		= maxpkg;
	rapl_pmus->pmu.attr_groups	= rapl_attr_groups;
	rapl_pmus->pmu.task_ctx_nr	= perf_invalid_context;
	rapl_pmus->pmu.event_init	= rapl_pmu_event_init;
	rapl_pmus->pmu.add		= rapl_pmu_event_add;
	rapl_pmus->pmu.del		= rapl_pmu_event_del;
	rapl_pmus->pmu.start		= rapl_pmu_event_start;
	rapl_pmus->pmu.stop		= rapl_pmu_event_stop;
	rapl_pmus->pmu.read		= rapl_pmu_event_read;
	rapl_pmus->pmu.module		= THIS_MODULE;
	return 0;
}
#define X86_RAPL_MODEL_MATCH(model, init)	\
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
struct intel_rapl_init_fun {
	bool apply_quirk;
	int cntr_mask;
	struct attribute **attrs;
};
static const struct intel_rapl_init_fun snb_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_CLN,
	.attrs = rapl_events_cln_attr,
};

static const struct intel_rapl_init_fun hsx_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun hsw_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_HSW,
	.attrs = rapl_events_hsw_attr,
};

static const struct intel_rapl_init_fun snbep_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SRV,
	.attrs = rapl_events_srv_attr,
};

static const struct intel_rapl_init_fun knl_rapl_init __initconst = {
	.apply_quirk = true,
	.cntr_mask = RAPL_IDX_KNL,
	.attrs = rapl_events_knl_attr,
};

static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
	.apply_quirk = false,
	.cntr_mask = RAPL_IDX_SKL_CLN,
	.attrs = rapl_events_skl_attr,
};
static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,   snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,   snb_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,  hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,       hsx_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE,  skl_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT,   hsw_rapl_init),
	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),

	X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
	{},
};

MODULE_DEVICE_TABLE(x86cpu, rapl_cpu_match);
static int __init rapl_pmu_init(void)
{
	const struct x86_cpu_id *id;
	struct intel_rapl_init_fun *rapl_init;
	bool apply_quirk;
	int ret;

	id = x86_match_cpu(rapl_cpu_match);
	if (!id)
		return -ENODEV;

	rapl_init = (struct intel_rapl_init_fun *)id->driver_data;
	apply_quirk = rapl_init->apply_quirk;
	rapl_cntr_mask = rapl_init->cntr_mask;
	rapl_pmu_events_group.attrs = rapl_init->attrs;

	ret = rapl_check_hw_unit(apply_quirk);
	if (ret)
		return ret;

	ret = init_rapl_pmus();
	if (ret)
		return ret;

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
				"perf/x86/rapl:online",
				rapl_cpu_online, rapl_cpu_offline);
	if (ret)
		goto out;

	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
	if (ret)
		goto out1;

	rapl_advertise();
	return 0;

out1:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
	pr_warn("Initialization failed (%d), disabled\n", ret);
	cleanup_rapl_pmus();
	return ret;
}
module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
	perf_pmu_unregister(&rapl_pmus->pmu);
	cleanup_rapl_pmus();
}
module_exit(intel_rapl_exit);