/*
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>

#include <asm/cpufeature.h>
#include <asm/perf_event.h>
#include <asm/msr.h>
#include <asm/smp.h>
#define NUM_COUNTERS_NB		4
#define NUM_COUNTERS_L2		4
#define MAX_COUNTERS		NUM_COUNTERS_NB

#define RDPMC_BASE_NB		6
#define RDPMC_BASE_LLC		10

#define COUNTER_SHIFT		16
static HLIST_HEAD(uncore_unused_list);

struct amd_uncore {
	int id;
	int refcnt;
	int cpu;
	int num_counters;
	int rdpmc_base;
	u32 msr_base;
	cpumask_t *active_mask;
	struct pmu *pmu;
	struct perf_event *events[MAX_COUNTERS];
	struct hlist_node node;
};
static struct amd_uncore * __percpu *amd_uncore_nb;
static struct amd_uncore * __percpu *amd_uncore_llc;

static struct pmu amd_nb_pmu;
static struct pmu amd_llc_pmu;

static cpumask_t amd_nb_active_mask;
static cpumask_t amd_llc_active_mask;
static bool is_nb_event(struct perf_event *event)
{
	return event->pmu->type == amd_nb_pmu.type;
}

static bool is_llc_event(struct perf_event *event)
{
	return event->pmu->type == amd_llc_pmu.type;
}

static struct amd_uncore *event_to_amd_uncore(struct perf_event *event)
{
	if (is_nb_event(event) && amd_uncore_nb)
		return *per_cpu_ptr(amd_uncore_nb, event->cpu);
	else if (is_llc_event(event) && amd_uncore_llc)
		return *per_cpu_ptr(amd_uncore_llc, event->cpu);

	return NULL;
}
static void amd_uncore_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev, new;
	s64 delta;

	/*
	 * Since we do not enable counter overflow interrupts,
	 * we do not have to worry about prev_count changing on us.
	 */

	prev = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new);
	local64_set(&hwc->prev_count, new);
	delta = (new << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
	delta >>= COUNTER_SHIFT;
	local64_add(delta, &event->count);
}
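/*
 * Worked example (added for illustration, not part of the original file):
 * the hardware counters are 48 bits wide, which is why both values are
 * shifted up by COUNTER_SHIFT (16) before the subtraction and the result is
 * shifted back down.  If prev = 0xffffffffffff and the counter wraps to
 * new = 0x5, then ((new << 16) - (prev << 16)) >> 16 = 6, the true number of
 * events, whereas a plain new - prev would produce a huge bogus delta.
 */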
static void amd_uncore_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		wrmsrl(hwc->event_base, (u64)local64_read(&hwc->prev_count));

	hwc->state = 0;
	wrmsrl(hwc->config_base, (hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE));
	perf_event_update_userpage(event);
}
static void amd_uncore_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base, hwc->config);
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		amd_uncore_read(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}
static int amd_uncore_add(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	/* are we already assigned? */
	if (hwc->idx != -1 && uncore->events[hwc->idx] == event)
		goto out;

	for (i = 0; i < uncore->num_counters; i++) {
		if (uncore->events[i] == event) {
			hwc->idx = i;
			goto out;
		}
	}

	/* if not, take the first available counter */
	hwc->idx = -1;
	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], NULL, event) == NULL) {
			hwc->idx = i;
			break;
		}
	}

out:
	if (hwc->idx == -1)
		return -EBUSY;

	hwc->config_base = uncore->msr_base + (2 * hwc->idx);
	hwc->event_base = uncore->msr_base + 1 + (2 * hwc->idx);
	hwc->event_base_rdpmc = uncore->rdpmc_base + hwc->idx;
	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		amd_uncore_start(event, PERF_EF_RELOAD);

	return 0;
}
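/*
 * Mapping example (added for illustration): the control and count MSRs are
 * interleaved, so for counter idx = 2 on the NB PMU the assignments above
 * yield config_base = MSR_F15H_NB_PERF_CTL + 4, event_base =
 * MSR_F15H_NB_PERF_CTL + 5, and event_base_rdpmc = RDPMC_BASE_NB + 2 = 8,
 * the index used by the RDPMC instruction in amd_uncore_read().
 */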
static void amd_uncore_del(struct perf_event *event, int flags)
{
	int i;
	struct amd_uncore *uncore = event_to_amd_uncore(event);
	struct hw_perf_event *hwc = &event->hw;

	amd_uncore_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < uncore->num_counters; i++) {
		if (cmpxchg(&uncore->events[i], event, NULL) == event)
			break;
	}

	hwc->idx = -1;
}
static int amd_uncore_event_init(struct perf_event *event)
{
	struct amd_uncore *uncore;
	struct hw_perf_event *hwc = &event->hw;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * NB and Last level cache counters (MSRs) are shared across all cores
	 * that share the same NB / Last level cache. On family 16h and below,
	 * interrupts can be directed to a single target core, however, event
	 * counts generated by processes running on other cores cannot be
	 * masked out. So we do not support sampling and per-thread events via
	 * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts.
	 */

	/* NB and Last level cache counters do not have usr/os/guest/host bits */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_host || event->attr.exclude_guest)
		return -EINVAL;

	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	if (event->cpu < 0)
		return -EINVAL;

	uncore = event_to_amd_uncore(event);
	if (!uncore)
		return -ENODEV;

	/*
	 * Since requests can come in on any of the shared cores, we remap
	 * them all to a single common cpu.
	 */
	event->cpu = uncore->cpu;

	return 0;
}
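/*
 * Usage sketch (assumption, added for illustration): with
 * PERF_PMU_CAP_NO_INTERRUPT and perf_invalid_context, only counting,
 * per-CPU/system-wide sessions are possible, e.g. something like
 *   perf stat -a -e amd_nb/event=0xNN,umask=0xMM/ -- sleep 1
 * where 0xNN/0xMM are placeholder encodings.  The perf tool consults the
 * "cpumask" attribute defined below and opens the event only on the one CPU
 * that represents each shared NB / LLC domain.
 */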
static ssize_t amd_uncore_attr_show_cpumask(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	cpumask_t *active_mask;
	struct pmu *pmu = dev_get_drvdata(dev);

	if (pmu->type == amd_nb_pmu.type)
		active_mask = &amd_nb_active_mask;
	else if (pmu->type == amd_llc_pmu.type)
		active_mask = &amd_llc_active_mask;
	else
		return 0;

	return cpumap_print_to_pagebuf(true, buf, active_mask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, amd_uncore_attr_show_cpumask, NULL);

static struct attribute *amd_uncore_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_uncore_attr_group = {
	.attrs = amd_uncore_attrs,
};
PMU_FORMAT_ATTR(event, "config:0-7,32-35");
PMU_FORMAT_ATTR(umask, "config:8-15");

static struct attribute *amd_uncore_format_attr[] = {
	&format_attr_event.attr,
	&format_attr_umask.attr,
	NULL,
};

static struct attribute_group amd_uncore_format_group = {
	.name = "format",
	.attrs = amd_uncore_format_attr,
};

static const struct attribute_group *amd_uncore_attr_groups[] = {
	&amd_uncore_attr_group,
	&amd_uncore_format_group,
	NULL,
};
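/*
 * Illustrative note (added, not part of the original file): the format
 * strings above tell the perf tool how to assemble attr.config.  For a
 * hypothetical event select of 0x1e0 with unit mask 0x1f, the low 8 bits of
 * the event select (0xe0) land in config[7:0], the upper bits (0x1) in
 * config[35:32], and the unit mask in config[15:8], i.e.
 * config = 0xe0 | (0x1fULL << 8) | (0x1ULL << 32), which event_init then
 * masks with AMD64_RAW_EVENT_MASK_NB.
 */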
static struct pmu amd_nb_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_nb",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static struct pmu amd_llc_pmu = {
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_uncore_attr_groups,
	.name		= "amd_l2",
	.event_init	= amd_uncore_event_init,
	.add		= amd_uncore_add,
	.del		= amd_uncore_del,
	.start		= amd_uncore_start,
	.stop		= amd_uncore_stop,
	.read		= amd_uncore_read,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};
static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
{
	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
			cpu_to_node(cpu));
}
static int amd_uncore_cpu_up_prepare(unsigned int cpu)
{
	struct amd_uncore *uncore_nb = NULL, *uncore_llc;

	if (amd_uncore_nb) {
		uncore_nb = amd_uncore_alloc(cpu);
		if (!uncore_nb)
			goto fail;
		uncore_nb->cpu = cpu;
		uncore_nb->num_counters = NUM_COUNTERS_NB;
		uncore_nb->rdpmc_base = RDPMC_BASE_NB;
		uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
		uncore_nb->active_mask = &amd_nb_active_mask;
		uncore_nb->pmu = &amd_nb_pmu;
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
	}

	if (amd_uncore_llc) {
		uncore_llc = amd_uncore_alloc(cpu);
		if (!uncore_llc)
			goto fail;
		uncore_llc->cpu = cpu;
		uncore_llc->num_counters = NUM_COUNTERS_L2;
		uncore_llc->rdpmc_base = RDPMC_BASE_LLC;
		uncore_llc->msr_base = MSR_F16H_L2I_PERF_CTL;
		uncore_llc->active_mask = &amd_llc_active_mask;
		uncore_llc->pmu = &amd_llc_pmu;
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore_llc;
	}

	return 0;

fail:
	if (amd_uncore_nb)
		*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
	kfree(uncore_nb);
	return -ENOMEM;
}
static struct amd_uncore *
amd_uncore_find_online_sibling(struct amd_uncore *this,
			       struct amd_uncore * __percpu *uncores)
{
	unsigned int cpu;
	struct amd_uncore *that;

	for_each_online_cpu(cpu) {
		that = *per_cpu_ptr(uncores, cpu);

		if (!that)
			continue;

		if (this == that)
			continue;

		if (this->id == that->id) {
			hlist_add_head(&this->node, &uncore_unused_list);
			this = that;
			break;
		}
	}

	this->refcnt++;
	return this;
}
static int amd_uncore_cpu_starting(unsigned int cpu)
{
	unsigned int eax, ebx, ecx, edx;
	struct amd_uncore *uncore;

	if (amd_uncore_nb) {
		uncore = *per_cpu_ptr(amd_uncore_nb, cpu);
		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
		uncore->id = ecx & 0xff;

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_nb);
		*per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
	}

	if (amd_uncore_llc) {
		uncore = *per_cpu_ptr(amd_uncore_llc, cpu);
		uncore->id = per_cpu(cpu_llc_id, cpu);

		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_llc);
		*per_cpu_ptr(amd_uncore_llc, cpu) = uncore;
	}

	return 0;
}
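/*
 * Note (added for illustration): CPUID leaf 0x8000001e reports the node ID
 * in ECX[7:0], so CPUs on the same node share one NB uncore, while
 * cpu_llc_id groups CPUs that share a last level cache.  When an incoming
 * CPU finds an online sibling with the same id, its freshly allocated
 * amd_uncore is parked on uncore_unused_list and freed later by
 * uncore_clean_online().
 */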
static void uncore_clean_online(void)
{
	struct amd_uncore *uncore;
	struct hlist_node *n;

	hlist_for_each_entry_safe(uncore, n, &uncore_unused_list, node) {
		hlist_del(&uncore->node);
		kfree(uncore);
	}
}
static void uncore_online(unsigned int cpu,
			  struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	uncore_clean_online();

	if (cpu == uncore->cpu)
		cpumask_set_cpu(cpu, uncore->active_mask);
}
static int amd_uncore_cpu_online(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_online(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_online(cpu, amd_uncore_llc);

	return 0;
}
static void uncore_down_prepare(unsigned int cpu,
				struct amd_uncore * __percpu *uncores)
{
	unsigned int i;
	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);

	if (this->cpu != cpu)
		return;

	/* this cpu is going down, migrate to a shared sibling if possible */
	for_each_online_cpu(i) {
		struct amd_uncore *that = *per_cpu_ptr(uncores, i);

		if (cpu == i)
			continue;

		if (this == that) {
			perf_pmu_migrate_context(this->pmu, cpu, i);
			cpumask_clear_cpu(cpu, that->active_mask);
			cpumask_set_cpu(i, that->active_mask);
			that->cpu = i;
			break;
		}
	}
}
static int amd_uncore_cpu_down_prepare(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_down_prepare(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_down_prepare(cpu, amd_uncore_llc);

	return 0;
}
static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
{
	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);

	if (cpu == uncore->cpu)
		cpumask_clear_cpu(cpu, uncore->active_mask);

	if (!--uncore->refcnt)
		kfree(uncore);

	*per_cpu_ptr(uncores, cpu) = NULL;
}

static int amd_uncore_cpu_dead(unsigned int cpu)
{
	if (amd_uncore_nb)
		uncore_dead(cpu, amd_uncore_nb);

	if (amd_uncore_llc)
		uncore_dead(cpu, amd_uncore_llc);

	return 0;
}
static int __init amd_uncore_init(void)
{
	int ret = -ENODEV;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		goto fail_nodev;

	if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
		goto fail_nodev;

	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
		amd_uncore_nb = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_nb) {
			ret = -ENOMEM;
			goto fail_nb;
		}
		ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
		if (ret)
			goto fail_nb;

		pr_info("perf: AMD NB counters detected\n");
		ret = 0;
	}

	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
		amd_uncore_llc = alloc_percpu(struct amd_uncore *);
		if (!amd_uncore_llc) {
			ret = -ENOMEM;
			goto fail_llc;
		}
		ret = perf_pmu_register(&amd_llc_pmu, amd_llc_pmu.name, -1);
		if (ret)
			goto fail_llc;

		pr_info("perf: AMD LLC counters detected\n");
		ret = 0;
	}

	/*
	 * Install callbacks. Core will call them for each online cpu.
	 */
	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
			      "PERF_X86_AMD_UNCORE_PREP",
			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
		goto fail_llc;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
			      "AP_PERF_X86_AMD_UNCORE_STARTING",
			      amd_uncore_cpu_starting, NULL))
		goto fail_prep;

	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
			      "AP_PERF_X86_AMD_UNCORE_ONLINE",
			      amd_uncore_cpu_online,
			      amd_uncore_cpu_down_prepare))
		goto fail_start;

	return 0;

fail_start:
	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
fail_prep:
	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
fail_llc:
	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
		perf_pmu_unregister(&amd_nb_pmu);
	if (amd_uncore_llc)
		free_percpu(amd_uncore_llc);
fail_nb:
	if (amd_uncore_nb)
		free_percpu(amd_uncore_nb);

fail_nodev:
	return ret;
}
device_initcall(amd_uncore_init);
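/*
 * Note (added for illustration): the three hotplug states registered above
 * mirror the lifecycle handled earlier in this file: the PREP callback
 * allocates the per-CPU amd_uncore structures, the STARTING callback runs on
 * the incoming CPU to discover which existing uncore it shares, and the
 * ONLINE / down-prepare pair maintains the active cpumask and migrates the
 * perf context away from a CPU that is going offline.
 */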