/*
 * Performance event support for s390x - CPU-measurement Counter Facility
 *
 * Copyright IBM Corp. 2012
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */
#define KMSG_COMPONENT	"cpum_cf"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <asm/ctl_reg.h>
#include <asm/irq.h>
#include <asm/cpu_mf.h>
/* The CPU-measurement counter facility supports these CPU counter sets:
 *	Basic counter set:	     0-31
 *	Problem-state counter set:  32-63
 *	Crypto-activity counter set: 64-127
 *	Extended counter set:	   128-159
 */
enum cpumf_ctr_set {
	/* CPU counter sets */
	CPUMF_CTR_SET_BASIC	= 0,
	CPUMF_CTR_SET_USER	= 1,
	CPUMF_CTR_SET_CRYPTO	= 2,
	CPUMF_CTR_SET_EXT	= 3,

	/* Maximum number of counter sets */
	CPUMF_CTR_SET_MAX,
};
#define CPUMF_LCCTL_ENABLE_SHIFT	16
#define CPUMF_LCCTL_ACTCTL_SHIFT	0

static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
	[CPUMF_CTR_SET_BASIC]	= 0x02,
	[CPUMF_CTR_SET_USER]	= 0x04,
	[CPUMF_CTR_SET_CRYPTO]	= 0x08,
	[CPUMF_CTR_SET_EXT]	= 0x01,
};
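
/* The per-cpu control word passed to lcctl() carries one bit per counter
 * set: the enable-control bit of a set lives at cpumf_state_ctl[set] <<
 * CPUMF_LCCTL_ENABLE_SHIFT, its activation-control bit at
 * cpumf_state_ctl[set] << CPUMF_LCCTL_ACTCTL_SHIFT.  For example, enabling
 * and starting the basic counter set ORs in (0x02 << 16) | 0x02, i.e. a
 * state word of 0x00020002.
 */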
static void ctr_set_enable(u64 *state, int ctr_set)
{
	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
}

static void ctr_set_disable(u64 *state, int ctr_set)
{
	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
}

static void ctr_set_start(u64 *state, int ctr_set)
{
	*state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
}

static void ctr_set_stop(u64 *state, int ctr_set)
{
	*state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
}
/* Local CPUMF event structure */
struct cpu_hw_events {
	struct cpumf_ctr_info	info;
	atomic_t		ctr_set[CPUMF_CTR_SET_MAX];
	u64			state, tx_state;
	unsigned int		flags;
	unsigned int		txn_flags;
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.ctr_set = {
		[CPUMF_CTR_SET_BASIC]  = ATOMIC_INIT(0),
		[CPUMF_CTR_SET_USER]   = ATOMIC_INIT(0),
		[CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
		[CPUMF_CTR_SET_EXT]    = ATOMIC_INIT(0),
	},
	.state = 0,
	.flags = 0,
	.txn_flags = 0,
};
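
/* Map a counter number to the counter set that contains it.  The boundaries
 * mirror the ranges listed at the top of this file; the extended set spans
 * counter numbers up to 255, with the exact upper bound depending on the
 * counter set version (see validate_ctr_version()).
 */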
static int get_counter_set(u64 event)
{
	int set = -1;

	if (event < 32)
		set = CPUMF_CTR_SET_BASIC;
	else if (event < 64)
		set = CPUMF_CTR_SET_USER;
	else if (event < 128)
		set = CPUMF_CTR_SET_CRYPTO;
	else if (event < 256)
		set = CPUMF_CTR_SET_EXT;

	return set;
}
static int validate_event(const struct hw_perf_event *hwc)
{
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
		/* check for reserved counters */
		if ((hwc->config >=  6 && hwc->config <=  31) ||
		    (hwc->config >= 38 && hwc->config <=  63) ||
		    (hwc->config >= 80 && hwc->config <= 127))
			return -EOPNOTSUPP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
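
/* The counter facility reports two version numbers: cfvn covers the basic
 * and problem-state counter sets, csvn the crypto-activity and extended
 * sets.  Newer csvn levels add counters to the extended set, which is why
 * the valid counter range below grows with csvn.
 */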
static int validate_ctr_version(const struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuhw;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_hw_events);

	/* check required version for counter sets */
	switch (hwc->config_base) {
	case CPUMF_CTR_SET_BASIC:
	case CPUMF_CTR_SET_USER:
		if (cpuhw->info.cfvn < 1)
			err = -EOPNOTSUPP;
		break;
	case CPUMF_CTR_SET_CRYPTO:
	case CPUMF_CTR_SET_EXT:
		if (cpuhw->info.csvn < 1)
			err = -EOPNOTSUPP;
		if ((cpuhw->info.csvn == 1 && hwc->config > 159) ||
		    (cpuhw->info.csvn == 2 && hwc->config > 175) ||
		    (cpuhw->info.csvn  > 2 && hwc->config > 255))
			err = -EOPNOTSUPP;
		break;
	}

	put_cpu_var(cpu_hw_events);
	return err;
}
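
/* Note: info.auth_ctl mirrors the hardware authorization mask for the
 * counter sets; it is refreshed on a counter-authorization-change alert
 * (see cpumf_measurement_alert() below), so the check here reflects the
 * current authorization state.
 */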
static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
	struct cpu_hw_events *cpuhw;
	u64 ctrs_state;
	int err = 0;

	cpuhw = &get_cpu_var(cpu_hw_events);

	/* Check authorization for cpu counter sets.
	 * If the particular CPU counter set is not authorized,
	 * return with -ENOENT in order to fall back to other
	 * PMUs that might be able to service the event request.
	 */
	ctrs_state = cpumf_state_ctl[hwc->config_base];
	if (!(ctrs_state & cpuhw->info.auth_ctl))
		err = -ENOENT;

	put_cpu_var(cpu_hw_events);
	return err;
}
/*
 * Change the CPUMF state to active.
 * Enable and activate the CPU-counter sets according
 * to the per-cpu control state.
 */
static void cpumf_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	int err;

	if (cpuhw->flags & PMU_F_ENABLED)
		return;

	err = lcctl(cpuhw->state);
	if (err) {
		pr_err("Enabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags |= PMU_F_ENABLED;
}
/*
 * Change the CPUMF state to inactive.
 * Stop the CPU-counter sets (keep them enabled but inactive)
 * according to the per-cpu control state.
 */
static void cpumf_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	u64 inactive;
	int err;

	if (!(cpuhw->flags & PMU_F_ENABLED))
		return;

	/* Keep the enable-control bits, clear all activation-control bits */
	inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	err = lcctl(inactive);
	if (err) {
		pr_err("Disabling the performance measuring unit "
		       "failed with rc=%x\n", err);
		return;
	}

	cpuhw->flags &= ~PMU_F_ENABLED;
}
/* Number of perf events counting hardware events */
static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
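
/* Measurement alerts are delivered as external interruptions.  A
 * counter-authorization-change alert (CACA) invalidates the cached counter
 * information, which is therefore re-read with qctri(); a loss-of-counter-
 * data alert (LCDA) can only be reported.
 */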
/* CPU-measurement alerts for the counter facility */
static void cpumf_measurement_alert(struct ext_code ext_code,
				    unsigned int alert, unsigned long unused)
{
	struct cpu_hw_events *cpuhw;

	if (!(alert & CPU_MF_INT_CF_MASK))
		return;

	inc_irq_stat(IRQEXT_CMC);
	cpuhw = this_cpu_ptr(&cpu_hw_events);

	/* Measurement alerts are shared and might happen when the PMU
	 * is not reserved.  Ignore these alerts in this case. */
	if (!(cpuhw->flags & PMU_F_RESERVED))
		return;

	/* counter authorization change alert */
	if (alert & CPU_MF_INT_CF_CACA)
		qctri(&cpuhw->info);

	/* loss of counter data alert */
	if (alert & CPU_MF_INT_CF_LCDA)
		pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
}
#define PMC_INIT	0
#define PMC_RELEASE	1

static void setup_pmc_cpu(void *flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	switch (*((int *) flags)) {
	case PMC_INIT:
		memset(&cpuhw->info, 0, sizeof(cpuhw->info));
		qctri(&cpuhw->info);
		cpuhw->flags |= PMU_F_RESERVED;
		break;

	case PMC_RELEASE:
		cpuhw->flags &= ~PMU_F_RESERVED;
		break;
	}

	/* Disable CPU counter sets */
	lcctl(0);
}
/* Initialize the CPU-measurement facility */
static int reserve_pmc_hardware(void)
{
	int flags = PMC_INIT;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

	return 0;
}

/* Release the CPU-measurement facility */
static void release_pmc_hardware(void)
{
	int flags = PMC_RELEASE;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
}
/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
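
/* In the mapping tables below, -1 marks generic hardware events that the
 * counter facility cannot provide; __hw_perf_event_init() turns such
 * entries into -ENOENT so that perf can fall back to another PMU.
 */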
/* CPUMF <-> perf event mappings for kernel+userspace (basic set) */
static const int cpumf_generic_events_basic[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 0,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 1,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};
/* CPUMF <-> perf event mappings for userspace (problem-state set) */
static const int cpumf_generic_events_user[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = 32,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = 33,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = -1,
	[PERF_COUNT_HW_CACHE_MISSES]	    = -1,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = -1,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = -1,
	[PERF_COUNT_HW_BUS_CYCLES]	    = -1,
};
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	int err;
	u64 ev;

	switch (attr->type) {
	case PERF_TYPE_RAW:
		/* Raw events are used to access counters directly,
		 * hence do not permit excludes */
		if (attr->exclude_kernel || attr->exclude_user ||
		    attr->exclude_hv)
			return -EOPNOTSUPP;
		ev = attr->config;
		break;
	case PERF_TYPE_HARDWARE:
		if (is_sampling_event(event))	/* No sampling support */
			return -ENOENT;
		ev = attr->config;
		/* Count user space (problem-state) only */
		if (!attr->exclude_user && attr->exclude_kernel) {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_user))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_user[ev];
		/* No support for kernel space counters only */
		} else if (!attr->exclude_kernel && attr->exclude_user) {
			return -EOPNOTSUPP;
		/* Count user and kernel space */
		} else {
			if (ev >= ARRAY_SIZE(cpumf_generic_events_basic))
				return -EOPNOTSUPP;
			ev = cpumf_generic_events_basic[ev];
		}
		break;
	default:
		return -ENOENT;
	}

	if (ev == -1)
		return -ENOENT;
	if (ev >= PERF_CPUM_CF_MAX_CTR)
		return -EINVAL;

	/* Use the hardware perf event structure to store the counter number
	 * in the 'config' member and the counter set to which the counter
	 * belongs in 'config_base'.  The counter set (config_base) is then
	 * used to enable/disable the counters.
	 */
	hwc->config = ev;
	hwc->config_base = get_counter_set(ev);

	/* Validate the counter that is assigned to this event.
	 * Because the counter facility can use numerous counters at the
	 * same time without constraints, it is not necessary to explicitly
	 * validate event groups (event->group_leader != event).
	 */
	err = validate_event(hwc);
	if (err)
		return err;

	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	/* Finally, validate version and authorization of the counter set */
	err = validate_ctr_auth(hwc);
	if (!err)
		err = validate_ctr_version(hwc);

	return err;
}
static int cpumf_pmu_event_init(struct perf_event *event)
{
	int err;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
	case PERF_TYPE_RAW:
		err = __hw_perf_event_init(event);
		break;
	default:
		return -ENOENT;
	}

	if (unlikely(err) && event->destroy)
		event->destroy(event);

	return err;
}
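
/* hw_perf_event_reset() and hw_perf_event_update() read the counter with
 * ecctr() and publish the result through a local64_cmpxchg() loop, the
 * usual perf idiom to avoid losing a concurrent update of prev_count.
 */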
static int hw_perf_event_reset(struct perf_event *event)
{
	u64 prev, new;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err) {
			if (err != 3)
				break;
			/* The counter is not (yet) available. This
			 * might happen if the counter set to which
			 * this counter belongs is in the disabled
			 * state.
			 */
			new = 0;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	return err;
}
static int hw_perf_event_update(struct perf_event *event)
{
	u64 prev, new, delta;
	int err;

	do {
		prev = local64_read(&event->hw.prev_count);
		err = ecctr(event->hw.config, &new);
		if (err)
			goto out;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, new) != prev);

	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	/* overflow */
	local64_add(delta, &event->count);
out:
	return err;
}
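
/* Worked example for the overflow case above: with prev = 0xfffffffffffffffe
 * and new = 1, delta = (-1ULL - prev) + new + 1 = 1 + 1 + 1 = 3, matching
 * the three hardware increments that occurred across the 64-bit wrap.
 */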
static void cpumf_pmu_read(struct perf_event *event)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return;

	hw_perf_event_update(event);
}
static void cpumf_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(hwc->config == -1))
		return;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* (Re-)enable and activate the counter set */
	ctr_set_enable(&cpuhw->state, hwc->config_base);
	ctr_set_start(&cpuhw->state, hwc->config_base);

	/* The counter set to which this counter belongs can be already active.
	 * Because all counters in a set are active, the event->hw.prev_count
	 * needs to be synchronized.  At this point, the counter set can be in
	 * the inactive or disabled state.
	 */
	hw_perf_event_reset(event);

	/* Increment refcount for this counter set */
	atomic_inc(&cpuhw->ctr_set[hwc->config_base]);
}
static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* Decrement reference count for this counter set and if this
		 * is the last used counter in the set, clear activation
		 * control and set the counter set state to inactive.
		 */
		if (!atomic_dec_return(&cpuhw->ctr_set[hwc->config_base]))
			ctr_set_stop(&cpuhw->state, hwc->config_base);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		hw_perf_event_update(event);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}
static int cpumf_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	/* Check authorization for the counter set to which this
	 * counter belongs.
	 * For group events transactions, the authorization check is
	 * done in cpumf_pmu_commit_txn().
	 */
	if (!(cpuhw->txn_flags & PERF_PMU_TXN_ADD))
		if (validate_ctr_auth(&event->hw))
			return -ENOENT;

	ctr_set_enable(&cpuhw->state, event->hw.config_base);
	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		cpumf_pmu_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}
static void cpumf_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	cpumf_pmu_stop(event, PERF_EF_UPDATE);

	/* Check if any counter in the counter set is still used.  If not used,
	 * change the counter set to the disabled state.  This also clears the
	 * content of all counters in the set.
	 *
	 * When a new perf event has been added but not yet started, this can
	 * clear enable control and reset all counters in a set.  Therefore,
	 * cpumf_pmu_start() always has to re-enable a counter set.
	 */
	if (!atomic_read(&cpuhw->ctr_set[event->hw.config_base]))
		ctr_set_disable(&cpuhw->state, event->hw.config_base);

	perf_event_update_userpage(event);
}
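
/* Summary of the counter-set life cycle as implemented above: add() sets
 * the enable control, start() sets the activation control and takes a
 * reference on the set, stop() drops the reference and clears the
 * activation control for the last user, and del() clears the enable
 * control once no counter in the set is used anymore.
 */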
/*
 * Start group events scheduling transaction.
 * Set flags to perform a single test at commit time.
 *
 * We only support PERF_PMU_TXN_ADD transactions.  Save the
 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
 * transactions.
 */
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(cpuhw->txn_flags);		/* txn already in flight */

	cpuhw->txn_flags = txn_flags;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	perf_pmu_disable(pmu);
	cpuhw->tx_state = cpuhw->state;
}
/*
 * Stop and cancel a group events scheduling transaction.
 * Assumes cpumf_pmu_del() is called for each successfully added
 * cpumf_pmu_add() during the transaction.
 */
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
	unsigned int txn_flags;
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	txn_flags = cpuhw->txn_flags;
	cpuhw->txn_flags = 0;
	if (txn_flags & ~PERF_PMU_TXN_ADD)
		return;

	WARN_ON(cpuhw->tx_state != cpuhw->state);

	perf_pmu_enable(pmu);
}
/*
 * Commit the group events scheduling transaction.  On success, the
 * transaction is closed.  On error, the transaction is kept open
 * until cpumf_pmu_cancel_txn() is called.
 */
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
	u64 state;

	WARN_ON_ONCE(!cpuhw->txn_flags);	/* no txn in flight */

	if (cpuhw->txn_flags & ~PERF_PMU_TXN_ADD) {
		cpuhw->txn_flags = 0;
		return 0;
	}

	/* check if the updated state can be scheduled */
	state = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
	state >>= CPUMF_LCCTL_ENABLE_SHIFT;
	if ((state & cpuhw->info.auth_ctl) != state)
		return -ENOENT;

	cpuhw->txn_flags = 0;
	perf_pmu_enable(pmu);
	return 0;
}
/* Performance monitoring unit for s390x */
static struct pmu cpumf_pmu = {
	.task_ctx_nr  = perf_sw_context,
	.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
	.pmu_enable   = cpumf_pmu_enable,
	.pmu_disable  = cpumf_pmu_disable,
	.event_init   = cpumf_pmu_event_init,
	.add	      = cpumf_pmu_add,
	.del	      = cpumf_pmu_del,
	.start	      = cpumf_pmu_start,
	.stop	      = cpumf_pmu_stop,
	.read	      = cpumf_pmu_read,
	.start_txn    = cpumf_pmu_start_txn,
	.commit_txn   = cpumf_pmu_commit_txn,
	.cancel_txn   = cpumf_pmu_cancel_txn,
};
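
/* The counter facility cannot generate an overflow interruption, hence
 * PERF_PMU_CAP_NO_INTERRUPT and the rejection of sampling events in
 * __hw_perf_event_init().
 */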
static int cpumf_pmf_setup(unsigned int cpu, int flags)
{
	local_irq_disable();
	setup_pmc_cpu(&flags);
	local_irq_enable();
	return 0;
}

static int s390_pmu_online_cpu(unsigned int cpu)
{
	return cpumf_pmf_setup(cpu, PMC_INIT);
}

static int s390_pmu_offline_cpu(unsigned int cpu)
{
	return cpumf_pmf_setup(cpu, PMC_RELEASE);
}
static int __init cpumf_pmu_init(void)
{
	int rc;

	if (!cpum_cf_avail())
		return -ENODEV;

	/* clear bit 15 of cr0 to unauthorize problem-state to
	 * extract measurement counters */
	ctl_clear_bit(0, 48);

	/* register handler for measurement-alert interruptions */
	rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
				   cpumf_measurement_alert);
	if (rc) {
		pr_err("Registering for CPU-measurement alerts "
		       "failed with rc=%i\n", rc);
		return rc;
	}

	cpumf_pmu.attr_groups = cpumf_cf_event_group();
	rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
	if (rc) {
		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
		unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
					cpumf_measurement_alert);
		return rc;
	}
	return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
				 "AP_PERF_S390_CF_ONLINE",
				 s390_pmu_online_cpu, s390_pmu_offline_cpu);
}
early_initcall(cpumf_pmu_init);