/*
 * Performance counter support for POWER9 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or later version.
 */
14 #define pr_fmt(fmt) "power9-pmu: " fmt
16 #include "isa207-common.h"
/*
 * Raw event encoding for Power9:
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | |   [ ]                       [ ] [  thresh_cmp   ]   [  thresh_ctl   ]
 *   | |    |                         |                              |
 *   | |    *- IFM (Linux)            |        thresh start/stop -*
 *   | *- BHRB (Linux)                *sm
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   [] m   [    pmcxsel    ]
 *     |        |           |                           |  |
 *     |        |           *- L1/L2/L3 cache_sel       |  *- mark
 *     |        |                                       |
 *     |        *- sampling mode for marked events      *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit    (PMCxUNIT)
 * MMCR1[24]  = pmc1combine[0]
 * MMCR1[25]  = pmc1combine[1]
 * MMCR1[26]  = pmc2combine[0]
 * MMCR1[27]  = pmc2combine[1]
 * MMCR1[28]  = pmc3combine[0]
 * MMCR1[29]  = pmc3combine[1]
 * MMCR1[30]  = pmc4combine[0]
 * MMCR1[31]  = pmc4combine[1]
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	MMCR1[20:27] = thresh_ctl
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	MMCR1[20:27] = thresh_ctl
 * else
 *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[9:11] = thresh_cmp[0:2]
 *	MMCRA[12:18] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 * MMCRA[SDAR_MODE]  = sm
 */
89 * Some power9 event codes.
91 #define EVENT(_name, _code) _name = _code,
94 #include "power9-events-list.h"
/*
 * MMCRA IFM bits - POWER9.
 *
 * Instruction Filtering Mode values for the BHRB, written into MMCRA
 * by power9_config_bhrb(). The mask covers the full IFM field.
 */
#define POWER9_MMCRA_IFM1		0x0000000040000000UL
#define POWER9_MMCRA_IFM2		0x0000000080000000UL
#define POWER9_MMCRA_IFM3		0x00000000C0000000UL
#define POWER9_MMCRA_BHRB_MASK		0x00000000C0000000UL
/* PowerISA v2.07 format attribute structure, defined in isa207-common.c */
extern struct attribute_group isa207_pmu_format_group;
108 /* Table of alternatives, sorted by column 0 */
109 static const unsigned int power9_event_alternatives[][MAX_ALT] = {
110 { PM_INST_DISP, PM_INST_DISP_ALT },
111 { PM_RUN_CYC_ALT, PM_RUN_CYC },
112 { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
113 { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
114 { PM_BR_2PATH, PM_BR_2PATH_ALT },
117 static int power9_get_alternatives(u64 event, unsigned int flags, u64 alt[])
121 num_alt = isa207_get_alternatives(event, alt,
122 ARRAY_SIZE(power9_event_alternatives), flags,
123 power9_event_alternatives);
128 GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
129 GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_ICT_NOSLOT_CYC);
130 GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
131 GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
132 GENERIC_EVENT_ATTR(branch-instructions, PM_BR_CMPL);
133 GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
134 GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
135 GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1_FIN);
137 CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1_FIN);
138 CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
139 CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
140 CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
141 CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
142 CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
143 CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
144 CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
145 CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
146 CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
147 CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
148 CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
149 CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
150 CACHE_EVENT_ATTR(branch-loads, PM_BR_CMPL);
151 CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
152 CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
154 static struct attribute *power9_events_attr[] = {
155 GENERIC_EVENT_PTR(PM_CYC),
156 GENERIC_EVENT_PTR(PM_ICT_NOSLOT_CYC),
157 GENERIC_EVENT_PTR(PM_CMPLU_STALL),
158 GENERIC_EVENT_PTR(PM_INST_CMPL),
159 GENERIC_EVENT_PTR(PM_BR_CMPL),
160 GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
161 GENERIC_EVENT_PTR(PM_LD_REF_L1),
162 GENERIC_EVENT_PTR(PM_LD_MISS_L1_FIN),
163 CACHE_EVENT_PTR(PM_LD_MISS_L1_FIN),
164 CACHE_EVENT_PTR(PM_LD_REF_L1),
165 CACHE_EVENT_PTR(PM_L1_PREF),
166 CACHE_EVENT_PTR(PM_ST_MISS_L1),
167 CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
168 CACHE_EVENT_PTR(PM_INST_FROM_L1),
169 CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
170 CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
171 CACHE_EVENT_PTR(PM_DATA_FROM_L3),
172 CACHE_EVENT_PTR(PM_L3_PREF_ALL),
173 CACHE_EVENT_PTR(PM_L2_ST_MISS),
174 CACHE_EVENT_PTR(PM_L2_ST),
175 CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
176 CACHE_EVENT_PTR(PM_BR_CMPL),
177 CACHE_EVENT_PTR(PM_DTLB_MISS),
178 CACHE_EVENT_PTR(PM_ITLB_MISS),
182 static struct attribute_group power9_pmu_events_group = {
184 .attrs = power9_events_attr,
187 static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
188 &isa207_pmu_format_group,
189 &power9_pmu_events_group,
193 PMU_FORMAT_ATTR(event, "config:0-51");
194 PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
195 PMU_FORMAT_ATTR(mark, "config:8");
196 PMU_FORMAT_ATTR(combine, "config:10-11");
197 PMU_FORMAT_ATTR(unit, "config:12-15");
198 PMU_FORMAT_ATTR(pmc, "config:16-19");
199 PMU_FORMAT_ATTR(cache_sel, "config:20-23");
200 PMU_FORMAT_ATTR(sample_mode, "config:24-28");
201 PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
202 PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
203 PMU_FORMAT_ATTR(thresh_start, "config:36-39");
204 PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
205 PMU_FORMAT_ATTR(sdar_mode, "config:50-51");
207 static struct attribute *power9_pmu_format_attr[] = {
208 &format_attr_event.attr,
209 &format_attr_pmcxsel.attr,
210 &format_attr_mark.attr,
211 &format_attr_combine.attr,
212 &format_attr_unit.attr,
213 &format_attr_pmc.attr,
214 &format_attr_cache_sel.attr,
215 &format_attr_sample_mode.attr,
216 &format_attr_thresh_sel.attr,
217 &format_attr_thresh_stop.attr,
218 &format_attr_thresh_start.attr,
219 &format_attr_thresh_cmp.attr,
220 &format_attr_sdar_mode.attr,
224 static struct attribute_group power9_pmu_format_group = {
226 .attrs = power9_pmu_format_attr,
229 static const struct attribute_group *power9_pmu_attr_groups[] = {
230 &power9_pmu_format_group,
231 &power9_pmu_events_group,
235 static int power9_generic_events_dd1[] = {
236 [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
237 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
238 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
239 [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_DISP,
240 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL_ALT,
241 [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
242 [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
243 [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
246 static int power9_generic_events[] = {
247 [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
248 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
249 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
250 [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
251 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL,
252 [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
253 [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
254 [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
257 static u64 power9_bhrb_filter_map(u64 branch_sample_type)
259 u64 pmu_bhrb_filter = 0;
261 /* BHRB and regular PMU events share the same privilege state
262 * filter configuration. BHRB is always recorded along with a
263 * regular PMU event. As the privilege state filter is handled
264 * in the basic PMC configuration of the accompanying regular
265 * PMU event, we ignore any separate BHRB specific request.
268 /* No branch filter requested */
269 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
270 return pmu_bhrb_filter;
272 /* Invalid branch filter options - HW does not support */
273 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
276 if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
279 if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
282 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
283 pmu_bhrb_filter |= POWER9_MMCRA_IFM1;
284 return pmu_bhrb_filter;
287 /* Every thing else is unsupported */
291 static void power9_config_bhrb(u64 pmu_bhrb_filter)
293 pmu_bhrb_filter &= POWER9_MMCRA_BHRB_MASK;
295 /* Enable BHRB filter in PMU */
296 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
299 #define C(x) PERF_COUNT_HW_CACHE_##x
302 * Table of generalized cache-related events.
303 * 0 means not supported, -1 means nonsensical, other values
306 static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
309 [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
310 [ C(RESULT_MISS) ] = PM_LD_MISS_L1_FIN,
313 [ C(RESULT_ACCESS) ] = 0,
314 [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
316 [ C(OP_PREFETCH) ] = {
317 [ C(RESULT_ACCESS) ] = PM_L1_PREF,
318 [ C(RESULT_MISS) ] = 0,
323 [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
324 [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
327 [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
328 [ C(RESULT_MISS) ] = -1,
330 [ C(OP_PREFETCH) ] = {
331 [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
332 [ C(RESULT_MISS) ] = 0,
337 [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
338 [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
341 [ C(RESULT_ACCESS) ] = PM_L2_ST,
342 [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
344 [ C(OP_PREFETCH) ] = {
345 [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
346 [ C(RESULT_MISS) ] = 0,
351 [ C(RESULT_ACCESS) ] = 0,
352 [ C(RESULT_MISS) ] = PM_DTLB_MISS,
355 [ C(RESULT_ACCESS) ] = -1,
356 [ C(RESULT_MISS) ] = -1,
358 [ C(OP_PREFETCH) ] = {
359 [ C(RESULT_ACCESS) ] = -1,
360 [ C(RESULT_MISS) ] = -1,
365 [ C(RESULT_ACCESS) ] = 0,
366 [ C(RESULT_MISS) ] = PM_ITLB_MISS,
369 [ C(RESULT_ACCESS) ] = -1,
370 [ C(RESULT_MISS) ] = -1,
372 [ C(OP_PREFETCH) ] = {
373 [ C(RESULT_ACCESS) ] = -1,
374 [ C(RESULT_MISS) ] = -1,
379 [ C(RESULT_ACCESS) ] = PM_BR_CMPL,
380 [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
383 [ C(RESULT_ACCESS) ] = -1,
384 [ C(RESULT_MISS) ] = -1,
386 [ C(OP_PREFETCH) ] = {
387 [ C(RESULT_ACCESS) ] = -1,
388 [ C(RESULT_MISS) ] = -1,
393 [ C(RESULT_ACCESS) ] = -1,
394 [ C(RESULT_MISS) ] = -1,
397 [ C(RESULT_ACCESS) ] = -1,
398 [ C(RESULT_MISS) ] = -1,
400 [ C(OP_PREFETCH) ] = {
401 [ C(RESULT_ACCESS) ] = -1,
402 [ C(RESULT_MISS) ] = -1,
409 static struct power_pmu power9_isa207_pmu = {
411 .n_counter = MAX_PMU_COUNTERS,
412 .add_fields = ISA207_ADD_FIELDS,
413 .test_adder = P9_DD1_TEST_ADDER,
414 .compute_mmcr = isa207_compute_mmcr,
415 .config_bhrb = power9_config_bhrb,
416 .bhrb_filter_map = power9_bhrb_filter_map,
417 .get_constraint = isa207_get_constraint,
418 .get_alternatives = power9_get_alternatives,
419 .disable_pmc = isa207_disable_pmc,
420 .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
421 .n_generic = ARRAY_SIZE(power9_generic_events_dd1),
422 .generic_events = power9_generic_events_dd1,
423 .cache_events = &power9_cache_events,
424 .attr_groups = power9_isa207_pmu_attr_groups,
428 static struct power_pmu power9_pmu = {
430 .n_counter = MAX_PMU_COUNTERS,
431 .add_fields = ISA207_ADD_FIELDS,
432 .test_adder = ISA207_TEST_ADDER,
433 .compute_mmcr = isa207_compute_mmcr,
434 .config_bhrb = power9_config_bhrb,
435 .bhrb_filter_map = power9_bhrb_filter_map,
436 .get_constraint = isa207_get_constraint,
437 .get_alternatives = power9_get_alternatives,
438 .get_mem_data_src = isa207_get_mem_data_src,
439 .get_mem_weight = isa207_get_mem_weight,
440 .disable_pmc = isa207_disable_pmc,
441 .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
442 .n_generic = ARRAY_SIZE(power9_generic_events),
443 .generic_events = power9_generic_events,
444 .cache_events = &power9_cache_events,
445 .attr_groups = power9_pmu_attr_groups,
449 static int __init init_power9_pmu(void)
453 /* Comes from cpu_specs[] */
454 if (!cur_cpu_spec->oprofile_cpu_type ||
455 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power9"))
458 if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
460 * Since PM_INST_CMPL may not provide right counts in all
461 * sampling scenarios in power9 DD1, instead use PM_INST_DISP.
463 EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
465 * Power9 DD1 should use PM_BR_CMPL_ALT event code for
466 * "branches" to provide correct counter value.
468 EVENT_VAR(PM_BR_CMPL, _g).id = PM_BR_CMPL_ALT;
469 EVENT_VAR(PM_BR_CMPL, _c).id = PM_BR_CMPL_ALT;
470 rc = register_power_pmu(&power9_isa207_pmu);
472 rc = register_power_pmu(&power9_pmu);
478 /* Tell userspace that EBB is supported */
479 cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
483 early_initcall(init_power9_pmu);