/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
13 #define pr_fmt(fmt) "power8-pmu: " fmt
15 #include "isa207-common.h"
18 * Some power8 event codes.
20 #define EVENT(_name, _code) _name = _code,
23 #include "power8-events-list.h"
/* MMCRA IFM bits - POWER8 */
/*
 * The IFM (Instruction Filtering Mode) field of MMCRA controls which
 * branches the BHRB records; IFM1 is used below for the ANY_CALL filter.
 */
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
/* All IFM bits (IFM1 | IFM2); used to sanitise the filter before mtspr. */
#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
34 /* Table of alternatives, sorted by column 0 */
35 static const unsigned int event_alternatives[][MAX_ALT] = {
36 { PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT },
37 { PM_BR_MRK_2PATH, PM_BR_MRK_2PATH_ALT },
38 { PM_L3_CO_MEPF, PM_L3_CO_MEPF_ALT },
39 { PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L2MISS_ALT },
40 { PM_CMPLU_STALL_ALT, PM_CMPLU_STALL },
41 { PM_BR_2PATH, PM_BR_2PATH_ALT },
42 { PM_INST_DISP, PM_INST_DISP_ALT },
43 { PM_RUN_CYC_ALT, PM_RUN_CYC },
44 { PM_MRK_FILT_MATCH, PM_MRK_FILT_MATCH_ALT },
45 { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
46 { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
50 * Scan the alternatives table for a match and return the
51 * index into the alternatives table if found, else -1.
53 static int find_alternative(u64 event)
57 for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
58 if (event < event_alternatives[i][0])
61 for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
62 if (event == event_alternatives[i][j])
69 static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
71 int i, j, num_alt = 0;
74 alt[num_alt++] = event;
76 i = find_alternative(event);
78 /* Filter out the original event, it's already in alt[0] */
79 for (j = 0; j < MAX_ALT; ++j) {
80 alt_event = event_alternatives[i][j];
81 if (alt_event && alt_event != event)
82 alt[num_alt++] = alt_event;
86 if (flags & PPMU_ONLY_COUNT_RUN) {
88 * We're only counting in RUN state, so PM_CYC is equivalent to
89 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
92 for (i = 0; i < num_alt; ++i) {
95 alt[j++] = PM_RUN_CYC;
101 alt[j++] = PM_RUN_INST_CMPL;
103 case PM_RUN_INST_CMPL:
104 alt[j++] = PM_INST_CMPL;
/* sysfs attributes for the generic "Hardware" perf events (perf list names). */
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
/* sysfs attributes for the generalized "Hardware cache" perf events. */
CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads, PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
143 static struct attribute *power8_events_attr[] = {
144 GENERIC_EVENT_PTR(PM_CYC),
145 GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
146 GENERIC_EVENT_PTR(PM_CMPLU_STALL),
147 GENERIC_EVENT_PTR(PM_INST_CMPL),
148 GENERIC_EVENT_PTR(PM_BRU_FIN),
149 GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
150 GENERIC_EVENT_PTR(PM_LD_REF_L1),
151 GENERIC_EVENT_PTR(PM_LD_MISS_L1),
153 CACHE_EVENT_PTR(PM_LD_MISS_L1),
154 CACHE_EVENT_PTR(PM_LD_REF_L1),
155 CACHE_EVENT_PTR(PM_L1_PREF),
156 CACHE_EVENT_PTR(PM_ST_MISS_L1),
157 CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
158 CACHE_EVENT_PTR(PM_INST_FROM_L1),
159 CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
160 CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
161 CACHE_EVENT_PTR(PM_DATA_FROM_L3),
162 CACHE_EVENT_PTR(PM_L3_PREF_ALL),
163 CACHE_EVENT_PTR(PM_L2_ST_MISS),
164 CACHE_EVENT_PTR(PM_L2_ST),
166 CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
167 CACHE_EVENT_PTR(PM_BRU_FIN),
169 CACHE_EVENT_PTR(PM_DTLB_MISS),
170 CACHE_EVENT_PTR(PM_ITLB_MISS),
174 static struct attribute_group power8_pmu_events_group = {
176 .attrs = power8_events_attr,
/*
 * Layout of perf_event_attr.config, exported via the "format" sysfs group.
 * "event" spans bits 0-49; the remaining attributes name its sub-fields.
 */
PMU_FORMAT_ATTR(event, "config:0-49");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
PMU_FORMAT_ATTR(combine, "config:11");
PMU_FORMAT_ATTR(unit, "config:12-15");
PMU_FORMAT_ATTR(pmc, "config:16-19");
PMU_FORMAT_ATTR(cache_sel, "config:20-23");
PMU_FORMAT_ATTR(sample_mode, "config:24-28");
PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
PMU_FORMAT_ATTR(thresh_start, "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
192 static struct attribute *power8_pmu_format_attr[] = {
193 &format_attr_event.attr,
194 &format_attr_pmcxsel.attr,
195 &format_attr_mark.attr,
196 &format_attr_combine.attr,
197 &format_attr_unit.attr,
198 &format_attr_pmc.attr,
199 &format_attr_cache_sel.attr,
200 &format_attr_sample_mode.attr,
201 &format_attr_thresh_sel.attr,
202 &format_attr_thresh_stop.attr,
203 &format_attr_thresh_start.attr,
204 &format_attr_thresh_cmp.attr,
208 static struct attribute_group power8_pmu_format_group = {
210 .attrs = power8_pmu_format_attr,
213 static const struct attribute_group *power8_pmu_attr_groups[] = {
214 &power8_pmu_format_group,
215 &power8_pmu_events_group,
219 static int power8_generic_events[] = {
220 [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
221 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC,
222 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
223 [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
224 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
225 [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
226 [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
227 [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
230 static u64 power8_bhrb_filter_map(u64 branch_sample_type)
232 u64 pmu_bhrb_filter = 0;
234 /* BHRB and regular PMU events share the same privilege state
235 * filter configuration. BHRB is always recorded along with a
236 * regular PMU event. As the privilege state filter is handled
237 * in the basic PMC configuration of the accompanying regular
238 * PMU event, we ignore any separate BHRB specific request.
241 /* No branch filter requested */
242 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
243 return pmu_bhrb_filter;
245 /* Invalid branch filter options - HW does not support */
246 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
249 if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
252 if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
255 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
256 pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
257 return pmu_bhrb_filter;
260 /* Every thing else is unsupported */
264 static void power8_config_bhrb(u64 pmu_bhrb_filter)
266 pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
268 /* Enable BHRB filter in PMU */
269 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
272 #define C(x) PERF_COUNT_HW_CACHE_##x
275 * Table of generalized cache-related events.
276 * 0 means not supported, -1 means nonsensical, other values
279 static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
282 [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
283 [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
286 [ C(RESULT_ACCESS) ] = 0,
287 [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
289 [ C(OP_PREFETCH) ] = {
290 [ C(RESULT_ACCESS) ] = PM_L1_PREF,
291 [ C(RESULT_MISS) ] = 0,
296 [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
297 [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
300 [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
301 [ C(RESULT_MISS) ] = -1,
303 [ C(OP_PREFETCH) ] = {
304 [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
305 [ C(RESULT_MISS) ] = 0,
310 [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
311 [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
314 [ C(RESULT_ACCESS) ] = PM_L2_ST,
315 [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
317 [ C(OP_PREFETCH) ] = {
318 [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
319 [ C(RESULT_MISS) ] = 0,
324 [ C(RESULT_ACCESS) ] = 0,
325 [ C(RESULT_MISS) ] = PM_DTLB_MISS,
328 [ C(RESULT_ACCESS) ] = -1,
329 [ C(RESULT_MISS) ] = -1,
331 [ C(OP_PREFETCH) ] = {
332 [ C(RESULT_ACCESS) ] = -1,
333 [ C(RESULT_MISS) ] = -1,
338 [ C(RESULT_ACCESS) ] = 0,
339 [ C(RESULT_MISS) ] = PM_ITLB_MISS,
342 [ C(RESULT_ACCESS) ] = -1,
343 [ C(RESULT_MISS) ] = -1,
345 [ C(OP_PREFETCH) ] = {
346 [ C(RESULT_ACCESS) ] = -1,
347 [ C(RESULT_MISS) ] = -1,
352 [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
353 [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
356 [ C(RESULT_ACCESS) ] = -1,
357 [ C(RESULT_MISS) ] = -1,
359 [ C(OP_PREFETCH) ] = {
360 [ C(RESULT_ACCESS) ] = -1,
361 [ C(RESULT_MISS) ] = -1,
366 [ C(RESULT_ACCESS) ] = -1,
367 [ C(RESULT_MISS) ] = -1,
370 [ C(RESULT_ACCESS) ] = -1,
371 [ C(RESULT_MISS) ] = -1,
373 [ C(OP_PREFETCH) ] = {
374 [ C(RESULT_ACCESS) ] = -1,
375 [ C(RESULT_MISS) ] = -1,
382 static struct power_pmu power8_pmu = {
384 .n_counter = MAX_PMU_COUNTERS,
385 .max_alternatives = MAX_ALT + 1,
386 .add_fields = ISA207_ADD_FIELDS,
387 .test_adder = ISA207_TEST_ADDER,
388 .compute_mmcr = isa207_compute_mmcr,
389 .config_bhrb = power8_config_bhrb,
390 .bhrb_filter_map = power8_bhrb_filter_map,
391 .get_constraint = isa207_get_constraint,
392 .get_alternatives = power8_get_alternatives,
393 .disable_pmc = isa207_disable_pmc,
394 .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
395 .n_generic = ARRAY_SIZE(power8_generic_events),
396 .generic_events = power8_generic_events,
397 .cache_events = &power8_cache_events,
398 .attr_groups = power8_pmu_attr_groups,
402 static int __init init_power8_pmu(void)
406 if (!cur_cpu_spec->oprofile_cpu_type ||
407 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
410 rc = register_power_pmu(&power8_pmu);
414 /* Tell userspace that EBB is supported */
415 cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
417 if (cpu_has_feature(CPU_FTR_PMAO_BUG))
418 pr_info("PMAO restore workaround active.\n");
422 early_initcall(init_power8_pmu);