GNU Linux-libre 4.14.251-gnu1
[releases.git] / arch / powerpc / perf / 8xx-pmu.c
1 /*
2  * Performance event support - PPC 8xx
3  *
4  * Copyright 2016 Christophe Leroy, CS Systemes d'Information
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/perf_event.h>
15 #include <linux/percpu.h>
16 #include <linux/hardirq.h>
17 #include <asm/pmc.h>
18 #include <asm/machdep.h>
19 #include <asm/firmware.h>
20 #include <asm/ptrace.h>
21
22 #define PERF_8xx_ID_CPU_CYCLES          1
23 #define PERF_8xx_ID_HW_INSTRUCTIONS     2
24 #define PERF_8xx_ID_ITLB_LOAD_MISS      3
25 #define PERF_8xx_ID_DTLB_LOAD_MISS      4
26
27 #define C(x)    PERF_COUNT_HW_CACHE_##x
28 #define DTLB_LOAD_MISS  (C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
29 #define ITLB_LOAD_MISS  (C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
30
31 extern unsigned long itlb_miss_counter, dtlb_miss_counter;
32 extern atomic_t instruction_counter;
33
34 static atomic_t insn_ctr_ref;
35
/*
 * Return the current 48-bit instruction count as a signed 64-bit value.
 *
 * The low 16 bits are held in the upper half of the 32-bit SPRN_COUNTA
 * register (hence the ">> 16"), and the upper 32 bits are accumulated in
 * the global instruction_counter (presumably by the counter-overflow
 * handler elsewhere in the kernel -- not visible in this file).
 *
 * Re-reading instruction_counter after sampling COUNTA, and retrying if
 * it changed, guarantees the two halves form a consistent snapshot even
 * if an overflow lands between the two reads.
 */
static s64 get_insn_ctr(void)
{
	int ctr;
	unsigned long counta;

	do {
		ctr = atomic_read(&instruction_counter);
		counta = mfspr(SPRN_COUNTA);
	} while (ctr != atomic_read(&instruction_counter));

	return ((s64)ctr << 16) | (counta >> 16);
}
48
49 static int event_type(struct perf_event *event)
50 {
51         switch (event->attr.type) {
52         case PERF_TYPE_HARDWARE:
53                 if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES)
54                         return PERF_8xx_ID_CPU_CYCLES;
55                 if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS)
56                         return PERF_8xx_ID_HW_INSTRUCTIONS;
57                 break;
58         case PERF_TYPE_HW_CACHE:
59                 if (event->attr.config == ITLB_LOAD_MISS)
60                         return PERF_8xx_ID_ITLB_LOAD_MISS;
61                 if (event->attr.config == DTLB_LOAD_MISS)
62                         return PERF_8xx_ID_DTLB_LOAD_MISS;
63                 break;
64         case PERF_TYPE_RAW:
65                 break;
66         default:
67                 return -ENOENT;
68         }
69         return -EOPNOTSUPP;
70 }
71
/*
 * ->event_init() callback: accept the event iff event_type() can map it
 * to one of the 8xx counters; otherwise propagate the negative errno.
 */
static int mpc8xx_pmu_event_init(struct perf_event *event)
{
	int type = event_type(event);

	return type < 0 ? type : 0;
}
80
/*
 * ->add() callback: start counting for @event by snapshotting the raw
 * counter into hw.prev_count as the reference point for later deltas.
 *
 * Returns 0 on success or a negative errno from event_type().
 */
static int mpc8xx_pmu_add(struct perf_event *event, int flags)
{
	int type = event_type(event);
	s64 val = 0;

	if (type < 0)
		return type;

	switch (type) {
	case PERF_8xx_ID_CPU_CYCLES:
		/* Cycles are derived from the timebase (scaled on read). */
		val = get_tb();
		break;
	case PERF_8xx_ID_HW_INSTRUCTIONS:
		/*
		 * First user of instruction counting: enable the hardware
		 * counter via ICTRL. 0xc0080007 presumably selects
		 * instruction-count mode -- see the MPC8xx reference
		 * manual for the bit layout (not derivable from here).
		 */
		if (atomic_inc_return(&insn_ctr_ref) == 1)
			mtspr(SPRN_ICTRL, 0xc0080007);
		val = get_insn_ctr();
		break;
	case PERF_8xx_ID_ITLB_LOAD_MISS:
		val = itlb_miss_counter;
		break;
	case PERF_8xx_ID_DTLB_LOAD_MISS:
		val = dtlb_miss_counter;
		break;
	}
	local64_set(&event->hw.prev_count, val);
	return 0;
}
108
/*
 * ->read() callback: fold the progress since the previous read (or
 * since ->add()) into event->count.
 *
 * The cmpxchg loop atomically advances hw.prev_count to the freshly
 * sampled raw value; if a concurrent reader already moved prev_count,
 * the sample and delta are recomputed so no interval is counted twice
 * or lost.
 */
static void mpc8xx_pmu_read(struct perf_event *event)
{
	int type = event_type(event);
	s64 prev, val = 0, delta = 0;

	if (type < 0)
		return;

	do {
		prev = local64_read(&event->hw.prev_count);
		switch (type) {
		case PERF_8xx_ID_CPU_CYCLES:
			/*
			 * Scale timebase ticks to cycles; the factor of 16
			 * implies one TB tick per 16 CPU cycles on this core.
			 */
			val = get_tb();
			delta = 16 * (val - prev);
			break;
		case PERF_8xx_ID_HW_INSTRUCTIONS:
			/*
			 * prev - val: the instruction counter counts down
			 * (consistent with the wrap correction below).
			 * On wrap, add 2^48 -- the counter is 48 bits wide
			 * (see get_insn_ctr).
			 */
			val = get_insn_ctr();
			delta = prev - val;
			if (delta < 0)
				delta += 0x1000000000000LL;
			break;
		case PERF_8xx_ID_ITLB_LOAD_MISS:
			/* 32-bit counter: signed s32 subtraction handles wrap. */
			val = itlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		case PERF_8xx_ID_DTLB_LOAD_MISS:
			val = dtlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
}
143
/*
 * ->del() callback: accumulate the final delta for @event, then release
 * the instruction-counting hardware if this was an instruction event.
 */
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
	mpc8xx_pmu_read(event);
	if (event_type(event) != PERF_8xx_ID_HW_INSTRUCTIONS)
		return;

	/* If it was the last user, stop counting to avoid useless overhead */
	if (atomic_dec_return(&insn_ctr_ref) == 0)
		mtspr(SPRN_ICTRL, 7);
}
154
/*
 * PMU descriptor for the 8xx. PERF_PMU_CAP_NO_INTERRUPT and
 * PERF_PMU_CAP_NO_NMI advertise that these counters cannot generate
 * overflow interrupts, i.e. only counting (not sampling) is supported.
 */
static struct pmu mpc8xx_pmu = {
	.event_init	= mpc8xx_pmu_event_init,
	.add		= mpc8xx_pmu_add,
	.del		= mpc8xx_pmu_del,
	.read		= mpc8xx_pmu_read,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT |
			  PERF_PMU_CAP_NO_NMI,
};
163
/*
 * Put the counting hardware into a known state and register the PMU.
 *
 * ICTRL = 7 is the "disabled" value (the same one mpc8xx_pmu_del writes
 * when the last instruction-counting user goes away); CMPA is cleared
 * and COUNTA preloaded with 0xffff -- presumably the counter's idle
 * starting point, see the MPC8xx reference manual for the exact
 * semantics of these SPRs.
 */
static int init_mpc8xx_pmu(void)
{
	mtspr(SPRN_ICTRL, 7);
	mtspr(SPRN_CMPA, 0);
	mtspr(SPRN_COUNTA, 0xffff);

	return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW);
}

early_initcall(init_mpc8xx_pmu);