GNU Linux-libre 6.8.7-gnu
drivers/hwtracing/coresight/coresight-trace-id.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022, Linaro Limited, All rights reserved.
 * Author: Mike Leach <mike.leach@linaro.org>
 */
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "coresight-trace-id.h"

/* Default trace ID map. Used on systems that don't require per-sink mappings */
static struct coresight_trace_id_map id_map_default;

/* maintain a per-CPU record of the allocated ID and of IDs pending release */
static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0);
static cpumask_t cpu_id_release_pending;

/* number of currently active perf sessions */
static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);

/* lock to protect id_map and cpu data */
static DEFINE_SPINLOCK(id_map_lock);

/* #define TRACE_ID_DEBUG 1 */
#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)

static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
                                          const char *func_name)
{
        pr_debug("%s id_map::\n", func_name);
        pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
        pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
}
#define DUMP_ID_MAP(map)   coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called;  cpu=%d, id=%d\n", __func__, cpu, id)
#define DUMP_ID(id)   pr_debug("%s called; id=%d\n", __func__, id)
#define PERF_SESSION(n) pr_debug("%s perf count %d\n", __func__, n)
#else
#define DUMP_ID_MAP(map)
#define DUMP_ID(id)
#define DUMP_ID_CPU(cpu, id)
#define PERF_SESSION(n)
#endif

/* unlocked read of current trace ID value for given CPU */
static int _coresight_trace_id_read_cpu_id(int cpu)
{
        return atomic_read(&per_cpu(cpu_id, cpu));
}

/* look for next available odd ID, return 0 if none found */
static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
{
        int found_id = 0, bit = 1, next_id;

        while ((bit < CORESIGHT_TRACE_ID_RES_TOP) && !found_id) {
                /*
                 * bitmap length of CORESIGHT_TRACE_ID_RES_TOP,
                 * search from offset `bit`.
                 */
                next_id = find_next_zero_bit(id_map->used_ids,
                                             CORESIGHT_TRACE_ID_RES_TOP, bit);
                if ((next_id < CORESIGHT_TRACE_ID_RES_TOP) && (next_id & 0x1))
                        found_id = next_id;
                else
                        bit = next_id + 1;
        }
        return found_id;
}
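
/*
 * Worked example of the odd-ID search above (illustrative only, the bitmap
 * contents are assumed): if used_ids has bits 0-2 set, find_next_zero_bit()
 * from offset 1 returns 3, which is odd, so 3 is chosen. If instead only
 * bits 0-1 are set, the first search returns 2 (even), `bit` advances to 3,
 * and the loop repeats from there until a free odd ID is found or the search
 * runs off CORESIGHT_TRACE_ID_RES_TOP, in which case 0 is returned to signal
 * failure.
 */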

/*
 * Allocate a new ID and mark it as in use.
 *
 * If @preferred_id is a valid ID, try to use that value if it is available.
 * If @preferred_id is not valid and @prefer_odd_id is true, try for an odd ID.
 *
 * Otherwise allocate the next available ID.
 */
static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map,
                                           int preferred_id, bool prefer_odd_id)
{
        int id = 0;

        /* for backward compatibility, CPU IDs may use the preferred value */
        if (IS_VALID_CS_TRACE_ID(preferred_id) &&
            !test_bit(preferred_id, id_map->used_ids)) {
                id = preferred_id;
                goto trace_id_allocated;
        } else if (prefer_odd_id) {
                /* may use odd IDs to avoid the preferred legacy CPU IDs */
                id = coresight_trace_id_find_odd_id(id_map);
                if (id)
                        goto trace_id_allocated;
        }

        /*
         * skip reserved bit 0, look at bitmap length of
         * CORESIGHT_TRACE_ID_RES_TOP from offset of bit 1.
         */
        id = find_next_zero_bit(id_map->used_ids, CORESIGHT_TRACE_ID_RES_TOP, 1);
        if (id >= CORESIGHT_TRACE_ID_RES_TOP)
                return -EINVAL;

        /* mark as used */
trace_id_allocated:
        set_bit(id, id_map->used_ids);
        return id;
}
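
/*
 * Allocation precedence sketch (illustrative; mirrors the CPU path below):
 * a caller such as
 *
 *      id = coresight_trace_id_alloc_new_id(id_map,
 *                                           CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
 *                                           false);
 *
 * first tries the legacy per-CPU value and, if that is taken or invalid,
 * falls back to the lowest free ID above reserved bit 0. System allocations
 * instead pass preferred_id = 0 and prefer_odd_id = true, so they take odd
 * IDs first and tend to stay clear of the legacy CPU values.
 */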

static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_map)
{
        if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
                return;
        if (WARN(!test_bit(id, id_map->used_ids), "Freeing unused ID %d\n", id))
                return;
        clear_bit(id, id_map->used_ids);
}

static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
{
        if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
                return;
        set_bit(id, id_map->pend_rel_ids);
}

/*
 * Release all pending IDs for all current maps & clear CPU associations.
 *
 * This currently operates on the default ID map, but may be extended to
 * operate on all registered ID maps if per-sink ID maps are used.
 */
static void coresight_trace_id_release_all_pending(void)
{
        struct coresight_trace_id_map *id_map = &id_map_default;
        unsigned long flags;
        int cpu, bit;

        spin_lock_irqsave(&id_map_lock, flags);
        for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
                clear_bit(bit, id_map->used_ids);
                clear_bit(bit, id_map->pend_rel_ids);
        }
        for_each_cpu(cpu, &cpu_id_release_pending) {
                atomic_set(&per_cpu(cpu_id, cpu), 0);
                cpumask_clear_cpu(cpu, &cpu_id_release_pending);
        }
        spin_unlock_irqrestore(&id_map_lock, flags);
        DUMP_ID_MAP(id_map);
}

static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
        unsigned long flags;
        int id;

        spin_lock_irqsave(&id_map_lock, flags);

        /* check for an existing allocation for this CPU */
        id = _coresight_trace_id_read_cpu_id(cpu);
        if (id)
                goto get_cpu_id_clr_pend;

        /*
         * Find a new ID.
         *
         * Use legacy values where possible in the dynamic trace ID allocator to
         * allow older tools to continue working if they are not upgraded at the
         * same time as the kernel drivers.
         *
         * If the generated legacy ID is invalid or not available, then the next
         * available dynamic ID will be used.
         */
        id = coresight_trace_id_alloc_new_id(id_map,
                                             CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
                                             false);
        if (!IS_VALID_CS_TRACE_ID(id))
                goto get_cpu_id_out_unlock;

        /* allocate the new ID to the CPU */
        atomic_set(&per_cpu(cpu_id, cpu), id);

get_cpu_id_clr_pend:
        /* we are (re)using this ID - so ensure it is not marked for release */
        cpumask_clear_cpu(cpu, &cpu_id_release_pending);
        clear_bit(id, id_map->pend_rel_ids);

get_cpu_id_out_unlock:
        spin_unlock_irqrestore(&id_map_lock, flags);

        DUMP_ID_CPU(cpu, id);
        DUMP_ID_MAP(id_map);
        return id;
}

static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
        unsigned long flags;
        int id;

        /* check for an existing allocation for this CPU */
        id = _coresight_trace_id_read_cpu_id(cpu);
        if (!id)
                return;

        spin_lock_irqsave(&id_map_lock, flags);

        if (atomic_read(&perf_cs_etm_session_active)) {
                /* mark for deferred release if a perf session is still active */
                coresight_trace_id_set_pend_rel(id, id_map);
                cpumask_set_cpu(cpu, &cpu_id_release_pending);
        } else {
                /* otherwise clear id */
                coresight_trace_id_free(id, id_map);
                atomic_set(&per_cpu(cpu_id, cpu), 0);
        }

        spin_unlock_irqrestore(&id_map_lock, flags);
        DUMP_ID_CPU(cpu, id);
        DUMP_ID_MAP(id_map);
}
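
/*
 * Deferred release example (illustrative): while perf sessions are still
 * counted in perf_cs_etm_session_active, a put for one CPU above only marks
 * the ID in pend_rel_ids and flags the CPU in cpu_id_release_pending, so that
 * CPU keeps the same trace ID for the remainder of the session. Only when the
 * final coresight_trace_id_perf_stop() drops the count to zero does
 * coresight_trace_id_release_all_pending() free the ID and clear the per-CPU
 * record.
 */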

static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
{
        unsigned long flags;
        int id;

        spin_lock_irqsave(&id_map_lock, flags);
        /* prefer odd IDs for system components to avoid legacy CPU IDs */
        id = coresight_trace_id_alloc_new_id(id_map, 0, true);
        spin_unlock_irqrestore(&id_map_lock, flags);

        DUMP_ID(id);
        DUMP_ID_MAP(id_map);
        return id;
}

static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *id_map, int id)
{
        unsigned long flags;

        spin_lock_irqsave(&id_map_lock, flags);
        coresight_trace_id_free(id, id_map);
        spin_unlock_irqrestore(&id_map_lock, flags);

        DUMP_ID(id);
        DUMP_ID_MAP(id_map);
}

/* API functions */

int coresight_trace_id_get_cpu_id(int cpu)
{
        return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);

void coresight_trace_id_put_cpu_id(int cpu)
{
        coresight_trace_id_map_put_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);

int coresight_trace_id_read_cpu_id(int cpu)
{
        return _coresight_trace_id_read_cpu_id(cpu);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);

int coresight_trace_id_get_system_id(void)
{
        return coresight_trace_id_map_get_system_id(&id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);

void coresight_trace_id_put_system_id(int id)
{
        coresight_trace_id_map_put_system_id(&id_map_default, id);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);

void coresight_trace_id_perf_start(void)
{
        atomic_inc(&perf_cs_etm_session_active);
        PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);

void coresight_trace_id_perf_stop(void)
{
        if (!atomic_dec_return(&perf_cs_etm_session_active))
                coresight_trace_id_release_all_pending();
        PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
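
/*
 * Usage sketch (illustrative only, not part of this driver; the
 * etm_enable()/etm_disable() helpers are hypothetical names): a CPU-bound
 * trace source would typically bracket a perf session as follows.
 *
 *      coresight_trace_id_perf_start();
 *      trace_id = coresight_trace_id_get_cpu_id(cpu);
 *      if (!IS_VALID_CS_TRACE_ID(trace_id))
 *              goto fail;
 *      etm_enable(cpu, trace_id);              (program the source with trace_id)
 *      ...
 *      etm_disable(cpu);
 *      coresight_trace_id_put_cpu_id(cpu);     (deferred while sessions remain)
 *      coresight_trace_id_perf_stop();         (last stop releases pending IDs)
 *
 * A system (non-CPU-bound) source would instead pair
 * coresight_trace_id_get_system_id() with coresight_trace_id_put_system_id().
 */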