// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2008-2014 Mathieu Desnoyers
 */

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>

extern tracepoint_ptr_t __start___tracepoints_ptrs[];
extern tracepoint_ptr_t __stop___tracepoints_ptrs[];

DEFINE_SRCU(tracepoint_srcu);
EXPORT_SYMBOL_GPL(tracepoint_srcu);

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

#ifdef CONFIG_MODULES
/*
 * Tracepoint module list mutex protects the local module list.
 */
static DEFINE_MUTEX(tracepoint_module_list_mutex);

/* Local list of struct tp_module */
static LIST_HEAD(tracepoint_module_list);
#endif /* CONFIG_MODULES */

/*
 * tracepoints_mutex protects the builtin and module tracepoints.
 * tracepoints_mutex nests inside tracepoint_module_list_mutex.
 */
static DEFINE_MUTEX(tracepoints_mutex);

static struct rcu_head *early_probes;
static bool ok_to_free_tracepoints;

/*
 * Note about RCU:
 * It is used to delay the freeing of old probes arrays until a quiescent
 * state is reached.
 */
struct tp_probes {
	struct rcu_head rcu;
	struct tracepoint_func probes[0];
};

/* Stub used when removal of a probe fails to allocate a new tp_funcs array */
static void tp_stub_func(void)
{
	return;
}

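/*
 * Allocate a tp_probes wrapper large enough for @count probe entries and
 * return a pointer to its probes[] array, or NULL on allocation failure.
 * The enclosing struct tp_probes is recovered later via container_of()
 * when the array is freed.
 */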
static inline void *allocate_probes(int count)
{
	struct tp_probes *p = kmalloc(struct_size(p, probes, count),
				      GFP_KERNEL);
	return p == NULL ? NULL : p->probes;
}

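/*
 * Freeing path for an old probes array: the sched RCU callback below
 * chains into an SRCU callback, so the memory is only released once both
 * grace periods have elapsed.
 */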
static void srcu_free_old_probes(struct rcu_head *head)
{
	kfree(container_of(head, struct tp_probes, rcu));
}

static void rcu_free_old_probes(struct rcu_head *head)
{
	call_srcu(&tracepoint_srcu, head, srcu_free_old_probes);
}

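/*
 * Probe arrays queued on early_probes before SRCU was initialized are
 * released here, once it is safe to start the RCU/SRCU callback chain.
 */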
static __init int release_early_probes(void)
{
	struct rcu_head *tmp;

	ok_to_free_tracepoints = true;

	while (early_probes) {
		tmp = early_probes;
		early_probes = tmp->next;
		call_rcu(tmp, rcu_free_old_probes);
	}

	return 0;
}

/* SRCU is initialized at core_initcall */
postcore_initcall(release_early_probes);

static inline void release_probes(struct tracepoint_func *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);

		/*
		 * We can't free probes if SRCU is not initialized yet.
		 * Postpone the freeing till after SRCU is initialized.
		 */
		if (unlikely(!ok_to_free_tracepoints)) {
			tp_probes->rcu.next = early_probes;
			early_probes = &tp_probes->rcu;
			return;
		}

		/*
		 * Tracepoint probes are protected by both sched RCU and SRCU;
		 * by calling the SRCU callback from the sched RCU callback we
		 * cover both cases. So let us chain the SRCU and sched RCU
		 * callbacks to wait for both grace periods.
		 */
		call_rcu(&tp_probes->rcu, rcu_free_old_probes);
	}
}

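/* Dump the current probe array when tracepoint_debug is set. */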
static void debug_print_probes(struct tracepoint_func *funcs)
{
	int i;

	if (!tracepoint_debug || !funcs)
		return;

	for (i = 0; funcs[i].func; i++)
		printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
}

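/*
 * Build a new probe array with @tp_func inserted according to @prio,
 * dropping any stub entries left over from failed removals. Returns the
 * old array (to be released by the caller) or an ERR_PTR on error.
 */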
static struct tracepoint_func *
func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
	 int prio)
{
	struct tracepoint_func *old, *new;
	int nr_probes = 0;
	int stub_funcs = 0;
	int pos = -1;

	if (WARN_ON(!tp_func->func))
		return ERR_PTR(-EINVAL);

	debug_print_probes(*funcs);
	old = *funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			/* Insert before probes of lower priority */
			if (pos < 0 && old[nr_probes].prio < prio)
				pos = nr_probes;
			if (old[nr_probes].func == tp_func->func &&
			    old[nr_probes].data == tp_func->data)
				return ERR_PTR(-EEXIST);
			if (old[nr_probes].func == tp_stub_func)
				stub_funcs++;
		}
	}
	/* + 2 : one for new probe, one for NULL func - stub functions */
	new = allocate_probes(nr_probes + 2 - stub_funcs);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old) {
		if (stub_funcs) {
			/* Need to copy one at a time to remove stubs */
			int probes = 0;

			pos = -1;
			for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
				if (old[nr_probes].func == tp_stub_func)
					continue;
				if (pos < 0 && old[nr_probes].prio < prio)
					pos = probes++;
				new[probes++] = old[nr_probes];
			}
			nr_probes = probes;
			if (pos < 0)
				pos = probes;
			else
				nr_probes--; /* Account for insertion */
		} else if (pos < 0) {
			pos = nr_probes;
			memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
		} else {
			/* Copy higher priority probes ahead of the new probe */
			memcpy(new, old, pos * sizeof(struct tracepoint_func));
			/* Copy the rest after it. */
			memcpy(new + pos + 1, old + pos,
			       (nr_probes - pos) * sizeof(struct tracepoint_func));
		}
	} else
		pos = 0;
	new[pos] = *tp_func;
	new[nr_probes + 1].func = NULL;
	*funcs = new;
	debug_print_probes(*funcs);
	return old;
}

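/*
 * Build a new probe array with @tp_func (and any stub entries) removed.
 * If the new array cannot be allocated, the entry is replaced in place by
 * tp_stub_func so that unregistration never fails. Returns the old array
 * or an ERR_PTR.
 */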
static void *func_remove(struct tracepoint_func **funcs,
		struct tracepoint_func *tp_func)
{
	int nr_probes = 0, nr_del = 0, i;
	struct tracepoint_func *old, *new;

	old = *funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(*funcs);
	/* (N -> M), (N > 1, M >= 0) probes */
	if (tp_func->func) {
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((old[nr_probes].func == tp_func->func &&
			     old[nr_probes].data == tp_func->data) ||
			    old[nr_probes].func == tp_stub_func)
				nr_del++;
		}
	}

	/*
	 * If probe is NULL, then nr_probes = nr_del = 0, and then the
	 * entire entry will be removed.
	 */
	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		*funcs = NULL;
		debug_print_probes(*funcs);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new) {
			for (i = 0; old[i].func; i++)
				if ((old[i].func != tp_func->func
				     || old[i].data != tp_func->data)
				    && old[i].func != tp_stub_func)
					new[j++] = old[i];
			new[nr_probes - nr_del].func = NULL;
			*funcs = new;
		} else {
			/*
			 * Failed to allocate, replace the old function
			 * with calls to tp_stub_func.
			 */
			for (i = 0; old[i].func; i++)
				if (old[i].func == tp_func->func &&
				    old[i].data == tp_func->data) {
					old[i].func = tp_stub_func;
					/* Set the prio to the next event. */
					if (old[i + 1].func)
						old[i].prio = old[i + 1].prio;
					else
						old[i].prio = -1;
				}
			*funcs = old;
		}
	}
	debug_print_probes(*funcs);
	return old;
}

/*
 * Add the probe function to a tracepoint.
 */
static int tracepoint_add_func(struct tracepoint *tp,
			       struct tracepoint_func *func, int prio,
			       bool warn)
{
	struct tracepoint_func *old, *tp_funcs;
	int ret;

	if (tp->regfunc && !static_key_enabled(&tp->key)) {
		ret = tp->regfunc();
		if (ret < 0)
			return ret;
	}

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_add(&tp_funcs, func, prio);
	if (IS_ERR(old)) {
		WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
		return PTR_ERR(old);
	}

	/*
	 * rcu_assign_pointer has an smp_store_release() which makes sure
	 * that the new probe callbacks array is consistent before setting
	 * a pointer to it. This array is referenced by __DO_TRACE from
	 * include/linux/tracepoint.h using rcu_dereference_sched().
	 */
	rcu_assign_pointer(tp->funcs, tp_funcs);
	if (!static_key_enabled(&tp->key))
		static_key_slow_inc(&tp->key);
	release_probes(old);
	return 0;
}

/*
 * Remove a probe function from a tracepoint.
 * Note: only waiting for an RCU grace period after setting elem->call to the
 * empty function ensures that the original callback is not used anymore.
 * This is ensured by the preempt_disable around the call site.
 */
static int tracepoint_remove_func(struct tracepoint *tp,
		struct tracepoint_func *func)
{
	struct tracepoint_func *old, *tp_funcs;

	tp_funcs = rcu_dereference_protected(tp->funcs,
			lockdep_is_held(&tracepoints_mutex));
	old = func_remove(&tp_funcs, func);
	if (WARN_ON_ONCE(IS_ERR(old)))
		return PTR_ERR(old);

	if (tp_funcs == old)
		/* Failed allocating new tp_funcs, replaced func with stub */
		return 0;

	if (!tp_funcs) {
		/* Removed last function */
		if (tp->unregfunc && static_key_enabled(&tp->key))
			tp->unregfunc();

		if (static_key_enabled(&tp->key))
			static_key_slow_dec(&tp->key);
	}
	rcu_assign_pointer(tp->funcs, tp_funcs);
	release_probes(old);
	return 0;
}

/**
 * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Same as tracepoint_probe_register_prio() except that it will not warn
 * if the tracepoint is already registered.
 */
int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
					     void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, false);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);

/**
 * tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 * @prio: priority of this function over other registered functions
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
				   void *data, int prio)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	tp_func.prio = prio;
	ret = tracepoint_add_func(tp, &tp_func, prio, true);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio);

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @tp: tracepoint
 * @probe: probe handler
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 * Note: if @tp is within a module, the caller is responsible for
 * unregistering the probe before the module is gone. This can be
 * performed either with a tracepoint module going notifier, or from
 * within module exit functions.
 */
int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
{
	return tracepoint_probe_register_prio(tp, probe, data, TRACEPOINT_DEFAULT_PRIO);
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);

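/*
 * Usage sketch (illustrative only, not part of this file): a probe's
 * signature must match the tracepoint's prototype with a leading data
 * pointer. The tracepoint pointer and probe name below are hypothetical.
 *
 *	static void my_probe(void *data, int arg)
 *	{
 *		...
 *	}
 *
 *	err = tracepoint_probe_register(tp, (void *)my_probe, NULL);
 *	...
 *	err = tracepoint_probe_unregister(tp, (void *)my_probe, NULL);
 */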
/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @tp: tracepoint
 * @probe: probe function pointer
 * @data: tracepoint data
 *
 * Returns 0 if ok, error value on error.
 */
int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
{
	struct tracepoint_func tp_func;
	int ret;

	mutex_lock(&tracepoints_mutex);
	tp_func.func = probe;
	tp_func.data = data;
	ret = tracepoint_remove_func(tp, &tp_func);
	mutex_unlock(&tracepoints_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

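/* Invoke @fct on every tracepoint in the [begin, end) pointer section. */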
static void for_each_tracepoint_range(
		tracepoint_ptr_t *begin, tracepoint_ptr_t *end,
		void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	tracepoint_ptr_t *iter;

	if (!begin)
		return;
	for (iter = begin; iter < end; iter++)
		fct(tracepoint_ptr_deref(iter), priv);
}

#ifdef CONFIG_MODULES
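/*
 * Any taint other than out-of-tree, staging (TAINT_CRAP) or unsigned
 * makes a module unsafe for tracepoint use.
 */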
bool trace_module_has_bad_taint(struct module *mod)
{
	return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
			       (1 << TAINT_UNSIGNED_MODULE));
}

static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);

/**
 * register_tracepoint_module_notifier - register tracepoint coming/going notifier
 * @nb: notifier block
 *
 * Notifiers registered with this function are called on module
 * coming/going with the tracepoint_module_list_mutex held.
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int register_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);

/**
 * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
 * @nb: notifier block
 *
 * The notifier block callback should expect a "struct tp_module" data
 * pointer.
 */
int unregister_tracepoint_module_notifier(struct notifier_block *nb)
{
	struct tp_module *tp_mod;
	int ret;

	mutex_lock(&tracepoint_module_list_mutex);
	ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
	if (ret)
		goto end;
	list_for_each_entry(tp_mod, &tracepoint_module_list, list)
		(void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);

/*
 * Ensure the tracer unregistered the module's probes before the module
 * teardown is performed. Prevents leaks of probe and data pointers.
 */
static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
{
	WARN_ON_ONCE(tp->funcs);
}

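/*
 * Track a newly loaded module that contains tracepoints and notify the
 * registered coming/going notifiers about it.
 */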
static int tracepoint_module_coming(struct module *mod)
{
	struct tp_module *tp_mod;
	int ret = 0;

	if (!mod->num_tracepoints)
		return 0;

	/*
	 * We skip modules that taint the kernel, especially those with different
	 * module headers (for forced load), to make sure we don't cause a crash.
	 * Staging, out-of-tree, and unsigned GPL modules are fine.
	 */
	if (trace_module_has_bad_taint(mod))
		return 0;

	mutex_lock(&tracepoint_module_list_mutex);
	tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
	if (!tp_mod) {
		ret = -ENOMEM;
		goto end;
	}
	tp_mod->mod = mod;
	list_add_tail(&tp_mod->list, &tracepoint_module_list);
	blocking_notifier_call_chain(&tracepoint_notify_list,
			MODULE_STATE_COMING, tp_mod);
end:
	mutex_unlock(&tracepoint_module_list_mutex);
	return ret;
}

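/*
 * Forget a module that is being removed: notify the going notifiers,
 * drop it from the local list and check that all of its tracepoints are
 * quiescent.
 */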
static void tracepoint_module_going(struct module *mod)
{
	struct tp_module *tp_mod;

	if (!mod->num_tracepoints)
		return;

	mutex_lock(&tracepoint_module_list_mutex);
	list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
		if (tp_mod->mod == mod) {
			blocking_notifier_call_chain(&tracepoint_notify_list,
					MODULE_STATE_GOING, tp_mod);
			list_del(&tp_mod->list);
			kfree(tp_mod);
			/*
			 * Call the going notifier before checking for
			 * quiescence.
			 */
			for_each_tracepoint_range(mod->tracepoints_ptrs,
				mod->tracepoints_ptrs + mod->num_tracepoints,
				tp_module_going_check_quiescent, NULL);
			break;
		}
	}
	/*
	 * In the case of modules that were tainted at "coming", we'll simply
	 * walk through the list without finding it. We cannot use the "tainted"
	 * flag on "going", in case a module taints the kernel only after being
	 * loaded.
	 */
	mutex_unlock(&tracepoint_module_list_mutex);
}

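/* Module notifier dispatching the coming/going handlers above. */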
static int tracepoint_module_notify(struct notifier_block *self,
		unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = tracepoint_module_coming(mod);
		break;
	case MODULE_STATE_LIVE:
		break;
	case MODULE_STATE_GOING:
		tracepoint_module_going(mod);
		break;
	case MODULE_STATE_UNFORMED:
		break;
	}
	return notifier_from_errno(ret);
}

static struct notifier_block tracepoint_module_nb = {
	.notifier_call = tracepoint_module_notify,
	.priority = 0,
};

static __init int init_tracepoints(void)
{
	int ret;

	ret = register_module_notifier(&tracepoint_module_nb);
	if (ret)
		pr_warn("Failed to register tracepoint module enter notifier\n");

	return ret;
}
__initcall(init_tracepoints);
#endif /* CONFIG_MODULES */

/**
 * for_each_kernel_tracepoint - iteration on all kernel tracepoints
 * @fct: callback
 * @priv: private data
 */
void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
		void *priv)
{
	for_each_tracepoint_range(__start___tracepoints_ptrs,
		__stop___tracepoints_ptrs, fct, priv);
}
EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
static int sys_tracepoint_refcount;

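/*
 * Set TIF_SYSCALL_TRACEPOINT on every thread when the first syscall
 * tracepoint probe is registered, so tasks take the syscall entry/exit
 * tracing slow path.
 */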
int syscall_regfunc(void)
{
	struct task_struct *p, *t;

	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
	sys_tracepoint_refcount++;

	return 0;
}

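/*
 * Clear TIF_SYSCALL_TRACEPOINT on every thread once the last syscall
 * tracepoint probe is gone.
 */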
void syscall_unregfunc(void)
{
	struct task_struct *p, *t;

	sys_tracepoint_refcount--;
	if (!sys_tracepoint_refcount) {
		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
		}
		read_unlock(&tasklist_lock);
	}
}