// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/kprobes.h>
#include <linux/syscalls.h>
#include <linux/error-injection.h>

#include "trace_probe.h"
#include "trace.h"
#define bpf_event_rcu_dereference(p)					\
	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
#ifdef CONFIG_MODULES
struct bpf_trace_module {
	struct module *module;
	struct list_head list;
};

static LIST_HEAD(bpf_trace_modules);
static DEFINE_MUTEX(bpf_module_mutex);

static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	struct bpf_raw_event_map *btp, *ret = NULL;
	struct bpf_trace_module *btm;
	unsigned int i;

	mutex_lock(&bpf_module_mutex);
	list_for_each_entry(btm, &bpf_trace_modules, list) {
		for (i = 0; i < btm->module->num_bpf_raw_events; ++i) {
			btp = &btm->module->bpf_raw_events[i];
			if (!strcmp(btp->tp->name, name)) {
				if (try_module_get(btm->module))
					ret = btp;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&bpf_module_mutex);
	return ret;
}
#else
static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
{
	return NULL;
}
#endif /* CONFIG_MODULES */
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
/**
 * trace_call_bpf - invoke BPF program
 * @call: tracepoint event
 * @ctx: opaque context pointer
 *
 * kprobe handlers execute BPF programs via this helper.
 * Can be used from static tracepoints in the future.
 *
 * Return: BPF programs always return an integer which is interpreted by
 * the kprobe handler as:
 * 0 - return from kprobe (event is filtered out)
 * 1 - store kprobe event into ring buffer
 * Other values are reserved and currently alias to 1
 */
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
	unsigned int ret;

	if (in_nmi()) /* not supported yet */
		return 1;

	preempt_disable();

	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
		/*
		 * since some bpf program is already running on this cpu,
		 * don't call into another bpf program (same or different)
		 * and don't send kprobe event into ring-buffer,
		 * so return zero here
		 */
		ret = 0;
		goto out;
	}

	/*
	 * Instead of moving rcu_read_lock/rcu_dereference/rcu_read_unlock
	 * to all call sites, we did a bpf_prog_array_valid() there to check
	 * whether call->prog_array is empty or not, which is
	 * a heuristic to speed up execution.
	 *
	 * If the bpf_prog_array_valid() fetched prog_array was
	 * non-NULL, we go into trace_call_bpf() and do the actual
	 * proper rcu_dereference() under RCU lock.
	 * If it turns out that prog_array is NULL, we bail out.
	 * Conversely, if the bpf_prog_array_valid() fetched pointer
	 * was NULL, the prog_array is skipped, with the (accepted) risk of
	 * missing events if it was updated between that check and the
	 * rcu_dereference() here.
	 */
	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

out:
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(trace_call_bpf);
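
/*
 * Illustrative only (not part of this file): a minimal BPF kprobe program,
 * roughly as it could be written for clang/libbpf, showing how trace_call_bpf()
 * above interprets the program's return value. The attach point and the PID
 * value are hypothetical.
 *
 *	SEC("kprobe/do_sys_open")
 *	int kprobe__do_sys_open(struct pt_regs *ctx)
 *	{
 *		u32 tgid = bpf_get_current_pid_tgid() >> 32;
 *
 *		if (tgid != 1234)
 *			return 0;	// event is filtered out
 *		return 1;		// store the kprobe event into the ring buffer
 *	}
 */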
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
{
	regs_set_return_value(regs, rc);
	override_function_with_return(regs);
	return 0;
}

static const struct bpf_func_proto bpf_override_return_proto = {
	.func		= bpf_override_return,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
#endif
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_BPF_READ);
	if (ret < 0)
		goto out;

	ret = probe_kernel_read(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_proto = {
	.func		= bpf_probe_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
	   u32, size)
{
	/*
	 * Ensure we're in user context which is safe for the helper to
	 * run. This helper has no business in a kthread.
	 *
	 * access_ok() should prevent writing to non-user memory, but in
	 * some situations (nommu, temporary switch, etc) access_ok() does
	 * not provide enough validation, hence the check on KERNEL_DS.
	 *
	 * nmi_uaccess_okay() ensures the probe is not run in an interim
	 * state, when the task or mm are switched. This is specifically
	 * required to prevent the use of temporary mm.
	 */

	if (unlikely(in_interrupt() ||
		     current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	return probe_user_write(unsafe_ptr, src, size);
}
static const struct bpf_func_proto bpf_probe_write_user_proto = {
	.func		= bpf_probe_write_user,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
{
	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
			    current->comm, task_pid_nr(current));

	return &bpf_probe_write_user_proto;
}
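
/*
 * Illustrative only (not part of this file): a sketch of how a BPF program
 * might use bpf_probe_write_user() to patch a buffer in the current task's
 * user address space from a kprobe. The probed symbol and names are
 * hypothetical; the helper only works in user task context, as checked above.
 *
 *	SEC("kprobe/ksys_read")
 *	int rewrite_buf(struct pt_regs *ctx)
 *	{
 *		char patch[4] = "BPF";
 *		void *ubuf = (void *)PT_REGS_PARM2(ctx);
 *
 *		bpf_probe_write_user(ubuf, patch, sizeof(patch));
 *		return 0;
 *	}
 */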
/*
 * Only limited trace_printk() conversion specifiers allowed:
 * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %s
 */
BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
	   u64, arg2, u64, arg3)
{
	bool str_seen = false;
	int mod[3] = {};
	int fmt_cnt = 0;
	u64 unsafe_addr;
	char buf[64];
	int i;

	/*
	 * bpf_check()->check_func_arg()->check_stack_boundary()
	 * guarantees that fmt points to bpf program stack,
	 * fmt_size bytes of it were initialized and fmt_size > 0
	 */
	if (fmt[--fmt_size] != 0)
		return -EINVAL;

	/* check format string for allowed specifiers */
	for (i = 0; i < fmt_size; i++) {
		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
			return -EINVAL;

		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
		} else if (fmt[i] == 'p' || fmt[i] == 's') {
			/* disallow any further format extensions */
			if (fmt[i + 1] != 0 &&
			    !isspace(fmt[i + 1]) &&
			    !ispunct(fmt[i + 1]))
				return -EINVAL;

			/* allow only one '%s' per fmt string */
				strncpy_from_unsafe(buf,
						    (void *) (long) unsafe_addr,
						    sizeof(buf));

		if (fmt[i] != 'i' && fmt[i] != 'd' &&
		    fmt[i] != 'u' && fmt[i] != 'x')
			return -EINVAL;
/* Horrid workaround for getting va_list handling working with different
 * argument type combinations generically for 32 and 64 bit archs.
 */
#define __BPF_TP_EMIT()	__BPF_ARG3_TP()
#define __BPF_TP(...)							\
	__trace_printk(0 /* Fake ip */,					\
		       fmt, ##__VA_ARGS__)

#define __BPF_ARG1_TP(...)						\
	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_TP(arg1, ##__VA_ARGS__)				\
	  : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_TP((long)arg1, ##__VA_ARGS__)			\
	      : __BPF_TP((u32)arg1, ##__VA_ARGS__)))

#define __BPF_ARG2_TP(...)						\
	((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__)				\
	  : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__)		\
	      : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))

#define __BPF_ARG3_TP(...)						\
	((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64))	\
	  ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__)				\
	  : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32))	\
	      ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__)		\
	      : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))

	return __BPF_TP_EMIT();
}
static const struct bpf_func_proto bpf_trace_printk_proto = {
	.func		= bpf_trace_printk,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM,
	.arg2_type	= ARG_CONST_SIZE,
};
const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
{
	/*
	 * this program might be calling bpf_trace_printk,
	 * so allocate per-cpu printk buffers
	 */
	trace_printk_init_buffers();

	return &bpf_trace_printk_proto;
}
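
/*
 * Illustrative only (not part of this file): typical bpf_trace_printk() use
 * from a BPF program, restricted to the specifiers whitelisted above. Output
 * appears in the tracing trace_pipe. The attach point is hypothetical.
 *
 *	SEC("tracepoint/syscalls/sys_enter_execve")
 *	int trace_execve(void *ctx)
 *	{
 *		char fmt[] = "execve by pid %d\n";
 *		u32 pid = bpf_get_current_pid_tgid() >> 32;
 *
 *		bpf_trace_printk(fmt, sizeof(fmt), pid);
 *		return 0;
 *	}
 */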
static __always_inline int
get_map_perf_counter(struct bpf_map *map, u64 flags,
		     u64 *value, u64 *enabled, u64 *running)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;

	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	return perf_event_read_local(ee->event, value, enabled, running);
}
BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
{
	u64 value = 0;
	int err;

	err = get_map_perf_counter(map, flags, &value, NULL, NULL);
	/*
	 * this api is ugly since we miss [-22..-2] range of valid
	 * counter values, but that's uapi
	 */
	if (err)
		return err;
	return value;
}

static const struct bpf_func_proto bpf_perf_event_read_proto = {
	.func		= bpf_perf_event_read,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled,
				   &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
	.func		= bpf_perf_event_read_value,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
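
/*
 * Illustrative only (not part of this file): reading a counter from a BPF
 * program. bpf_perf_event_read_value() is preferred over bpf_perf_event_read()
 * because errors are reported separately from the counter value (see the
 * "[-22..-2]" caveat above). The map name is hypothetical.
 *
 *	struct bpf_perf_event_value v = {};
 *	int err;
 *
 *	err = bpf_perf_event_read_value(&cycles_map, BPF_F_CURRENT_CPU,
 *					&v, sizeof(v));
 *	if (!err)
 *		// v.counter, v.enabled and v.running are now valid
 *		use_counter(v.counter);
 */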
static __always_inline u64
__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
			u64 flags, struct perf_sample_data *sd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	unsigned int cpu = smp_processor_id();
	u64 index = flags & BPF_F_INDEX_MASK;
	struct bpf_event_entry *ee;
	struct perf_event *event;

	if (index == BPF_F_CURRENT_CPU)
		index = cpu;
	if (unlikely(index >= array->map.max_entries))
		return -E2BIG;

	ee = READ_ONCE(array->ptrs[index]);
	if (!ee)
		return -ENOENT;

	event = ee->event;
	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
		return -EINVAL;

	if (unlikely(event->oncpu != cpu))
		return -EOPNOTSUPP;

	return perf_event_output(event, sd, regs);
}
/*
 * Support executing tracepoints in normal, irq, and nmi context that each call
 * bpf_perf_event_output
 */
struct bpf_trace_sample_data {
	struct perf_sample_data sds[3];
};

static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
static DEFINE_PER_CPU(int, bpf_trace_nest_level);
BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
	struct perf_raw_record raw = {
		.frag = {
			.size = size,
			.data = data,
		},
	};
	struct perf_sample_data *sd;
	int err;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
		err = -EBUSY;
		goto out;
	}

	sd = &sds->sds[nest_level - 1];

	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
		err = -EINVAL;
		goto out;
	}

	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	err = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_trace_nest_level);
	return err;
}
static const struct bpf_func_proto bpf_perf_event_output_proto = {
	.func		= bpf_perf_event_output,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
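
/*
 * Illustrative only (not part of this file): a BPF program pushing a sample
 * through a BPF_MAP_TYPE_PERF_EVENT_ARRAY map with bpf_perf_event_output();
 * user space then consumes it via the per-CPU perf ring buffers. The map,
 * struct and section names are hypothetical.
 *
 *	struct event { u32 pid; u64 ts; };
 *
 *	SEC("kprobe/do_sys_open")
 *	int send_event(struct pt_regs *ctx)
 *	{
 *		struct event e = {
 *			.pid = bpf_get_current_pid_tgid() >> 32,
 *			.ts  = bpf_ktime_get_ns(),
 *		};
 *
 *		bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
 *				      &e, sizeof(e));
 *		return 0;
 *	}
 */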
static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
struct bpf_nested_pt_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
	int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
	struct perf_raw_frag frag = {
		.copy		= ctx_copy,
		.size		= ctx_size,
		.data		= ctx,
	};
	struct perf_raw_record raw = {
		.frag = {
			{
				.next	= ctx_size ? &frag : NULL,
			},
			.size	= meta_size,
			.data	= meta,
		},
	};
	struct perf_sample_data *sd;
	struct pt_regs *regs;
	u64 ret;

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
		ret = -EBUSY;
		goto out;
	}
	sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
	regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);

	perf_fetch_caller_regs(regs);
	perf_sample_data_init(sd, 0, 0);
	sd->raw = &raw;

	ret = __bpf_perf_event_output(regs, map, flags, sd);
out:
	this_cpu_dec(bpf_event_output_nest_level);
	return ret;
}
BPF_CALL_0(bpf_get_current_task)
{
	return (long) current;
}

static const struct bpf_func_proto bpf_get_current_task_proto = {
	.func		= bpf_get_current_task,
	.ret_type	= RET_INTEGER,
};
BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;

	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return task_under_cgroup_hierarchy(current, cgrp);
}

static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
	.func		= bpf_current_task_under_cgroup,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
};
BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
	   const void *, unsafe_ptr)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_BPF_READ);
	if (ret < 0)
		goto out;

	/*
	 * The strncpy_from_unsafe() call will likely not fill the entire
	 * buffer, but that's okay in this circumstance as we're probing
	 * arbitrary memory anyway similar to bpf_probe_read() and might
	 * as well probe the stack. Thus, memory is explicitly cleared
	 * only in error case, so that improper users ignoring return
	 * code altogether don't copy garbage; otherwise length of string
	 * is returned that can be used for bpf_perf_event_output() et al.
	 */
	ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
	if (unlikely(ret < 0))
out:
		memset(dst, 0, size);

	return ret;
}

static const struct bpf_func_proto bpf_probe_read_str_proto = {
	.func		= bpf_probe_read_str,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_ANYTHING,
};
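
/*
 * Illustrative only (not part of this file): using bpf_probe_read_str() to
 * copy a NUL-terminated string (here the filename argument of a probed
 * function) into a stack buffer. The attach point is hypothetical.
 *
 *	SEC("kprobe/do_sys_open")
 *	int log_filename(struct pt_regs *ctx)
 *	{
 *		char fname[64];
 *		const char *uptr = (const char *)PT_REGS_PARM2(ctx);
 *		int len;
 *
 *		len = bpf_probe_read_str(fname, sizeof(fname), uptr);
 *		// On success 'len' includes the trailing NUL and can be used
 *		// to ship only the used bytes via bpf_perf_event_output().
 *		return 0;
 *	}
 */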
struct send_signal_irq_work {
	struct irq_work irq_work;
	struct task_struct *task;
	u32 sig;
};

static DEFINE_PER_CPU(struct send_signal_irq_work, send_signal_work);
static void do_bpf_send_signal(struct irq_work *entry)
{
	struct send_signal_irq_work *work;

	work = container_of(entry, struct send_signal_irq_work, irq_work);
	group_send_sig_info(work->sig, SEND_SIG_PRIV, work->task, PIDTYPE_TGID);
}
BPF_CALL_1(bpf_send_signal, u32, sig)
{
	struct send_signal_irq_work *work = NULL;

	/* Similar to bpf_probe_write_user, task needs to be
	 * in a sound condition and kernel memory access be
	 * permitted in order to send signal to the current
	 * task.
	 */
	if (unlikely(current->flags & (PF_KTHREAD | PF_EXITING)))
		return -EPERM;
	if (unlikely(uaccess_kernel()))
		return -EPERM;
	if (unlikely(!nmi_uaccess_okay()))
		return -EPERM;

	/* Task should not be pid=1 to avoid kernel panic. */
	if (unlikely(is_global_init(current)))
		return -EPERM;

	if (irqs_disabled()) {
		/* Do an early check on signal validity. Otherwise,
		 * the error is lost in deferred irq_work.
		 */
		if (unlikely(!valid_signal(sig)))
			return -EINVAL;

		work = this_cpu_ptr(&send_signal_work);
		if (work->irq_work.flags & IRQ_WORK_BUSY)
			return -EBUSY;

		/* Add the current task, which is the target of sending signal,
		 * to the irq_work. The current task may change when queued
		 * irq works get executed.
		 */
		work->task = current;
		work->sig = sig;
		irq_work_queue(&work->irq_work);
		return 0;
	}

	return group_send_sig_info(sig, SEND_SIG_PRIV, current, PIDTYPE_TGID);
}
static const struct bpf_func_proto bpf_send_signal_proto = {
	.func		= bpf_send_signal,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
};
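
/*
 * Illustrative only (not part of this file): sending a signal to the task
 * that triggered the probe, so a user-space handler (e.g. for SIGUSR1) can
 * dump state at the exact moment of interest. The attach point is hypothetical.
 *
 *	SEC("kprobe/__x64_sys_brk")
 *	int notify_task(struct pt_regs *ctx)
 *	{
 *		bpf_send_signal(10);	// SIGUSR1 on x86-64
 *		return 0;
 *	}
 */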
static const struct bpf_func_proto *
tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_probe_read:
		return &bpf_probe_read_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_task:
		return &bpf_get_current_task_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_perf_event_read:
		return &bpf_perf_event_read_proto;
	case BPF_FUNC_probe_write_user:
		return bpf_get_probe_write_proto();
	case BPF_FUNC_current_task_under_cgroup:
		return &bpf_current_task_under_cgroup_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_probe_read_str:
		return &bpf_probe_read_str_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
#endif
	case BPF_FUNC_send_signal:
		return &bpf_send_signal_proto;
	default:
		return NULL;
	}
}
static const struct bpf_func_proto *
kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto;
	case BPF_FUNC_perf_event_read_value:
		return &bpf_perf_event_read_value_proto;
#ifdef CONFIG_BPF_KPROBE_OVERRIDE
	case BPF_FUNC_override_return:
		return &bpf_override_return_proto;
#endif
	default:
		return tracing_func_proto(func_id, prog);
	}
}
/* bpf+kprobe programs can access fields of 'struct pt_regs' */
static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(struct pt_regs))
		return false;
	if (type != BPF_READ)
		return false;

	/*
	 * Assertion for 32 bit to make sure last 8 byte access
	 * (BPF_DW) to the last 4 byte member is disallowed.
	 */
	if (off + size > sizeof(struct pt_regs))
		return false;

	return true;
}

const struct bpf_verifier_ops kprobe_verifier_ops = {
	.get_func_proto		= kprobe_prog_func_proto,
	.is_valid_access	= kprobe_prog_is_valid_access,
};

const struct bpf_prog_ops kprobe_prog_ops = {
};
BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
	 * from there and call the same bpf_perf_event_output() helper inline.
	 */
	return ____bpf_perf_event_output(regs, map, flags, data, size);
}

static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
	.func		= bpf_perf_event_output_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	/*
	 * Same comment as in bpf_perf_event_output_tp(), only that this time
	 * the other helper's function body cannot be inlined due to being
	 * external, thus we need to call raw helper function.
	 */
	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			       flags, 0, 0);
}

static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
	.func		= bpf_get_stackid_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
	   u64, flags)
{
	struct pt_regs *regs = *(struct pt_regs **)tp_buff;

	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			     (unsigned long) size, flags, 0);
}

static const struct bpf_func_proto bpf_get_stack_proto_tp = {
	.func		= bpf_get_stack_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
		return false;
	if (type != BPF_READ)
		return false;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
	return true;
}

const struct bpf_verifier_ops tracepoint_verifier_ops = {
	.get_func_proto		= tp_prog_func_proto,
	.is_valid_access	= tp_prog_is_valid_access,
};

const struct bpf_prog_ops tracepoint_prog_ops = {
};
BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_perf_event_value *, buf, u32, size)
{
	int err = -EINVAL;

	if (unlikely(size != sizeof(struct bpf_perf_event_value)))
		goto clear;
	err = perf_event_read_local(ctx->event, &buf->counter, &buf->enabled,
				    &buf->running);
	if (unlikely(err))
		goto clear;
	return 0;
clear:
	memset(buf, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
	.func		= bpf_perf_prog_read_value,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};
static const struct bpf_func_proto *
pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_tp;
	case BPF_FUNC_perf_prog_read_value:
		return &bpf_perf_prog_read_value_proto;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
/*
 * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
 * to avoid potential recursive reuse issue when/if tracepoints are added
 * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
 *
 * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
 * in normal, irq, and nmi context.
 */
struct bpf_raw_tp_regs {
	struct pt_regs regs[3];
};
static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
static struct pt_regs *get_bpf_raw_tp_regs(void)
{
	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);

	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
		this_cpu_dec(bpf_raw_tp_nest_level);
		return ERR_PTR(-EBUSY);
	}

	return &tp_regs->regs[nest_level - 1];
}

static void put_bpf_raw_tp_regs(void)
{
	this_cpu_dec(bpf_raw_tp_nest_level);
}
BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = ____bpf_perf_event_output(regs, map, flags, data, size);

	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
	.func		= bpf_perf_event_output_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};
BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   struct bpf_map *, map, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
			      flags, 0, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
	.func		= bpf_get_stackid_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = get_bpf_raw_tp_regs();
	int ret;

	if (IS_ERR(regs))
		return PTR_ERR(regs);

	perf_fetch_caller_regs(regs);
	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
			    (unsigned long) size, flags, 0);
	put_bpf_raw_tp_regs();
	return ret;
}

static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
	.func		= bpf_get_stack_raw_tp,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};
static const struct bpf_func_proto *
raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_perf_event_output_proto_raw_tp;
	case BPF_FUNC_get_stackid:
		return &bpf_get_stackid_proto_raw_tp;
	case BPF_FUNC_get_stack:
		return &bpf_get_stack_proto_raw_tp;
	default:
		return tracing_func_proto(func_id, prog);
	}
}
static bool raw_tp_prog_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	/* largest tracepoint in the kernel has 12 args */
	if (off < 0 || off >= sizeof(__u64) * 12)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	return true;
}

const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_prog_ops = {
};
static bool raw_tp_writable_prog_is_valid_access(int off, int size,
						 enum bpf_access_type type,
						 const struct bpf_prog *prog,
						 struct bpf_insn_access_aux *info)
{
	if (off == 0) {
		if (size != sizeof(u64) || type != BPF_READ)
			return false;
		info->reg_type = PTR_TO_TP_BUFFER;
	}
	return raw_tp_prog_is_valid_access(off, size, type, prog, info);
}

const struct bpf_verifier_ops raw_tracepoint_writable_verifier_ops = {
	.get_func_proto		= raw_tp_prog_func_proto,
	.is_valid_access	= raw_tp_writable_prog_is_valid_access,
};

const struct bpf_prog_ops raw_tracepoint_writable_prog_ops = {
};
static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_u64 = sizeof(u64);

	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0) {
		if (sizeof(unsigned long) != 4)
			return false;
		if (size != 8)
			return false;
		if (off % size != 4)
			return false;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_perf_event_data, sample_period):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	case bpf_ctx_range(struct bpf_perf_event_data, addr):
		bpf_ctx_record_field_size(info, size_u64);
		if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
			return false;
		break;
	default:
		if (size != sizeof(long))
			return false;
	}

	return true;
}
static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
				      const struct bpf_insn *si,
				      struct bpf_insn *insn_buf,
				      struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_perf_event_data, sample_period):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, period, 8,
						     target_size));
		break;
	case offsetof(struct bpf_perf_event_data, addr):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       data), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, data));
		*insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct perf_sample_data, addr, 8,
						     target_size));
		break;
	default:
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
						       regs), si->dst_reg, si->src_reg,
				      offsetof(struct bpf_perf_event_data_kern, regs));
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
				      si->off);
		break;
	}

	return insn - insn_buf;
}
const struct bpf_verifier_ops perf_event_verifier_ops = {
	.get_func_proto		= pe_prog_func_proto,
	.is_valid_access	= pe_prog_is_valid_access,
	.convert_ctx_access	= pe_prog_convert_ctx_access,
};

const struct bpf_prog_ops perf_event_prog_ops = {
};
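
/*
 * Illustrative only (not part of this file): a BPF_PROG_TYPE_PERF_EVENT
 * program reads struct bpf_perf_event_data fields such as sample_period;
 * the loads are rewritten by pe_prog_convert_ctx_access() above into
 * accesses through bpf_perf_event_data_kern. The section name is hypothetical.
 *
 *	SEC("perf_event")
 *	int on_sample(struct bpf_perf_event_data *ctx)
 *	{
 *		u64 period = ctx->sample_period;
 *		u64 ip = PT_REGS_IP(&ctx->regs);
 *
 *		// e.g. aggregate samples keyed by 'ip' into a map here
 *		return 0;
 *	}
 */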
static DEFINE_MUTEX(bpf_event_mutex);

#define BPF_TRACE_MAX_PROGS 64
int perf_event_attach_bpf_prog(struct perf_event *event,
			       struct bpf_prog *prog)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret = -EEXIST;

	/*
	 * Kprobe override only works if they are on the function entry,
	 * and only if they are on the opt-in list.
	 */
	if (prog->kprobe_override &&
	    (!trace_kprobe_on_func_entry(event->tp_event) ||
	     !trace_kprobe_error_injectable(event->tp_event)))
		return -EINVAL;

	mutex_lock(&bpf_event_mutex);

	if (event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	if (old_array &&
	    bpf_prog_array_length(old_array) >= BPF_TRACE_MAX_PROGS) {
		ret = -E2BIG;
		goto unlock;
	}

	ret = bpf_prog_array_copy(old_array, NULL, prog, &new_array);
	if (ret < 0)
		goto unlock;

	/* set the new array to event->tp_event and set event->prog */
	event->prog = prog;
	rcu_assign_pointer(event->tp_event->prog_array, new_array);
	bpf_prog_array_free(old_array);

unlock:
	mutex_unlock(&bpf_event_mutex);
	return ret;
}
void perf_event_detach_bpf_prog(struct perf_event *event)
{
	struct bpf_prog_array *old_array;
	struct bpf_prog_array *new_array;
	int ret;

	mutex_lock(&bpf_event_mutex);

	if (!event->prog)
		goto unlock;

	old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy(old_array, event->prog, NULL, &new_array);
	if (ret < 0) {
		bpf_prog_array_delete_safe(old_array, event->prog);
	} else {
		rcu_assign_pointer(event->tp_event->prog_array, new_array);
		bpf_prog_array_free(old_array);
	}

	bpf_prog_put(event->prog);
	event->prog = NULL;

unlock:
	mutex_unlock(&bpf_event_mutex);
}
int perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
	struct perf_event_query_bpf __user *uquery = info;
	struct perf_event_query_bpf query = {};
	struct bpf_prog_array *progs;
	u32 *ids, prog_cnt, ids_len;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;
	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;

	ids_len = query.ids_len;
	if (ids_len > BPF_TRACE_MAX_PROGS)
		return -E2BIG;
	ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
	if (!ids)
		return -ENOMEM;
	/*
	 * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
	 * is required when user only wants to check for uquery->prog_cnt.
	 * There is no need to check for it since the case is handled
	 * gracefully in bpf_prog_array_copy_info.
	 */

	mutex_lock(&bpf_event_mutex);
	progs = bpf_event_rcu_dereference(event->tp_event->prog_array);
	ret = bpf_prog_array_copy_info(progs, ids, ids_len, &prog_cnt);
	mutex_unlock(&bpf_event_mutex);

	if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
	    copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
		ret = -EFAULT;

	kfree(ids);
	return ret;
}
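
/*
 * Illustrative only (not part of this file): how user space can query the
 * programs attached to a tracepoint perf event through the
 * PERF_EVENT_IOC_QUERY_BPF ioctl served by the function above. Error
 * handling is elided; 'perf_fd' is a hypothetical tracepoint event fd.
 *
 *	struct perf_event_query_bpf *q;
 *	__u32 max = 16;
 *
 *	q = calloc(1, sizeof(*q) + max * sizeof(__u32));
 *	q->ids_len = max;
 *	if (ioctl(perf_fd, PERF_EVENT_IOC_QUERY_BPF, q) == 0)
 *		printf("%u programs attached\n", q->prog_cnt);
 *	free(q);
 */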
extern struct bpf_raw_event_map __start__bpf_raw_tp[];
extern struct bpf_raw_event_map __stop__bpf_raw_tp[];

struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
{
	struct bpf_raw_event_map *btp = __start__bpf_raw_tp;

	for (; btp < __stop__bpf_raw_tp; btp++) {
		if (!strcmp(btp->tp->name, name))
			return btp;
	}

	return bpf_get_raw_tracepoint_module(name);
}

void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
{
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)btp);
	module_put(mod);
	preempt_enable();
}

static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
	rcu_read_lock();
	preempt_disable();
	(void) BPF_PROG_RUN(prog, args);
	preempt_enable();
	rcu_read_unlock();
}
#define UNPACK(...)			__VA_ARGS__
#define REPEAT_1(FN, DL, X, ...)	FN(X)
#define REPEAT_2(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
#define REPEAT_3(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
#define REPEAT_4(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
#define REPEAT_5(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
#define REPEAT_6(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
#define REPEAT_7(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
#define REPEAT_8(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
#define REPEAT_9(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
#define REPEAT_10(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
#define REPEAT_11(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
#define REPEAT_12(FN, DL, X, ...)	FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
#define REPEAT(X, FN, DL, ...)		REPEAT_##X(FN, DL, __VA_ARGS__)

#define SARG(X)		u64 arg##X
#define COPY(X)		args[X] = arg##X

#define __DL_COM	(,)
#define __DL_SEM	(;)

#define __SEQ_0_11	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
#define BPF_TRACE_DEFN_x(x)						\
	void bpf_trace_run##x(struct bpf_prog *prog,			\
			      REPEAT(x, SARG, __DL_COM, __SEQ_0_11))	\
	{								\
		u64 args[x];						\
		REPEAT(x, COPY, __DL_SEM, __SEQ_0_11);			\
		__bpf_trace_run(prog, args);				\
	}								\
	EXPORT_SYMBOL_GPL(bpf_trace_run##x)
BPF_TRACE_DEFN_x(1);
BPF_TRACE_DEFN_x(2);
BPF_TRACE_DEFN_x(3);
BPF_TRACE_DEFN_x(4);
BPF_TRACE_DEFN_x(5);
BPF_TRACE_DEFN_x(6);
BPF_TRACE_DEFN_x(7);
BPF_TRACE_DEFN_x(8);
BPF_TRACE_DEFN_x(9);
BPF_TRACE_DEFN_x(10);
BPF_TRACE_DEFN_x(11);
BPF_TRACE_DEFN_x(12);
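
/*
 * For reference (not part of this file): with the REPEAT()/SARG()/COPY()
 * machinery above, BPF_TRACE_DEFN_x(2) expands to roughly the following,
 * which is what a two-argument raw tracepoint ends up calling:
 *
 *	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
 *	{
 *		u64 args[2];
 *
 *		args[0] = arg0;
 *		args[1] = arg1;
 *		__bpf_trace_run(prog, args);
 *	}
 *	EXPORT_SYMBOL_GPL(bpf_trace_run2);
 */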
static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	struct tracepoint *tp = btp->tp;

	/*
	 * check that program doesn't access arguments beyond what's
	 * available in this tracepoint
	 */
	if (prog->aux->max_ctx_offset > btp->num_args * sizeof(u64))
		return -EINVAL;

	if (prog->aux->max_tp_access > btp->writable_size)
		return -EINVAL;

	return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
						   prog);
}

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
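
/*
 * Illustrative only (not part of this file): a raw tracepoint program sees
 * the tracepoint arguments as an array of u64 in struct
 * bpf_raw_tracepoint_args; __bpf_probe_register() above rejects programs
 * whose max_ctx_offset would read past btp->num_args * sizeof(u64).
 *
 *	SEC("raw_tracepoint/sched_switch")
 *	int on_switch(struct bpf_raw_tracepoint_args *ctx)
 *	{
 *		// For sched_switch: args[1] = prev, args[2] = next
 *		struct task_struct *next = (struct task_struct *)ctx->args[2];
 *		u32 pid = 0;
 *
 *		bpf_probe_read(&pid, sizeof(pid), &next->pid);
 *		return 0;
 *	}
 */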
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
			    u32 *fd_type, const char **buf,
			    u64 *probe_offset, u64 *probe_addr)
{
	bool is_tracepoint, is_syscall_tp;
	struct bpf_prog *prog;
	int flags, err = 0;

	prog = event->prog;
	if (!prog)
		return -ENOENT;

	/* not supporting BPF_PROG_TYPE_PERF_EVENT yet */
	if (prog->type == BPF_PROG_TYPE_PERF_EVENT)
		return -EOPNOTSUPP;

	*prog_id = prog->aux->id;
	flags = event->tp_event->flags;
	is_tracepoint = flags & TRACE_EVENT_FL_TRACEPOINT;
	is_syscall_tp = is_syscall_trace_event(event->tp_event);

	if (is_tracepoint || is_syscall_tp) {
		*buf = is_tracepoint ? event->tp_event->tp->name
				     : event->tp_event->name;
		*fd_type = BPF_FD_TYPE_TRACEPOINT;
		*probe_offset = 0x0;
		*probe_addr = 0x0;
	} else {
		/* kprobe/uprobe */
		err = -EOPNOTSUPP;
#ifdef CONFIG_KPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_KPROBE)
			err = bpf_get_kprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
		if (flags & TRACE_EVENT_FL_UPROBE)
			err = bpf_get_uprobe_info(event, fd_type, buf,
						  probe_offset, probe_addr,
						  event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
	}

	return err;
}
static int __init send_signal_irq_work_init(void)
{
	int cpu;
	struct send_signal_irq_work *work;

	for_each_possible_cpu(cpu) {
		work = per_cpu_ptr(&send_signal_work, cpu);
		init_irq_work(&work->irq_work, do_bpf_send_signal);
	}
	return 0;
}

subsys_initcall(send_signal_irq_work_init);
#ifdef CONFIG_MODULES
static int bpf_event_notify(struct notifier_block *nb, unsigned long op,
			    void *module)
{
	struct bpf_trace_module *btm, *tmp;
	struct module *mod = module;

	if (mod->num_bpf_raw_events == 0 ||
	    (op != MODULE_STATE_COMING && op != MODULE_STATE_GOING))
		return 0;

	mutex_lock(&bpf_module_mutex);

	switch (op) {
	case MODULE_STATE_COMING:
		btm = kzalloc(sizeof(*btm), GFP_KERNEL);
		if (btm) {
			btm->module = module;
			list_add(&btm->list, &bpf_trace_modules);
		}
		break;
	case MODULE_STATE_GOING:
		list_for_each_entry_safe(btm, tmp, &bpf_trace_modules, list) {
			if (btm->module == module) {
				list_del(&btm->list);
				kfree(btm);
				break;
			}
		}
		break;
	}

	mutex_unlock(&bpf_module_mutex);

	return 0;
}

static struct notifier_block bpf_module_nb = {
	.notifier_call = bpf_event_notify,
};

static int __init bpf_event_init(void)
{
	register_module_notifier(&bpf_module_nb);
	return 0;
}

fs_initcall(bpf_event_init);
#endif /* CONFIG_MODULES */