// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019 Facebook */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>
#include <linux/module.h>
#include <linux/static_call.h>

/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
};
const struct bpf_prog_ops bpf_extension_prog_ops = {
};

/* btf_vmlinux has ~22k attachable functions. 1k htab is enough. */
#define TRAMPOLINE_HASH_BITS 10
#define TRAMPOLINE_TABLE_SIZE (1 << TRAMPOLINE_HASH_BITS)

static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];

/* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex);

bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
	enum bpf_attach_type eatype = prog->expected_attach_type;

	return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
	       eatype == BPF_MODIFY_RETURN;
}

void *bpf_jit_alloc_exec_page(void)
{
	void *image;

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image)
		return NULL;

	set_vm_flush_reset_perms(image);
	/* Keep image as writeable. The alternative is to keep flipping ro/rw
	 * every time a new program is attached or detached.
	 */
	set_memory_x((long)image, 1);
	return image;
}

void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym)
{
	ksym->start = (unsigned long) data;
	ksym->end = ksym->start + PAGE_SIZE;
	bpf_ksym_add(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, false, ksym->name);
}

void bpf_image_ksym_del(struct bpf_ksym *ksym)
{
	bpf_ksym_del(ksym);
	perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_BPF, ksym->start,
			   PAGE_SIZE, true, ksym->name);
}

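/* Find the trampoline for @key or create a new one. A reference is taken on
 * the returned trampoline; callers pair it with bpf_trampoline_put().
 */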
static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
	struct bpf_trampoline *tr;
	struct hlist_head *head;
	int i;

	mutex_lock(&trampoline_mutex);
	head = &trampoline_table[hash_64(key, TRAMPOLINE_HASH_BITS)];
	hlist_for_each_entry(tr, head, hlist) {
		if (tr->key == key) {
			refcount_inc(&tr->refcnt);
			goto out;
		}
	}
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out;

	tr->key = key;
	INIT_HLIST_NODE(&tr->hlist);
	hlist_add_head(&tr->hlist, head);
	refcount_set(&tr->refcnt, 1);
	mutex_init(&tr->mutex);
	for (i = 0; i < BPF_TRAMP_MAX; i++)
		INIT_HLIST_HEAD(&tr->progs_hlist[i]);
out:
	mutex_unlock(&trampoline_mutex);
	return tr;
}

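/* Pin the module that contains the attach address so it cannot be unloaded
 * while the trampoline is patched into it; undone by
 * bpf_trampoline_module_put().
 */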
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
{
	struct module *mod;
	int err = 0;

	preempt_disable();
	mod = __module_text_address((unsigned long) tr->func.addr);
	if (mod && !try_module_get(mod))
		err = -ENOENT;
	preempt_enable();
	tr->mod = mod;
	return err;
}

static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
{
	module_put(tr->mod);
	tr->mod = NULL;
}

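/* Returns 1 if @ip is an ftrace location (the ftrace direct API can be used),
 * 0 if it is not, or a negative error if the ftrace record does not start
 * exactly at @ip.
 */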
static int is_ftrace_location(void *ip)
{
	long addr;

	addr = ftrace_location((long)ip);
	if (!addr)
		return 0;
	if (WARN_ON_ONCE(addr != (long)ip))
		return -EFAULT;
	return 1;
}

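/* The target function is patched either through the ftrace direct-call API
 * (when the address is ftrace-managed) or by poking the call instruction
 * directly with bpf_arch_text_poke().
 */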
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = unregister_ftrace_direct((long)ip, (long)old_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);

	if (!ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

static int modify_fentry(struct bpf_trampoline *tr, void *old_addr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	if (tr->func.ftrace_managed)
		ret = modify_ftrace_direct((long)ip, (long)old_addr, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, new_addr);
	return ret;
}

/* first time registering */
static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
{
	void *ip = tr->func.addr;
	int ret;

	ret = is_ftrace_location(ip);
	if (ret < 0)
		return ret;
	tr->func.ftrace_managed = ret;

	if (bpf_trampoline_module_get(tr))
		return -ENOENT;

	if (tr->func.ftrace_managed)
		ret = register_ftrace_direct((long)ip, (long)new_addr);
	else
		ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);

	if (ret)
		bpf_trampoline_module_put(tr);
	return ret;
}

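/* Collect the programs attached to @tr, grouped by kind (fentry, fmod_ret,
 * fexit), into an array suitable for arch_prepare_bpf_trampoline().
 * *total is set to the overall number of programs and *ip_arg is set if any
 * of them needs the traced function's IP as an argument.
 */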
static struct bpf_tramp_progs *
bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total, bool *ip_arg)
{
	const struct bpf_prog_aux *aux;
	struct bpf_tramp_progs *tprogs;
	struct bpf_prog **progs;
	int kind;

	*total = 0;
	tprogs = kcalloc(BPF_TRAMP_MAX, sizeof(*tprogs), GFP_KERNEL);
	if (!tprogs)
		return ERR_PTR(-ENOMEM);

	for (kind = 0; kind < BPF_TRAMP_MAX; kind++) {
		tprogs[kind].nr_progs = tr->progs_cnt[kind];
		*total += tr->progs_cnt[kind];
		progs = tprogs[kind].progs;

		hlist_for_each_entry(aux, &tr->progs_hlist[kind], tramp_hlist) {
			*ip_arg |= aux->prog->call_get_func_ip;
			*progs++ = aux->prog;
		}
	}
	return tprogs;
}

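/* Final teardown step for a trampoline image. Runs from a workqueue because
 * freeing the executable image may sleep, which is not allowed in the RCU
 * callbacks that schedule this work.
 */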
static void __bpf_tramp_image_put_deferred(struct work_struct *work)
{
	struct bpf_tramp_image *im;

	im = container_of(work, struct bpf_tramp_image, work);
	bpf_image_ksym_del(&im->ksym);
	bpf_jit_free_exec(im->image);
	bpf_jit_uncharge_modmem(1);
	percpu_ref_exit(&im->pcref);
	kfree_rcu(im, rcu);
}

/* callback, fexit step 3 or fentry step 2 */
static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
	schedule_work(&im->work);
}

/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
static void __bpf_tramp_image_release(struct percpu_ref *pcref)
{
	struct bpf_tramp_image *im;

	im = container_of(pcref, struct bpf_tramp_image, pcref);
	call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

/* callback, fexit or fentry step 1 */
static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
{
	struct bpf_tramp_image *im;

	im = container_of(rcu, struct bpf_tramp_image, rcu);
	if (im->ip_after_call)
		/* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
		percpu_ref_kill(&im->pcref);
	else
		/* the case of fentry trampoline */
		call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
}

static void bpf_tramp_image_put(struct bpf_tramp_image *im)
{
	/* The trampoline image that calls original function is using:
	 * rcu_read_lock_trace to protect sleepable bpf progs
	 * rcu_read_lock to protect normal bpf progs
	 * percpu_ref to protect trampoline itself
	 * rcu tasks to protect trampoline asm not covered by percpu_ref
	 * (which are few asm insns before __bpf_tramp_enter and
	 *  after __bpf_tramp_exit)
	 *
	 * The trampoline is unreachable before bpf_tramp_image_put().
	 *
	 * First, patch the trampoline to avoid calling into fexit progs.
	 * The progs will be freed even if the original function is still
	 * executing or sleeping.
	 * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait on the
	 * first few asm instructions to execute and call into
	 * __bpf_tramp_enter->percpu_ref_get.
	 * Then use percpu_ref_kill to wait for the trampoline and the original
	 * function to finish.
	 * Then use call_rcu_tasks() to make sure few asm insns in
	 * the trampoline epilogue are done as well.
	 *
	 * In !PREEMPT case the task that got interrupted in the first asm
	 * insns won't go through an RCU quiescent state which the
	 * percpu_ref_kill will be waiting for. Hence the first
	 * call_rcu_tasks() is not necessary.
	 */
	if (im->ip_after_call) {
		int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
					     NULL, im->ip_epilogue);
		WARN_ON(err);
		if (IS_ENABLED(CONFIG_PREEMPTION))
			call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
		else
			percpu_ref_kill(&im->pcref);
		return;
	}

	/* The trampoline without fexit and fmod_ret progs doesn't call original
	 * function and doesn't use percpu_ref.
	 * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * Then use call_rcu_tasks() to wait for the rest of trampoline asm
	 * and normal progs.
	 */
	call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
}

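/* Allocate and account one executable page for a trampoline image and
 * publish it as the kernel symbol "bpf_trampoline_<key>_<idx>" so that it is
 * visible to perf and kallsyms.
 */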
static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
{
	struct bpf_tramp_image *im;
	struct bpf_ksym *ksym;
	void *image;
	int err = -ENOMEM;

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	err = bpf_jit_charge_modmem(1);
	if (err)
		goto out_free_im;

	err = -ENOMEM;
	im->image = image = bpf_jit_alloc_exec_page();
	if (!image)
		goto out_uncharge;

	err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
	if (err)
		goto out_free_image;

	ksym = &im->ksym;
	INIT_LIST_HEAD_RCU(&ksym->lnode);
	snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
	bpf_image_ksym_add(image, ksym);
	return im;

out_free_image:
	bpf_jit_free_exec(im->image);
out_uncharge:
	bpf_jit_uncharge_modmem(1);
out_free_im:
	kfree(im);
out:
	return ERR_PTR(err);
}

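/* Regenerate the trampoline image for the current set of attached programs
 * and switch the target function over to it. Called with tr->mutex held.
 */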
static int bpf_trampoline_update(struct bpf_trampoline *tr)
{
	struct bpf_tramp_image *im;
	struct bpf_tramp_progs *tprogs;
	u32 flags = BPF_TRAMP_F_RESTORE_REGS;
	bool ip_arg = false;
	int err, total;

	tprogs = bpf_trampoline_get_progs(tr, &total, &ip_arg);
	if (IS_ERR(tprogs))
		return PTR_ERR(tprogs);

	if (total == 0) {
		err = unregister_fentry(tr, tr->cur_image->image);
		bpf_tramp_image_put(tr->cur_image);
		tr->cur_image = NULL;
		tr->selector = 0;
		goto out;
	}

	im = bpf_tramp_image_alloc(tr->key, tr->selector);
	if (IS_ERR(im)) {
		err = PTR_ERR(im);
		goto out;
	}

	if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
	    tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
		flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;

	if (ip_arg)
		flags |= BPF_TRAMP_F_IP_ARG;

	err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
					  &tr->func.model, flags, tprogs,
					  tr->func.addr);
	if (err < 0)
		goto out;

	WARN_ON(tr->cur_image && tr->selector == 0);
	WARN_ON(!tr->cur_image && tr->selector);
	if (tr->cur_image)
		/* progs already running at this address */
		err = modify_fentry(tr, tr->cur_image->image, im->image);
	else
		/* first time registering */
		err = register_fentry(tr, im->image);
	if (err)
		goto out;
	if (tr->cur_image)
		bpf_tramp_image_put(tr->cur_image);
	tr->cur_image = im;
	tr->selector++;
out:
	kfree(tprogs);
	return err;
}

static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
{
	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
		return BPF_TRAMP_FENTRY;
	case BPF_MODIFY_RETURN:
		return BPF_TRAMP_MODIFY_RETURN;
	case BPF_TRACE_FEXIT:
		return BPF_TRAMP_FEXIT;
	case BPF_LSM_MAC:
		if (!prog->aux->attach_func_proto->type)
			/* The function returns void, we cannot modify its
			 * return value.
			 */
			return BPF_TRAMP_FEXIT;
		else
			return BPF_TRAMP_MODIFY_RETURN;
	default:
		return BPF_TRAMP_REPLACE;
	}
}

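/* Attach @prog to the trampoline. Extension (freplace) programs are mutually
 * exclusive with fentry/fexit programs on the same target.
 */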
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err = 0;
	int cnt;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (tr->extension_prog) {
		/* cannot attach fentry/fexit if extension prog is attached.
		 * cannot overwrite extension prog either.
		 */
		err = -EBUSY;
		goto out;
	}
	cnt = tr->progs_cnt[BPF_TRAMP_FENTRY] + tr->progs_cnt[BPF_TRAMP_FEXIT];
	if (kind == BPF_TRAMP_REPLACE) {
		/* Cannot attach extension if fentry/fexit are in use. */
		if (cnt) {
			err = -EBUSY;
			goto out;
		}
		tr->extension_prog = prog;
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP, NULL,
					 prog->bpf_func);
		goto out;
	}
	if (cnt >= BPF_MAX_TRAMP_PROGS) {
		err = -E2BIG;
		goto out;
	}
	if (!hlist_unhashed(&prog->aux->tramp_hlist)) {
		/* prog already linked */
		err = -EBUSY;
		goto out;
	}
	hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
	tr->progs_cnt[kind]++;
	err = bpf_trampoline_update(tr);
	if (err) {
		hlist_del_init(&prog->aux->tramp_hlist);
		tr->progs_cnt[kind]--;
	}
out:
	mutex_unlock(&tr->mutex);
	return err;
}

/* bpf_trampoline_unlink_prog() should never fail. */
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
	enum bpf_tramp_prog_type kind;
	int err;

	kind = bpf_attach_type_to_tramp(prog);
	mutex_lock(&tr->mutex);
	if (kind == BPF_TRAMP_REPLACE) {
		WARN_ON_ONCE(!tr->extension_prog);
		err = bpf_arch_text_poke(tr->func.addr, BPF_MOD_JUMP,
					 tr->extension_prog->bpf_func, NULL);
		tr->extension_prog = NULL;
		goto out;
	}
	hlist_del_init(&prog->aux->tramp_hlist);
	tr->progs_cnt[kind]--;
	err = bpf_trampoline_update(tr);
out:
	mutex_unlock(&tr->mutex);
	return err;
}

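/* Look up (or create) the trampoline for @key and record the target's
 * function model and address on first use. Returns NULL on allocation
 * failure.
 */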
struct bpf_trampoline *bpf_trampoline_get(u64 key,
					  struct bpf_attach_target_info *tgt_info)
{
	struct bpf_trampoline *tr;

	tr = bpf_trampoline_lookup(key);
	if (!tr)
		return NULL;

	mutex_lock(&tr->mutex);
	if (tr->func.addr)
		goto out;

	memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
	tr->func.addr = (void *)tgt_info->tgt_addr;
out:
	mutex_unlock(&tr->mutex);
	return tr;
}

void bpf_trampoline_put(struct bpf_trampoline *tr)
{
	if (!tr)
		return;
	mutex_lock(&trampoline_mutex);
	if (!refcount_dec_and_test(&tr->refcnt))
		goto out;
	WARN_ON_ONCE(mutex_is_locked(&tr->mutex));
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FENTRY])))
		goto out;
	if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
		goto out;
	/* This code will be executed even when the last bpf_tramp_image
	 * is alive. All progs are detached from the trampoline and the
	 * trampoline image is patched with jmp into epilogue to skip
	 * fexit progs. The fentry-only trampoline will be freed via
	 * multiple rcu callbacks.
	 */
	hlist_del(&tr->hlist);
	kfree(tr);
out:
	mutex_unlock(&trampoline_mutex);
}

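/* Per-program run-time stats are based on sched_clock() timestamps.
 * A return value of 0 from __bpf_prog_enter* means "skip the prog", so
 * NO_START_TIME (1) is used as the "run, but don't record" sentinel; see the
 * comment above __bpf_prog_enter() for the full contract.
 */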
#define NO_START_TIME 1
static __always_inline u64 notrace bpf_prog_start_time(void)
{
	u64 start = NO_START_TIME;

	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		start = sched_clock();
		if (unlikely(!start))
			start = NO_START_TIME;
	}
	return start;
}

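/* Account a recursion miss: the prog was already active on this CPU, so this
 * invocation is skipped and only counted in the stats.
 */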
static void notrace inc_misses_counter(struct bpf_prog *prog)
{
	struct bpf_prog_stats *stats;
	unsigned long flags;

	stats = this_cpu_ptr(prog->stats);
	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->misses);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);
}

/* The logic is similar to bpf_prog_run(), but with an explicit
 * rcu_read_lock() and migrate_disable() which are required
 * for the trampoline. The macro is split into
 * call __bpf_prog_enter
 * call prog->bpf_func
 * call __bpf_prog_exit
 *
 * __bpf_prog_enter returns:
 * 0 - skip execution of the bpf prog
 * 1 - execute bpf prog
 * [2..MAX_U64] - execute bpf prog and record execution time.
 *     This is start time.
 */
u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
	__acquires(RCU)
{
	rcu_read_lock();
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

static void notrace update_prog_stats(struct bpf_prog *prog,
				      u64 start)
{
	struct bpf_prog_stats *stats;

	if (static_branch_unlikely(&bpf_stats_enabled_key) &&
	    /* static_key could be enabled in __bpf_prog_enter*
	     * and disabled in __bpf_prog_exit*.
	     * Hence check that 'start' is valid.
	     */
	    start > NO_START_TIME) {
		unsigned long flags;

		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	}
}

void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
	__releases(RCU)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock();
}

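/* Sleepable programs are protected by rcu_read_lock_trace() instead of
 * rcu_read_lock(), so they are allowed to fault and sleep while attached.
 */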
u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();
	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
		inc_misses_counter(prog);
		return 0;
	}
	return bpf_prog_start_time();
}

void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
{
	update_prog_stats(prog, start);
	__this_cpu_dec(*(prog->active));
	migrate_enable();
	rcu_read_unlock_trace();
}

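/* Called from the generated trampoline code around the call to the original
 * function; the percpu_ref keeps the trampoline image alive while the
 * original function and fexit progs may still be running.
 */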
void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
{
	percpu_ref_get(&tr->pcref);
}

void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
{
	percpu_ref_put(&tr->pcref);
}

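/* Weak stub for architectures without trampoline support; attaching
 * fentry/fexit programs fails with -ENOTSUPP there.
 */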
int __weak
arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
			    const struct btf_func_model *m, u32 flags,
			    struct bpf_tramp_progs *tprogs,
			    void *orig_call)
{
	return -ENOTSUPP;
}

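/* The hash buckets are initialized at late_initcall time, before any
 * fentry/fexit program can be attached via the bpf() syscall.
 */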
static int __init init_trampolines(void)
{
	int i;

	for (i = 0; i < TRAMPOLINE_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&trampoline_table[i]);
	return 0;
}
late_initcall(init_trampolines);