// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the fly.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/vmalloc.h>

#include <trace/syscall.h>

#include <asm/set_memory.h>
#include <asm/kprobes.h>
#include <asm/ftrace.h>
#include <asm/nops.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE
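
/*
 * Set while an ftrace text-modify transaction is in progress (between
 * ftrace_arch_code_modify_prepare() and _post_process()): while non-zero,
 * patches are batched with text_poke_queue() rather than written
 * immediately with text_poke_early().
 */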
static int ftrace_poke_late = 0;

int ftrace_arch_code_modify_prepare(void)
    __acquires(&text_mutex)
{
	/*
	 * Need to grab text_mutex to prevent a race from module loading
	 * and live kernel patching from changing the text permissions while
	 * ftrace has it set to "read/write".
	 */
	mutex_lock(&text_mutex);
	ftrace_poke_late = 1;
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
    __releases(&text_mutex)
{
	/*
	 * ftrace_make_{call,nop}() may be called during
	 * module load, and we need to finish the text_poke_queue()
	 * that they do, here.
	 */
	text_poke_finish();
	ftrace_poke_late = 0;
	mutex_unlock(&text_mutex);
	return 0;
}

static const char *ftrace_nop_replace(void)
{
	return x86_nops[5];
}

static const char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(CALL_INSN_OPCODE, (void *)ip, (void *)addr);
}
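
/*
 * For example, patching a call at ip 0xffffffff81000100 that targets
 * 0xffffffff81000200 yields the five bytes e8 fb 00 00 00, since
 * rel32 = addr - (ip + CALL_INSN_SIZE) = 0xfb.
 */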

static int ftrace_verify_code(unsigned long ip, const char *old_code)
{
	char cur_code[MCOUNT_INSN_SIZE];

	/*
	 * Note:
	 * We are paranoid about modifying text, as if a bug were to happen, it
	 * could cause us to read or write to someplace that could cause harm.
	 * Carefully read and modify the code with copy_from_kernel_nofault(),
	 * and make sure what we read is what we expected it to be before
	 * modifying it.
	 */
	/* read the text we want to modify */
	if (copy_from_kernel_nofault(cur_code, (void *)ip, MCOUNT_INSN_SIZE)) {
		WARN_ON(1);
		return -EFAULT;
	}

	/* Make sure it is what we expect it to be */
	if (memcmp(cur_code, old_code, MCOUNT_INSN_SIZE) != 0) {
		ftrace_expected = old_code;
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

/*
 * Marked __ref because it calls text_poke_early() which is .init.text. That is
 * ok because that call will happen early, during boot, when .init sections are
 * still present.
 */
static int __ref
ftrace_modify_code_direct(unsigned long ip, const char *old_code,
			  const char *new_code)
{
	int ret = ftrace_verify_code(ip, old_code);
	if (ret)
		return ret;

	/* replace the text with the new text */
	if (ftrace_poke_late)
		text_poke_queue((void *)ip, new_code, MCOUNT_INSN_SIZE, NULL);
	else
		text_poke_early((void *)ip, new_code, MCOUNT_INSN_SIZE);
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_call_replace(ip, addr);
	new = ftrace_nop_replace();

	/*
	 * On boot up, and when modules are loaded, the MCOUNT_ADDR
	 * is converted to a nop, and will never become MCOUNT_ADDR
	 * again. This code is either running before SMP (on boot up)
	 * or before the code will ever be executed (module load).
	 * We do not want to use the breakpoint version in this case,
	 * just modify the code directly.
	 */
	if (addr == MCOUNT_ADDR)
		return ftrace_modify_code_direct(ip, old, new);

	/*
	 * x86 overrides ftrace_replace_code -- this function will never be used
	 * in this case.
	 */
	WARN_ONCE(1, "invalid use of ftrace_make_nop");
	return -EINVAL;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	const char *new, *old;

	old = ftrace_nop_replace();
	new = ftrace_call_replace(ip, addr);

	/* Should only be called when module is loaded */
	return ftrace_modify_code_direct(rec->ip, old, new);
}

/*
 * Should never be called:
 *  As it is only called by __ftrace_replace_code() which is called by
 *  ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
 *  which is called to turn mcount into nops or nops into function calls
 *  but not to convert a function from not using regs to one that uses
 *  regs, which ftrace_modify_call() is for.
 */
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	WARN_ON(1);
	return -EINVAL;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip;
	const char *new;

	ip = (unsigned long)(&ftrace_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	ip = (unsigned long)(&ftrace_regs_call);
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

void ftrace_replace_code(int enable)
{
	struct ftrace_rec_iter *iter;
	struct dyn_ftrace *rec;
	const char *new, *old;
	int ret;

	/* First pass: verify every record contains the text we expect */
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
			old = ftrace_nop_replace();
			break;

		case FTRACE_UPDATE_MODIFY_CALL:
		case FTRACE_UPDATE_MAKE_NOP:
			old = ftrace_call_replace(rec->ip, ftrace_get_addr_curr(rec));
			break;
		}

		ret = ftrace_verify_code(rec->ip, old);
		if (ret) {
			ftrace_expected = old;
			ftrace_bug(ret, rec);
			ftrace_expected = NULL;
			return;
		}
	}

	/* Second pass: queue the new text for every record */
	for_ftrace_rec_iter(iter) {
		rec = ftrace_rec_iter_record(iter);

		switch (ftrace_test_record(rec, enable)) {
		case FTRACE_UPDATE_IGNORE:
		default:
			continue;

		case FTRACE_UPDATE_MAKE_CALL:
		case FTRACE_UPDATE_MODIFY_CALL:
			new = ftrace_call_replace(rec->ip, ftrace_get_addr_new(rec));
			break;

		case FTRACE_UPDATE_MAKE_NOP:
			new = ftrace_nop_replace();
			break;
		}

		text_poke_queue((void *)rec->ip, new, MCOUNT_INSN_SIZE, NULL);
		ftrace_update_record(rec, enable);
	}
	text_poke_finish();
}

void arch_ftrace_update_code(int command)
{
	ftrace_modify_all_code(command);
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64

#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
	return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
	module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
	return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif

/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_regs_caller_end(void);
extern void ftrace_regs_caller_ret(void);
extern void ftrace_caller_end(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
extern void ftrace_regs_caller_jmp(void);
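
/*
 * These labels are defined in the ftrace assembly stubs
 * (arch/x86/kernel/ftrace_64.S on x86_64).
 */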

/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE	7

/*
 * The ftrace_ops is passed to the function callback. Since the
 * trampoline only services a single ftrace_ops, we can pass in
 * that ops directly.
 *
 * The ftrace_op_code_union is used to create a pointer to the
 * ftrace_ops that will be passed to the callback function.
 */
union ftrace_op_code_union {
	char code[OP_REF_SIZE];
	struct {
		char op[3];
		int offset;
	} __attribute__((packed));
};

#define RET_SIZE	(IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
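
/*
 * Size of the trampoline's terminating return: with a return thunk
 * (retpoline build) it is a 5-byte "jmp __x86_return_thunk"; otherwise a
 * 1-byte ret, plus one int3 byte when CONFIG_SLS pads returns against
 * straight-line speculation.
 */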

static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long op_offset;
	unsigned long call_offset;
	unsigned long jmp_offset;
	unsigned long offset;
	unsigned long npages;
	unsigned long size;
	unsigned long *ptr;
	void *trampoline;
	void *ip;
	/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
	unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
	unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
	union ftrace_op_code_union op_ptr;
	int ret;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		start_offset = (unsigned long)ftrace_regs_caller;
		end_offset = (unsigned long)ftrace_regs_caller_end;
		op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
		call_offset = (unsigned long)ftrace_regs_call;
		jmp_offset = (unsigned long)ftrace_regs_caller_jmp;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		end_offset = (unsigned long)ftrace_caller_end;
		op_offset = (unsigned long)ftrace_caller_op_ptr;
		call_offset = (unsigned long)ftrace_call;
		jmp_offset = 0;
	}

	size = end_offset - start_offset;

	/*
	 * Allocate enough size to store the ftrace_caller code,
	 * the ret instruction, as well as the address of the ftrace_ops
	 * this trampoline is used for.
	 */
	trampoline = alloc_tramp(size + RET_SIZE + sizeof(void *));
	if (!trampoline)
		return 0;

	*tramp_size = size + RET_SIZE + sizeof(void *);
	npages = DIV_ROUND_UP(*tramp_size, PAGE_SIZE);

	/* Copy ftrace_caller onto the trampoline memory */
	ret = copy_from_kernel_nofault(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0))
		goto fail;

	ip = trampoline + size;

	/* The trampoline ends with ret(q) */
	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
		memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE);
	else
		memcpy(ip, retq, sizeof(retq));

	/* No need to test direct calls on created trampolines */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
		/* NOP the jnz 1f; but make sure it's a 2 byte jnz */
		ip = trampoline + (jmp_offset - start_offset);
		if (WARN_ON(*(char *)ip != 0x75))
			goto fail;
		ret = copy_from_kernel_nofault(ip, x86_nops[2], 2);
		if (ret < 0)
			goto fail;
	}

	/*
	 * The address of the ftrace_ops that is used for this trampoline
	 * is stored at the end of the trampoline. This will be used to
	 * load the third parameter for the callback. Basically, that
	 * location at the end of the trampoline takes the place of
	 * the global function_trace_op variable.
	 */

	ptr = (unsigned long *)(trampoline + size + RET_SIZE);
	*ptr = (unsigned long)ops;
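
	/*
	 * Trampoline layout at this point:
	 *   [0, size)                  copy of ftrace_(regs_)caller
	 *   [size, size + RET_SIZE)    ret, or jmp to the return thunk
	 *   [size + RET_SIZE, + 8)     pointer to this ops, standing in for
	 *                              the global function_trace_op
	 */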

	op_offset -= start_offset;
	memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0))
		goto fail;

	/* Load the contents of ptr into the callback parameter */
	offset = (unsigned long)ptr;
	offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
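
	/*
	 * The movq displacement is RIP-relative, i.e. relative to the first
	 * byte *after* the 7-byte instruction, hence the OP_REF_SIZE term.
	 */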

	op_ptr.offset = offset;

	/* put in the new offset to the ftrace_ops */
	memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);

	/* put in the call to the function */
	mutex_lock(&text_mutex);
	call_offset -= start_offset;
	memcpy(trampoline + call_offset,
	       text_gen_insn(CALL_INSN_OPCODE,
			     trampoline + call_offset,
			     ftrace_ops_get_func(ops)), CALL_INSN_SIZE);
	mutex_unlock(&text_mutex);

	/* The ALLOC_TRAMP flag lets us know we created it */
	ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;

	set_vm_flush_reset_perms(trampoline);

	if (likely(system_state != SYSTEM_BOOTING))
		set_memory_ro((unsigned long)trampoline, npages);
	set_memory_x((unsigned long)trampoline, npages);
	return (unsigned long)trampoline;
fail:
	tramp_free(trampoline);
	return 0;
}

void set_ftrace_ops_ro(void)
{
	struct ftrace_ops *ops;
	unsigned long start_offset;
	unsigned long end_offset;
	unsigned long npages;
	unsigned long size;

	do_for_each_ftrace_op(ops, ftrace_ops_list) {
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			continue;

		if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
			start_offset = (unsigned long)ftrace_regs_caller;
			end_offset = (unsigned long)ftrace_regs_caller_end;
		} else {
			start_offset = (unsigned long)ftrace_caller;
			end_offset = (unsigned long)ftrace_caller_end;
		}
		size = end_offset - start_offset;
		size = size + RET_SIZE + sizeof(void *);
		npages = DIV_ROUND_UP(size, PAGE_SIZE);
		set_memory_ro((unsigned long)ops->trampoline, npages);
	} while_for_each_ftrace_op(ops);
}

static unsigned long calc_trampoline_call_offset(bool save_regs)
{
	unsigned long start_offset;
	unsigned long call_offset;

	if (save_regs) {
		start_offset = (unsigned long)ftrace_regs_caller;
		call_offset = (unsigned long)ftrace_regs_call;
	} else {
		start_offset = (unsigned long)ftrace_caller;
		call_offset = (unsigned long)ftrace_call;
	}

	return call_offset - start_offset;
}
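
/*
 * Every allocated trampoline is a byte-for-byte copy of
 * ftrace_(regs_)caller, so the offset computed above locates the call
 * site in any such copy.
 */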

void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
	ftrace_func_t func;
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
	const char *new;

	if (!ops->trampoline) {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
		return;
	}

	/*
	 * The ftrace_ops caller may set up its own trampoline.
	 * In such a case, this code must not modify it.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	ip = ops->trampoline + offset;
	func = ftrace_ops_get_func(ops);

	mutex_lock(&text_mutex);
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);
	mutex_unlock(&text_mutex);
}

/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
	union text_poke_insn call;
	int ret;

	ret = copy_from_kernel_nofault(&call, ptr, CALL_INSN_SIZE);
	if (WARN_ON_ONCE(ret < 0))
		return NULL;

	/* Make sure this is a call */
	if (WARN_ON_ONCE(call.opcode != CALL_INSN_OPCODE)) {
		pr_warn("Expected E8, got %x\n", call.opcode);
		return NULL;
	}
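
	/* The E8 call displacement is relative to the next instruction */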
	return ptr + CALL_INSN_SIZE + call.disp;
}

void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer);

/*
 * If the ops->trampoline was not allocated, then it probably
 * has a static trampoline func, or is the ftrace caller itself.
 */
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;
	bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
	void *ptr;

	if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/*
		 * We only know about function graph tracer setting as static
		 * trampoline.
		 */
		if (ops->trampoline == FTRACE_GRAPH_ADDR)
			return (void *)prepare_ftrace_return;
#endif
		return NULL;
	}

	offset = calc_trampoline_call_offset(save_regs);

	if (save_regs)
		ptr = (void *)FTRACE_REGS_ADDR + offset;
	else
		ptr = (void *)FTRACE_ADDR + offset;

	return addr_from_call(ptr);
}

void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
	unsigned long offset;

	/* If we didn't allocate this trampoline, consider it static */
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return static_tramp_func(ops, rec);

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
	return addr_from_call((void *)ops->trampoline + offset);
}

void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

	tramp_free((void *)ops->trampoline);
	ops->trampoline = 0;
}

#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);

static const char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
	return text_gen_insn(JMP32_INSN_OPCODE, (void *)ip, (void *)addr);
}

static int ftrace_mod_jmp(unsigned long ip, void *func)
{
	const char *new;

	new = ftrace_jmp_replace(ip, (unsigned long)func);
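	/* JMP32 and CALL insns are both 5 bytes, i.e. MCOUNT_INSN_SIZE */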
	text_poke_bp((void *)ip, new, MCOUNT_INSN_SIZE, NULL);

	return 0;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned long ip = (unsigned long)(&ftrace_graph_call);

	return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;
	unsigned long old;
	int faulted;

	/*
	 * When resuming from suspend-to-ram, this function can be indirectly
	 * called from early CPU startup code while the CPU is in real mode,
	 * which would fail miserably. Make sure the stack pointer is a
	 * virtual address.
	 *
	 * This check isn't as accurate as virt_addr_valid(), but it should be
	 * good enough for this purpose, and it's fast.
	 */
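	/*
	 * Kernel virtual addresses have the sign bit set when viewed as
	 * signed longs, so a non-negative frame address cannot be a kernel
	 * stack address.
	 */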
	if (unlikely((long)__builtin_frame_address(0) >= 0))
		return;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
	asm volatile(
		"1: " _ASM_MOV " (%[parent]), %[old]\n"
		"2: " _ASM_MOV " %[return_hooker], (%[parent])\n"
		"   movl $0, %[faulted]\n"
		"3:\n"

		".section .fixup, \"ax\"\n"
		"4: movl $1, %[faulted]\n"
		"   jmp 3b\n"
		".previous\n"

		_ASM_EXTABLE(1b, 4b)
		_ASM_EXTABLE(2b, 4b)

		: [old] "=&r" (old), [faulted] "=r" (faulted)
		: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
		: "memory"
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	if (function_graph_enter(old, self_addr, frame_pointer, parent))
		*parent = old;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */