1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
3 * arch/arm64/kernel/entry-ftrace.S
5 * Copyright (C) 2013 Linaro Limited
6 * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
 */
9 #include <linux/linkage.h>
10 #include <asm/asm-offsets.h>
11 #include <asm/assembler.h>
12 #include <asm/ftrace.h>
15 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/*
17 * Due to -fpatchable-function-entry=2, the compiler has placed two NOPs before
18 * the regular function prologue. For an enabled callsite, ftrace_init_nop() and
19 * ftrace_make_call() have patched those NOPs to:
24 * ... where <entry> is either ftrace_caller or ftrace_regs_caller.
26 * Each instrumented function follows the AAPCS, so here x0-x8 and x18-x30 are
27 * live (x18 holds the Shadow Call Stack pointer), and x9-x17 are safe to
30 * We save the callsite's context into a pt_regs before invoking any ftrace
31 * callbacks. So that we can get a sensible backtrace, we create a stack record
32 * for the callsite and the ftrace entry assembly. This is not sufficient for
33 * reliable stacktrace: until we create the callsite stack record, its caller
34 * is missing from the LR and existing chain of frame records.
 */
36 .macro ftrace_regs_entry, allregs=0
37 /* Make room for pt_regs, plus a callee frame */
38 sub sp, sp, #(PT_REGS_SIZE + 16)
40 /* Save function arguments (and x9 for simplicity) */
41 stp x0, x1, [sp, #S_X0]
42 stp x2, x3, [sp, #S_X2]
43 stp x4, x5, [sp, #S_X4]
44 stp x6, x7, [sp, #S_X6]
45 stp x8, x9, [sp, #S_X8]
47 /* Optionally save the callee-saved registers, always save the FP */
49 stp x10, x11, [sp, #S_X10]
50 stp x12, x13, [sp, #S_X12]
51 stp x14, x15, [sp, #S_X14]
52 stp x16, x17, [sp, #S_X16]
53 stp x18, x19, [sp, #S_X18]
54 stp x20, x21, [sp, #S_X20]
55 stp x22, x23, [sp, #S_X22]
56 stp x24, x25, [sp, #S_X24]
57 stp x26, x27, [sp, #S_X26]
58 stp x28, x29, [sp, #S_X28]
63 /* Save the callsite's SP and LR */
64 add x10, sp, #(PT_REGS_SIZE + 16)
65 stp x9, x10, [sp, #S_LR]
67 /* Save the PC after the ftrace callsite */
70 /* Create a frame record for the callsite above pt_regs */
71 stp x29, x9, [sp, #PT_REGS_SIZE]
72 add x29, sp, #PT_REGS_SIZE
74 /* Create our frame record within pt_regs. */
75 stp x29, x30, [sp, #S_STACKFRAME]
76 add x29, sp, #S_STACKFRAME
79 SYM_CODE_START(ftrace_regs_caller)
85 SYM_CODE_END(ftrace_regs_caller)
87 SYM_CODE_START(ftrace_caller)
93 SYM_CODE_END(ftrace_caller)
95 SYM_CODE_START(ftrace_common)
96 sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
97 mov x1, x9 // parent_ip (callsite's LR)
98 ldr_l x2, function_trace_op // op
101 SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
104 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
105 SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
106 nop // If enabled, this will be replaced
107 // "b ftrace_graph_caller"
111 * At the callsite x0-x8 and x19-x30 were live. Any C code will have preserved
112 * x19-x29 per the AAPCS, and we created frame records upon entry, so we need
113 * to restore x0-x8, x29, and x30.
115 ftrace_common_return:
116 /* Restore function arguments */
118 ldp x2, x3, [sp, #S_X2]
119 ldp x4, x5, [sp, #S_X4]
120 ldp x6, x7, [sp, #S_X6]
123 /* Restore the callsite's FP, LR, PC */
128 /* Restore the callsite's SP */
129 add sp, sp, #PT_REGS_SIZE + 16
132 SYM_CODE_END(ftrace_common)
134 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
135 SYM_CODE_START(ftrace_graph_caller)
137 sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
138 add x1, sp, #S_LR // parent_ip (callsite's LR)
139 ldr x2, [sp, #PT_REGS_SIZE] // parent fp (callsite's FP)
140 bl prepare_ftrace_return
141 b ftrace_common_return
142 SYM_CODE_END(ftrace_graph_caller)
145 #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
/*
148 * Gcc with -pg will put the following code in the beginning of each function:
151 * [function's body ...]
152 * "bl _mcount" may be replaced to "bl ftrace_caller" or NOP if dynamic
155 * Please note that x0 as an argument will not be used here because we can
156 * get lr(x30) of instrumented function at any time by winding up call stack
157 * as long as the kernel is compiled without -fomit-frame-pointer.
158 * (or CONFIG_FRAME_POINTER, this is forced on arm64)
160 * stack layout after mcount_enter in _mcount():
162 * current sp/fp => 0:+-----+
163 * in _mcount() | x29 | -> instrumented function's fp
165 * | x30 | -> _mcount()'s lr (= instrumented function's pc)
166 * old sp => +16:+-----+
167 * when instrumented | |
168 * function calls | ... |
171 * instrumented => +xx:+-----+
172 * function's fp | x29 | -> parent's fp
174 * | x30 | -> instrumented function's lr (= parent's pc)
 */
180 stp x29, x30, [sp, #-16]!
185 ldp x29, x30, [sp], #16
189 .macro mcount_adjust_addr rd, rn
190 sub \rd, \rn, #AARCH64_INSN_SIZE
193 /* for instrumented function's parent */
194 .macro mcount_get_parent_fp reg
199 /* for instrumented function */
200 .macro mcount_get_pc0 reg
201 mcount_adjust_addr \reg, x30
204 .macro mcount_get_pc reg
206 mcount_adjust_addr \reg, \reg
209 .macro mcount_get_lr reg
214 .macro mcount_get_lr_addr reg
219 #ifndef CONFIG_DYNAMIC_FTRACE
/*
221 * void _mcount(unsigned long return_address)
222 * @return_address: return address to instrumented function
224 * This function makes calls, if enabled, to:
225 * - tracer function to probe instrumented function's entry,
226 * - ftrace_graph_caller to set up an exit hook
 */
228 SYM_FUNC_START(_mcount)
231 ldr_l x2, ftrace_trace_function
233 cmp x0, x2 // if (ftrace_trace_function
234 b.eq skip_ftrace_call // != ftrace_stub) {
236 mcount_get_pc x0 // function's pc
237 mcount_get_lr x1 // function's lr (= parent's pc)
238 blr x2 // (*ftrace_trace_function)(pc, lr);
240 skip_ftrace_call: // }
241 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
242 ldr_l x2, ftrace_graph_return
243 cmp x0, x2 // if ((ftrace_graph_return
244 b.ne ftrace_graph_caller // != ftrace_stub)
246 ldr_l x2, ftrace_graph_entry // || (ftrace_graph_entry
247 adr_l x0, ftrace_graph_entry_stub // != ftrace_graph_entry_stub))
249 b.ne ftrace_graph_caller // ftrace_graph_caller();
250 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
252 SYM_FUNC_END(_mcount)
253 EXPORT_SYMBOL(_mcount)
256 #else /* CONFIG_DYNAMIC_FTRACE */
/*
258 * _mcount() is used to build the kernel with -pg option, but all the branch
259 * instructions to _mcount() are replaced to NOP initially at kernel start up,
260 * and later on, NOP to branch to ftrace_caller() when enabled or branch to
261 * NOP when disabled per-function base.
 */
263 SYM_FUNC_START(_mcount)
265 SYM_FUNC_END(_mcount)
266 EXPORT_SYMBOL(_mcount)
/*
270 * void ftrace_caller(unsigned long return_address)
271 * @return_address: return address to instrumented function
273 * This function is a counterpart of _mcount() in 'static' ftrace, and
275 * - tracer function to probe instrumented function's entry,
276 * - ftrace_graph_caller to set up an exit hook
 */
278 SYM_FUNC_START(ftrace_caller)
281 mcount_get_pc0 x0 // function's pc
282 mcount_get_lr x1 // function's lr
284 SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr);
285 nop // This will be replaced with "bl xxx"
286 // where xxx can be any kind of tracer.
288 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
289 SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
290 nop // If enabled, this will be replaced
291 // "b ftrace_graph_caller"
295 SYM_FUNC_END(ftrace_caller)
296 #endif /* CONFIG_DYNAMIC_FTRACE */
298 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
300 * void ftrace_graph_caller(void)
302 * Called from _mcount() or ftrace_caller() when function_graph tracer is
304 * This function w/ prepare_ftrace_return() fakes link register's value on
305 * the call stack in order to intercept instrumented function's return path
306 * and run return_to_handler() later on its exit.
 */
308 SYM_FUNC_START(ftrace_graph_caller)
309 mcount_get_pc x0 // function's pc
310 mcount_get_lr_addr x1 // pointer to function's saved lr
311 mcount_get_parent_fp x2 // parent's fp
312 bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
315 SYM_FUNC_END(ftrace_graph_caller)
316 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
317 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
319 SYM_FUNC_START(ftrace_stub)
321 SYM_FUNC_END(ftrace_stub)
323 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
325 * void return_to_handler(void)
327 * Run ftrace_return_to_handler() before going back to parent.
328 * @fp is checked against the value passed by ftrace_graph_caller().
 */
330 SYM_CODE_START(return_to_handler)
331 /* save return value regs */
334 stp x2, x3, [sp, #16]
335 stp x4, x5, [sp, #32]
336 stp x6, x7, [sp, #48]
338 mov x0, x29 // parent's fp
339 bl ftrace_return_to_handler// addr = ftrace_return_to_hander(fp);
340 mov x30, x0 // restore the original return address
342 /* restore return value regs */
344 ldp x2, x3, [sp, #16]
345 ldp x4, x5, [sp, #32]
346 ldp x6, x7, [sp, #48]
350 SYM_CODE_END(return_to_handler)
351 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */