1 /* SPDX-License-Identifier: GPL-2.0-or-later */
3 * Split from ftrace_64.S
6 #include <linux/export.h>
7 #include <linux/magic.h>
8 #include <asm/ppc_asm.h>
9 #include <asm/asm-offsets.h>
10 #include <asm/ftrace.h>
11 #include <asm/ppc-opcode.h>
12 #include <asm/thread_info.h>
14 #include <asm/ptrace.h>
18 * ftrace_caller()/ftrace_regs_caller() is the function that replaces _mcount()
19 * when ftrace is active.
21 * We arrive here after a function A calls function B, and we are the trace
22 * function for B. When we enter r1 points to A's stack frame, B has not yet
23 * had a chance to allocate one.
25 * Additionally r2 may point either to the TOC for A, or B, depending on
26 * whether B did a TOC setup sequence before calling us.
28 * On entry the LR points back to the _mcount() call site, and r0 holds the
29 * saved LR as it was on entry to B, ie. the original return address at the
32 * Our job is to save the register state into a struct pt_regs (on the stack)
33 * and then arrange for the ftrace function to be called.
35 .macro ftrace_regs_entry allregs
/*
 * Common entry for ftrace_caller/ftrace_regs_caller: build B's minimal
 * frame plus a SWITCH_FRAME_SIZE frame holding a pt_regs, capture the
 * registers into it, and set up r3-r6 as arguments for the tracer call.
 * \allregs selects whether the full register set is saved.
 */
36 /* Create a minimal stack frame for representing B */
37 PPC_STLU r1, -STACK_FRAME_MIN_SIZE(r1)
39 /* Create our stack frame + pt_regs */
40 PPC_STLU r1,-SWITCH_FRAME_SIZE(r1)
42 /* Save all gprs to pt_regs */
47 /* Save the original return address in A's stack frame */
48 std r0, LRSAVE+SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE(r1)
/* Per-CPU ftrace enable flag lives in the PACA (r13 = PACA pointer) */
50 lbz r3, PACA_FTRACE_ENABLED(r13)
59 #ifdef CONFIG_LIVEPATCH_64
64 /* Save previous stack pointer (r1) */
65 addi r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
69 /* Load special regs for save below */
75 /* Clear MSR to flag as ftrace_caller versus ftrace_regs_caller */
79 /* Get the _mcount() call site out of LR */
81 /* Save it as pt_regs->nip */
83 /* Also save it in B's stackframe header for proper unwind */
84 PPC_STL r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
85 /* Save the real LR (as on entry to B) in pt_regs->link */
89 /* Save callee's TOC in the ABI compliant location */
91 LOAD_PACA_TOC() /* get kernel TOC in r2 */
92 LOAD_REG_ADDR(r3, function_trace_op)
95 lis r3,function_trace_op@ha
96 lwz r5,function_trace_op@l(r3)
99 #ifdef CONFIG_LIVEPATCH_64
100 mr r14, r7 /* remember old NIP */
103 /* Calculate ip from nip-4 into r3 for call below */
104 subi r3, r7, MCOUNT_INSN_SIZE
106 /* Put the original return address in r4 as parent_ip */
109 /* Save special regs */
113 PPC_STL r10, _XER(r1)
114 PPC_STL r11, _CCR(r1)
117 /* Load &pt_regs in r6 for call below */
118 addi r6, r1, STACK_INT_FRAME_REGS
121 .macro ftrace_regs_exit allregs
/*
 * Common exit: restore state from the pt_regs built by ftrace_regs_entry,
 * tear down both stack frames, and branch to the (possibly tracer-modified)
 * NIP via CTR. With CONFIG_LIVEPATCH_64, divert to livepatch_handler when
 * the tracer changed the NIP.
 */
122 /* Load ctr with the possibly modified NIP */
126 #ifdef CONFIG_LIVEPATCH_64
127 cmpd r14, r3 /* has NIP been altered? */
135 #ifdef CONFIG_LIVEPATCH_64
140 /* Restore possibly modified LR */
145 /* Restore callee's TOC */
149 /* Pop our stack frame */
/* Undoes both frames pushed in ftrace_regs_entry (pt_regs + minimal) */
150 addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
152 #ifdef CONFIG_LIVEPATCH_64
153 /* Based on the cmpd above, if the NIP was altered handle livepatch */
154 bne- livepatch_handler
156 bctr /* jump after _mcount site */
/*
 * Trampoline used when a FTRACE_OPS_FL_SAVE_REGS tracer is attached:
 * saves the full register set before calling the trace function.
 * ftrace_regs_call is the patch site ftrace rewrites to point at the
 * current trace function.
 */
159 _GLOBAL(ftrace_regs_caller)
161 /* ftrace_call(r3, r4, r5, r6) */
162 .globl ftrace_regs_call
/*
 * Lightweight trampoline (no full register save); otherwise mirrors
 * ftrace_regs_caller via the shared entry/exit macros.
 */
168 _GLOBAL(ftrace_caller)
170 /* ftrace_call(r3, r4, r5, r6) */
/* Pop the pt_regs frame + minimal frame created on entry */
185 addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
190 #ifdef CONFIG_LIVEPATCH_64
192 * This function runs in the mcount context, between two functions. As
193 * such it can only clobber registers which are volatile and used in
196 * We get here when a function A calls another function B, but B has
197 * been live patched with a new function C.
200 * - we have no stack frame and can not allocate one
201 * - LR points back to the original caller (in A)
202 * - CTR holds the new NIP in C
203 * - r0, r11 & r12 are free
/* Grab the per-task livepatch shadow stack pointer via thread_info */
206 ld r12, PACA_THREAD_INFO(r13)
208 /* Allocate 3 x 8 bytes */
209 ld r11, TI_livepatch_sp(r12)
211 std r11, TI_livepatch_sp(r12)
213 /* Save toc & real LR on livepatch stack */
218 /* Store stack end marker */
219 lis r12, STACK_END_MAGIC@h
220 ori r12, r12, STACK_END_MAGIC@l
223 /* Put ctr in r12 for global entry and branch there */
228 * Now we are returning from the patched function to the original
229 * caller A. We are free to use r11, r12 and we can use r2 until we
/* Reload the livepatch shadow stack pointer for the return path */
233 ld r12, PACA_THREAD_INFO(r13)
235 ld r11, TI_livepatch_sp(r12)
237 /* Check stack marker hasn't been trashed */
238 lis r2, STACK_END_MAGIC@h
239 ori r2, r2, STACK_END_MAGIC@l
/* Trap (BUG) if the livepatch stack marker was corrupted */
242 EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
244 /* Restore LR & toc from livepatch stack */
249 /* Pop livepatch stack frame */
250 ld r12, PACA_THREAD_INFO(r13)
252 std r11, TI_livepatch_sp(r12)
254 /* Return to original caller of live patched function */
256 #endif /* CONFIG_LIVEPATCH */
/*
 * Legacy _mcount entry point, only needed when the toolchain does not
 * support -fpatchable-function-entry; exported for modules.
 */
258 #ifndef CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY
261 EXPORT_SYMBOL(_mcount)
268 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Landing pad installed as a fake return address by the function-graph
 * tracer. Preserves the traced function's return values around the call
 * to ftrace_return_to_handler(), which hands back the original return
 * address to branch to.
 */
269 _GLOBAL(return_to_handler)
270 /* need to save return values */
281 * We might be called from a module.
282 * Switch to our TOC to run inside the core kernel.
291 bl ftrace_return_to_handler
294 /* return value has real return address */
309 /* Jump back to real return address */
311 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
/*
 * Reserved trampoline areas ftrace can patch at runtime to reach the
 * handlers above from far-away call sites. The .init variant is
 * presumably discarded after boot with the rest of init memory —
 * NOTE(review): confirm against the linker script.
 */
313 .pushsection ".tramp.ftrace.text","aw",@progbits;
314 .globl ftrace_tramp_text
319 .pushsection ".tramp.ftrace.init","aw",@progbits;
320 .globl ftrace_tramp_init