// SPDX-License-Identifier: GPL-2.0
/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *		Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include <asm/set_memory.h>
#include "entry.h"

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The ftrace function gets called with a non-standard C function call ABI
 * where r0 contains the return address. It is also expected that the called
 * function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need a
 * trampoline (ftrace_plt), which also clobbers r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 * >	jg	.+24			# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 * In case we use gcc's hotpatch feature, the original and also the disabled
 * function prologue contains only a single six byte instruction and looks
 * like this:
 * >	brcl	0,0			# offset 0
 * To enable ftrace the code gets patched like above and afterwards looks
 * like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 */
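
/*
 * For reference: the patching below operates on struct ftrace_insn, which
 * asm/ftrace.h in this kernel version declares as the packed pair of the
 * 16 bit opcode and 32 bit displacement making up one six byte instruction:
 *
 *	struct ftrace_insn {
 *		u16 opc;
 *		s32 disp;
 *	} __packed;
 *
 * ftrace_generate_nop_insn() and ftrace_generate_call_insn() also live in
 * asm/ftrace.h: the nop is "jg .+24" (opc 0xc0f4, disp MCOUNT_INSN_SIZE / 2,
 * counted in halfwords) and the call is "brasl %r0,target", where target is
 * ftrace_caller, or the ftrace_plt trampoline for module code.
 */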

void *ftrace_func __read_mostly = ftrace_stub;
unsigned long ftrace_plt;
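
/*
 * ftrace_func above is the callback that the ftrace_caller code branches
 * to; ftrace_plt is filled in by ftrace_plt_init() below and serves as
 * branch target for module code, where ftrace_caller may be out of brasl
 * range.
 */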

static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
{
#ifdef CC_USING_HOTPATCH
	/* brcl 0,0 */
	insn->opc = 0xc004;
	insn->disp = 0;
#else
	/* stg r14,8(r15) */
	insn->opc = 0xe3e0;
	insn->disp = 0xf0080024;
#endif
}

static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	if (insn->opc == BREAKPOINT_INSTRUCTION)
		return 1;
#endif
	return 0;
}

static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_NOP;
#endif
}

static inline void ftrace_generate_kprobe_call_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
	insn->opc = BREAKPOINT_INSTRUCTION;
	insn->disp = KPROBE_ON_FTRACE_CALL;
#endif
}
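
/*
 * Note on the kprobe variants above: when a kprobe sits on the patch site,
 * the first two bytes already contain the breakpoint instruction, so only
 * the remaining four bytes can be used. They carry a flag rather than real
 * code: the kprobe handler reads it to decide whether to emulate a nop
 * (KPROBE_ON_FTRACE_NOP) or the call to ftrace_caller
 * (KPROBE_ON_FTRACE_CALL).
 */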

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
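
/*
 * Nothing to modify in ftrace_modify_call() above: the patched call
 * instruction always branches to the same target (ftrace_caller, or the
 * PLT for modules) no matter which tracer is attached, so switching
 * tracers never has to rewrite the call site itself.
 */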

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (addr == MCOUNT_ADDR) {
		/* Initial code replacement */
		ftrace_generate_orig_insn(&orig);
		ftrace_generate_nop_insn(&new);
	} else if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a nop, if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_call_insn(&orig);
		ftrace_generate_kprobe_nop_insn(&new);
	} else {
		/* Replace ftrace call with a nop. */
		ftrace_generate_call_insn(&orig, rec->ip);
		ftrace_generate_nop_insn(&new);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn orig, new, old;

	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
		return -EFAULT;
	if (is_kprobe_on_ftrace(&old)) {
		/*
		 * If we find a breakpoint instruction, a kprobe has been
		 * placed at the beginning of the function. We write the
		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
		 * bytes of the original instruction so that the kprobes
		 * handler can execute a brasl if it reaches this breakpoint.
		 */
		ftrace_generate_kprobe_nop_insn(&orig);
		ftrace_generate_kprobe_call_insn(&new);
	} else {
		/* Replace the nop with an ftrace call. */
		ftrace_generate_nop_insn(&orig);
		ftrace_generate_call_insn(&new, rec->ip);
	}
	/* Verify that the code to be replaced matches what we expect. */
	if (memcmp(&orig, &old, sizeof(old)))
		return -EINVAL;
	s390_kernel_write((void *) rec->ip, &new, sizeof(new));
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}
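
/*
 * A single pointer store suffices above because, in this kernel version,
 * the ftrace_caller code loads its callback indirectly through ftrace_func
 * instead of branching to a patched address.
 */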

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

#ifdef CONFIG_MODULES
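
/*
 * The PLT set up below is a 20 byte trampoline: basr %r1,0 puts the address
 * of the following lg instruction into %r1, lg %r1,10(%r1) then loads the
 * 64 bit FTRACE_ADDR literal stored in ip[3]/ip[4], and br %r1 branches to
 * it. This is why module code also clobbers %r1 (see the comment block at
 * the top of this file).
 */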

static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addresses
 * in current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		goto out;
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
	ip -= MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0,
				     NULL) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);
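
/*
 * prepare_ftrace_return() is executed on the traced path of potentially
 * every function in the kernel, hence the NOKPROBE_SYMBOL annotation:
 * placing a breakpoint in here could recurse through the tracing code.
 */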

/*
 * Patch the kernel code at ftrace_graph_caller location. The instruction
 * there is branch relative on condition. To enable the ftrace graph code
 * block, we simply patch the mask field of the instruction to zero and
 * turn the instruction into a nop.
 * To disable the ftrace graph code the mask field will be patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
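
/*
 * For reference: "brc mask,target" encodes as a7 m4 iiii with the mask in
 * the high nibble of the second byte, so writing 0x04 at offset 1 yields
 * mask 0 (branch never taken, i.e. a nop) and 0xf4 yields mask 15 (branch
 * always taken).
 */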
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	s390_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
	return 0;
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */