1 // SPDX-License-Identifier: GPL-2.0-only
3 #include <linux/highmem.h>
4 #include <linux/ptrace.h>
5 #include <linux/uprobes.h>
8 #include "decode-insn.h"
10 #define UPROBE_TRAP_NR UINT_MAX
12 bool is_swbp_insn(uprobe_opcode_t *insn)
14 #ifdef CONFIG_RISCV_ISA_C
15 return (*insn & 0xffff) == UPROBE_SWBP_INSN;
17 return *insn == UPROBE_SWBP_INSN;
21 bool is_trap_insn(uprobe_opcode_t *insn)
23 return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
/*
 * Address of the breakpoint instruction: the pc at which the trap was
 * taken, as recorded in @regs.
 */
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
	unsigned long bp_vaddr = instruction_pointer(regs);

	return bp_vaddr;
}
/*
 * Validate and classify the probed instruction for @auprobe.
 *
 * Returns 0 on success, -EINVAL if the instruction cannot be probed.
 * On success, auprobe->simulate records whether the instruction must be
 * emulated (true) or can be single-stepped out of line (false).
 */
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	probe_opcode_t opcode;

	/* Copy of the probed instruction saved by the uprobes core. */
	opcode = *(probe_opcode_t *)(&auprobe->insn[0]);

	/* 2 bytes for a compressed instruction, 4 otherwise. */
	auprobe->insn_size = GET_INSN_LENGTH(opcode);

	switch (riscv_probe_decode_insn(&opcode, &auprobe->api)) {
	case INSN_REJECTED:
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:
		/* Must be simulated; no XOL slot will be used. */
		auprobe->simulate = true;
		break;

	case INSN_GOOD:
		/* Safe to execute out of line in the XOL slot. */
		auprobe->simulate = false;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Prepare to single-step the probed instruction out of line: plant the
 * UPROBE_TRAP_NR sentinel in thread.bad_cause (saving the old value so
 * it can be restored) and redirect execution to the XOL slot.
 */
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/*
	 * The sentinel lets arch_uprobe_xol_was_trapped() detect a trap
	 * taken while stepping in the slot; save first, then overwrite.
	 */
	utask->autask.saved_cause = current->thread.bad_cause;
	current->thread.bad_cause = UPROBE_TRAP_NR;

	/* Resume at the out-of-line copy of the instruction. */
	instruction_pointer_set(regs, utask->xol_vaddr);

	return 0;
}
/*
 * Finish a successful out-of-line single-step: restore the saved
 * bad_cause and point the pc at the instruction following the probed
 * one in the original text.
 */
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	/* The sentinel planted by arch_uprobe_pre_xol() should be intact. */
	WARN_ON_ONCE(current->thread.bad_cause != UPROBE_TRAP_NR);
	current->thread.bad_cause = utask->autask.saved_cause;

	/* Skip past the probed instruction (2 or 4 bytes). */
	instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);

	return 0;
}
83 bool arch_uprobe_xol_was_trapped(struct task_struct *t)
85 if (t->thread.bad_cause != UPROBE_TRAP_NR)
/*
 * Try to emulate the probed instruction instead of single-stepping it.
 * Returns true if the instruction was handled here (simulation path),
 * false if the caller must fall back to out-of-line single-stepping.
 */
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	probe_opcode_t insn;
	unsigned long addr;

	/* Decoder marked this instruction as XOL-capable; don't skip. */
	if (!auprobe->simulate)
		return false;

	insn = *(probe_opcode_t *)(&auprobe->insn[0]);
	addr = instruction_pointer(regs);

	/* Simulation handler selected by riscv_probe_decode_insn(). */
	if (auprobe->api.handler)
		auprobe->api.handler(insn, addr, regs);

	return true;
}
/*
 * Abandon an in-progress out-of-line single-step: restore the saved
 * bad_cause and rewind the pc to the probed address.
 */
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	current->thread.bad_cause = utask->autask.saved_cause;
	/*
	 * Task has received a fatal signal, so reset back to probed
	 * address.
	 */
	instruction_pointer_set(regs, utask->vaddr);
}
120 bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
121 struct pt_regs *regs)
123 if (ctx == RP_CHECK_CHAIN_CALL)
124 return regs->sp <= ret->stack;
126 return regs->sp < ret->stack;
130 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
131 struct pt_regs *regs)
137 regs->ra = trampoline_vaddr;
142 int arch_uprobe_exception_notify(struct notifier_block *self,
143 unsigned long val, void *data)
148 bool uprobe_breakpoint_handler(struct pt_regs *regs)
150 if (uprobe_pre_sstep_notifier(regs))
156 bool uprobe_single_step_handler(struct pt_regs *regs)
158 if (uprobe_post_sstep_notifier(regs))
/*
 * Copy the probed instruction into the XOL slot page at @vaddr and
 * append a 32-bit ebreak right after it, so that executing the slot
 * traps back to the kernel immediately after the stepped instruction.
 */
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
			   void *src, unsigned long len)
{
	/* Initialize the slot */
	void *kaddr = kmap_atomic(page);
	void *dst = kaddr + (vaddr & ~PAGE_MASK);

	memcpy(dst, src, len);

	/* Add ebreak behind opcode to simulate singlestep */
	/* Advance past the copied instruction (2 or 4 bytes). */
	dst += GET_INSN_LENGTH(*(probe_opcode_t *)src);
	*(uprobe_opcode_t *)dst = __BUG_INSN_32;

	kunmap_atomic(kaddr);

	/*
	 * We probably need flush_icache_user_page() but it needs vma.
	 * This should work on most of architectures by default. If
	 * architecture needs to do something different it can define
	 * its own version of the function.
	 */
	flush_dcache_page(page);
}