riscv/kernel/probes/kprobes.c
// SPDX-License-Identifier: GPL-2.0+

#define pr_fmt(fmt) "kprobes: " fmt

#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/bug.h>
#include <asm/patch.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *);

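/*
 * Prepare the out-of-line single-step slot: copy the probed instruction into
 * the slot and place a 32-bit ebreak right after it, so executing the slot
 * runs the original instruction and then traps back into the kprobe core.
 */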
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
        unsigned long offset = GET_INSN_LENGTH(p->opcode);

        p->ainsn.api.restore = (unsigned long)p->addr + offset;

        patch_text(p->ainsn.api.insn, p->opcode);
        patch_text((void *)((unsigned long)(p->ainsn.api.insn) + offset),
                   __BUG_INSN_32);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
        p->ainsn.api.restore = 0;
}

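/*
 * Emulate the probed instruction via the handler chosen at decode time (no
 * out-of-line slot is used), then finish up as if a single step had
 * completed.
 */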
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (p->ainsn.api.handler)
                p->ainsn.api.handler((u32)p->opcode,
                                        (unsigned long)p->addr, regs);

        post_kprobe_handler(p, kcb, regs);
}

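/*
 * Walk forward from the start of the probed symbol, decoding instruction
 * lengths as we go, and accept the probe only if its address falls exactly
 * on an instruction boundary (i.e. not in the middle of a 32-bit or
 * compressed instruction).
 */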
static bool __kprobes arch_check_kprobe(struct kprobe *p)
{
        unsigned long tmp  = (unsigned long)p->addr - p->offset;
        unsigned long addr = (unsigned long)p->addr;

        while (tmp <= addr) {
                if (tmp == addr)
                        return true;

                tmp += GET_INSN_LENGTH(*(u16 *)tmp);
        }

        return false;
}

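/*
 * Validate the probe address, copy out the original instruction, and decide
 * whether it can be single-stepped out of line or must be simulated.
 */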
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        u16 *insn = (u16 *)p->addr;

        if ((unsigned long)insn & 0x1)
                return -EILSEQ;

        if (!arch_check_kprobe(p))
                return -EILSEQ;

        /* copy instruction */
        p->opcode = (kprobe_opcode_t)(*insn++);
        if (GET_INSN_LENGTH(p->opcode) == 4)
                p->opcode |= (kprobe_opcode_t)(*insn) << 16;

        /* decode instruction */
        switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
        case INSN_REJECTED:     /* insn not supported */
                return -EINVAL;

        case INSN_GOOD_NO_SLOT: /* insn needs simulation */
                p->ainsn.api.insn = NULL;
                break;

        case INSN_GOOD: /* instruction uses slot */
                p->ainsn.api.insn = get_insn_slot();
                if (!p->ainsn.api.insn)
                        return -ENOMEM;
                break;
        }

        /* prepare the instruction */
        if (p->ainsn.api.insn)
                arch_prepare_ss_slot(p);
        else
                arch_prepare_simulate(p);

        return 0;
}

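/*
 * Single-step slots live in a vmalloc'ed page mapped read+execute;
 * VM_FLUSH_RESET_PERMS ensures permissions are reset when the page is freed.
 */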
#ifdef CONFIG_MMU
void *alloc_insn_page(void)
{
        return  __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
                                     GFP_KERNEL, PAGE_KERNEL_READ_EXEC,
                                     VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                                     __builtin_return_address(0));
}
#endif

/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        if ((p->opcode & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
                patch_text(p->addr, __BUG_INSN_32);
        else
                patch_text(p->addr, __BUG_INSN_16);
}

/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}

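/*
 * Save/restore the in-flight kprobe state so that a probe hit from within
 * another probe's handler (a re-entered kprobe) can be handled and the
 * original state put back afterwards.
 */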
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
        __this_cpu_write(current_kprobe, p);
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * re-enabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could arrive
 * between the exception return and the start of the out-of-line single
 * step, which would result in wrongly single-stepping into the interrupt
 * handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
{
        kcb->saved_status = regs->status;
        regs->status &= ~SR_SPIE;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
                                                struct pt_regs *regs)
{
        regs->status = kcb->saved_status;
}

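/*
 * Start handling the probed instruction: either redirect the PC to the
 * out-of-line slot (with interrupts masked across the single step) or
 * simulate the instruction directly on the saved registers.
 */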
static void __kprobes setup_singlestep(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb, int reenter)
{
        unsigned long slot;

        if (reenter) {
                save_previous_kprobe(kcb);
                set_current_kprobe(p);
                kcb->kprobe_status = KPROBE_REENTER;
        } else {
                kcb->kprobe_status = KPROBE_HIT_SS;
        }

        if (p->ainsn.api.insn) {
                /* prepare for single stepping */
                slot = (unsigned long)p->ainsn.api.insn;

                /* IRQs and single stepping do not mix well. */
                kprobes_save_local_irqflag(kcb, regs);

                instruction_pointer_set(regs, slot);
        } else {
                /* insn simulation */
                arch_simulate_insn(p, regs);
        }
}

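/*
 * Handle a breakpoint hit while another kprobe is already being processed.
 * A hit from within a handler is counted as missed and single-stepped as a
 * re-entered probe; a hit while single-stepping indicates an unrecoverable
 * state.
 */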
static int __kprobes reenter_kprobe(struct kprobe *p,
                                    struct pt_regs *regs,
                                    struct kprobe_ctlblk *kcb)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                setup_singlestep(p, regs, kcb, 1);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                pr_warn("Failed to recover from reentered kprobes.\n");
                dump_kprobe(p);
                BUG();
                break;
        default:
                WARN_ON(1);
                return 0;
        }

        return 1;
}

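/*
 * Finish handling a probe once the single step (or simulation) is done:
 * restore the PC to the instruction following the probe, run the post
 * handler, and clear or restore the current kprobe state.
 */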
static void __kprobes
post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
        /* restore the return address if the insn was not a branch/jump */
        if (cur->ainsn.api.restore != 0)
                regs->epc = cur->ainsn.api.restore;

        /* restore the previously saved kprobe state and continue */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                return;
        }

        /* call post handler */
        kcb->kprobe_status = KPROBE_HIT_SSDONE;
        if (cur->post_handler) {
                /*
                 * post_handler can itself hit a breakpoint and single step
                 * again, which is handled as a re-entered kprobe.
                 */
                cur->post_handler(cur, regs, 0);
        }

        reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe, point the epc back to the probe address,
                 * and allow the page fault handler to continue as a
                 * normal page fault.
                 */
                regs->epc = (unsigned long) cur->addr;
                BUG_ON(!instruction_pointer(regs));

                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else {
                        kprobes_restore_local_irqflag(kcb, regs);
                        reset_current_kprobe();
                }

                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if (fixup_exception(regs))
                        return 1;
        }
        return 0;
}

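/*
 * Entry point from the ebreak trap: look up the kprobe registered at the
 * trapping PC and dispatch to re-entry handling, the pre handler and single
 * stepping, or report that the trap is not ours.
 */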
bool __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
        struct kprobe *p, *cur_kprobe;
        struct kprobe_ctlblk *kcb;
        unsigned long addr = instruction_pointer(regs);

        kcb = get_kprobe_ctlblk();
        cur_kprobe = kprobe_running();

        p = get_kprobe((kprobe_opcode_t *) addr);

        if (p) {
                if (cur_kprobe) {
                        if (reenter_kprobe(p, regs, kcb))
                                return true;
                } else {
                        /* Probe hit */
                        set_current_kprobe(p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing.  If we have a
                         * pre-handler and it returned non-zero, it has
                         * modified the execution path and there is no need
                         * to single step; just reset the current kprobe
                         * and exit.
                         *
                         * pre_handler can itself hit a breakpoint and single
                         * step through it before returning.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs))
                                setup_singlestep(p, regs, kcb, 0);
                        else
                                reset_current_kprobe();
                }
                return true;
        }

        /*
         * The breakpoint instruction was removed right
         * after we hit it.  Another cpu has removed
         * either a probepoint or a debugger breakpoint
         * at this address.  In either case, no further
         * handling of this interrupt is appropriate.
         * Return to the original instruction and continue.
         */
        return false;
}

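/*
 * Called for the ebreak placed right after the copied instruction in the
 * single-step slot; if the trap address matches, restore the saved IRQ
 * state and finish the probe.
 */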
bool __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        unsigned long addr = instruction_pointer(regs);
        struct kprobe *cur = kprobe_running();

        if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
            ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) {
                kprobes_restore_local_irqflag(kcb, regs);
                post_kprobe_handler(cur, kcb, regs);
                return true;
        }
        /* not ours, kprobes should ignore it */
        return false;
}

/*
 * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
 * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
 */
int __init arch_populate_kprobe_blacklist(void)
{
        int ret;

        ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
                                        (unsigned long)__irqentry_text_end);
        return ret;
}

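/*
 * Called from __kretprobe_trampoline; returns the original return address so
 * the trampoline can hand control back to the caller of the probed function.
 */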
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
        return (void *)kretprobe_trampoline_handler(regs, NULL);
}

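/*
 * Hijack the return address: stash the original ra in the kretprobe instance
 * and point ra at __kretprobe_trampoline so the function's return is
 * intercepted.
 */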
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->ra;
        ri->fp = NULL;
        regs->ra = (unsigned long) &__kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        return 0;
}

int __init arch_init_kprobes(void)
{
        return 0;
}