/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/sched/debug.h>
#include <linux/set_memory.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/sections.h>

#include "decode-insn.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

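/*
 * For orientation, a minimal sketch of how a client exercises this code
 * (illustration only, not part of this file; the probed symbol name is
 * an arbitrary example):
 *
 *      static int pre(struct kprobe *p, struct pt_regs *regs)
 *      {
 *              pr_info("hit at %pS\n", (void *)instruction_pointer(regs));
 *              return 0;
 *      }
 *
 *      static struct kprobe kp = {
 *              .symbol_name    = "do_fork",
 *              .pre_handler    = pre,
 *      };
 *
 *      register_kprobe(&kp);    // arms the BRK via arch_arm_kprobe()
 *      ...
 *      unregister_kprobe(&kp);  // restores the original instruction
 */
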
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
        void *addrs[1];
        u32 insns[1];

        addrs[0] = addr;
        insns[0] = opcode;

        return aarch64_insn_patch_text(addrs, insns, 1);
}

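/*
 * A probed instruction that cannot be safely simulated is copied into a
 * dedicated slot and single-stepped there ("executed out of line", xol),
 * while the original location keeps the BRK. After the step, the PC is
 * restored to the instruction following the probe.
 */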
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
        /* prepare insn slot */
        patch_text(p->ainsn.api.insn, p->opcode);

        flush_icache_range((uintptr_t) (p->ainsn.api.insn),
                           (uintptr_t) (p->ainsn.api.insn) +
                           MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

        /*
         * The return address needs restoring after stepping out of line.
         */
        p->ainsn.api.restore = (unsigned long) p->addr +
                sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
        /* This instruction is not executed out of line. No need to adjust the PC */
        p->ainsn.api.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (p->ainsn.api.handler)
                p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);

        /* single step simulated, now go for post processing */
        post_kprobe_handler(kcb, regs);
}

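/*
 * Validate and decode the probed instruction: the address must be 32-bit
 * aligned and must not sit in exception text or in rodata. The decoder
 * then decides whether the instruction can be stepped out of line
 * (INSN_GOOD), must be simulated (INSN_GOOD_NO_SLOT), or cannot be
 * probed at all (INSN_REJECTED).
 */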
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        unsigned long probe_addr = (unsigned long)p->addr;
        extern char __start_rodata[];
        extern char __end_rodata[];

        if (probe_addr & 0x3)
                return -EINVAL;

        /* copy instruction */
        p->opcode = le32_to_cpu(*p->addr);

        if (in_exception_text(probe_addr))
                return -EINVAL;
        if (probe_addr >= (unsigned long) __start_rodata &&
            probe_addr <= (unsigned long) __end_rodata)
                return -EINVAL;

        /* decode instruction */
        switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
        case INSN_REJECTED:     /* insn not supported */
                return -EINVAL;

        case INSN_GOOD_NO_SLOT: /* insn needs simulation */
                p->ainsn.api.insn = NULL;
                break;

        case INSN_GOOD: /* instruction uses slot */
                p->ainsn.api.insn = get_insn_slot();
                if (!p->ainsn.api.insn)
                        return -ENOMEM;
                break;
        }

        /* prepare the instruction */
        if (p->ainsn.api.insn)
                arch_prepare_ss_slot(p);
        else
                arch_prepare_simulate(p);

        return 0;
}

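/*
 * Override the generic (weak) slot-page allocator: slots live in a
 * vmalloc'd executable page that is made read-only, so the only writes
 * to it go through the instruction-patching path above.
 */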
void *alloc_insn_page(void)
{
        void *page;

        page = vmalloc_exec(PAGE_SIZE);
        if (page)
                set_memory_ro((unsigned long)page, 1);

        return page;
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.api.insn) {
                free_insn_slot(p->ainsn.api.insn, 0);
                p->ainsn.api.insn = NULL;
        }
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
        __this_cpu_write(current_kprobe, p);
}

/*
 * When PSTATE.D is set (masked), software step exceptions cannot be
 * generated.
 * SPSR's D bit shows the value of PSTATE.D immediately before the
 * exception was taken. PSTATE.D is set on entry to any exception mode,
 * but software clears it on exception entry for any normal
 * (non-debug-exception) mode. Therefore, when we enter the kprobe
 * breakpoint handler from a normal mode, the SPSR.D bit is already
 * cleared; it is still set when we enter from a debug exception mode.
 * Since we always need to generate a single step exception after a
 * kprobe breakpoint exception, we must clear it unconditionally once
 * we know the current breakpoint exception is for a kprobe.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
        unsigned long spsr = regs->pstate;

        if (mask)
                spsr |= PSR_D_BIT;
        else
                spsr &= ~PSR_D_BIT;

        regs->pstate = spsr;
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could fire
 * between the exception return and the start of the out-of-line single
 * step, which would result in wrongly single-stepping into the interrupt
 * handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
                                                 struct pt_regs *regs)
{
        kcb->saved_irqflag = regs->pstate;
        regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
                                                    struct pt_regs *regs)
{
        if (kcb->saved_irqflag & PSR_I_BIT)
                regs->pstate |= PSR_I_BIT;
        else
                regs->pstate &= ~PSR_I_BIT;
}

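/*
 * The single-step context tracks where a pending step is expected to
 * land: one instruction past the start of the xol slot. The step
 * handler uses this to tell kprobe steps apart from anyone else's.
 */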
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
        kcb->ss_ctx.ss_pending = true;
        kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
        kcb->ss_ctx.ss_pending = false;
        kcb->ss_ctx.match_addr = 0;
}

static void __kprobes setup_singlestep(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb, int reenter)
{
        unsigned long slot;

        if (reenter) {
                save_previous_kprobe(kcb);
                set_current_kprobe(p);
                kcb->kprobe_status = KPROBE_REENTER;
        } else {
                kcb->kprobe_status = KPROBE_HIT_SS;
        }

        if (p->ainsn.api.insn) {
                /* prepare for single stepping */
                slot = (unsigned long)p->ainsn.api.insn;

                set_ss_context(kcb, slot);      /* mark pending ss */

                /* clear PSTATE.D so the step exception can be taken */
                spsr_set_debug_flag(regs, 0);

                /* IRQs and single stepping do not mix well. */
                kprobes_save_local_irqflag(kcb, regs);
                kernel_enable_single_step(regs);
                instruction_pointer_set(regs, slot);
        } else {
                /* insn simulation */
                arch_simulate_insn(p, regs);
        }
}

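/*
 * A probe can fire while another is being handled, e.g. when a probe is
 * placed on a function called from a kprobe handler. Re-entering from
 * HIT_ACTIVE/HIT_SSDONE is tolerated (counted as missed); re-entering
 * while a single step is still in flight is unrecoverable.
 */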
static int __kprobes reenter_kprobe(struct kprobe *p,
                                    struct pt_regs *regs,
                                    struct kprobe_ctlblk *kcb)
{
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
        case KPROBE_HIT_ACTIVE:
                kprobes_inc_nmissed_count(p);
                setup_singlestep(p, regs, kcb, 1);
                break;
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                pr_warn("Unrecoverable kprobe detected.\n");
                dump_kprobe(p);
                BUG();
                break;
        default:
                WARN_ON(1);
                return 0;
        }

        return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();

        if (!cur)
                return;

        /* restore the return address if this was a non-branching insn */
        if (cur->ainsn.api.restore != 0)
                instruction_pointer_set(regs, cur->ainsn.api.restore);

        /* restore the previously saved kprobe state and continue */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                return;
        }
        /* call post handler */
        kcb->kprobe_status = KPROBE_HIT_SSDONE;
        if (cur->post_handler) {
                /*
                 * post_handler can hit a breakpoint and single step
                 * again, so we enable the D-flag for the recursive
                 * exception.
                 */
                cur->post_handler(cur, regs, 0);
        }

        reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SS:
        case KPROBE_REENTER:
                /*
                 * We are here because the instruction being single
                 * stepped caused a page fault. We reset the current
                 * kprobe, point the ip back to the probe address and
                 * allow the page fault handler to continue as a
                 * normal page fault.
                 */
                instruction_pointer_set(regs, (unsigned long) cur->addr);
                if (!instruction_pointer(regs))
                        BUG();

                kernel_disable_single_step();

                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
                        reset_current_kprobe();

                break;
        case KPROBE_HIT_ACTIVE:
        case KPROBE_HIT_SSDONE:
                /*
                 * We increment the nmissed count for accounting.
                 * The npre/npostfault counts could also account for
                 * these specific fault cases.
                 */
                kprobes_inc_nmissed_count(cur);

                /*
                 * We come here because instructions in the pre/post
                 * handler caused the page fault. This could happen
                 * if the handler tries to access user space, e.g. by
                 * copy_from_user() or get_user(). Let the
                 * user-specified handler try to fix it first.
                 */
                if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
                        return 1;

                /*
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
                if (fixup_exception(regs))
                        return 1;
        }
        return 0;
}

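/*
 * Entry point from the BRK exception: either a registered kprobe lives
 * at the faulting address, the break belongs to a jprobe (handled via
 * its break_handler), or the breakpoint has already been removed by
 * another CPU and we simply resume.
 */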
static void __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p, *cur_kprobe;
        struct kprobe_ctlblk *kcb;
        unsigned long addr = instruction_pointer(regs);

        kcb = get_kprobe_ctlblk();
        cur_kprobe = kprobe_running();

        p = get_kprobe((kprobe_opcode_t *) addr);

        if (p) {
                if (cur_kprobe) {
                        if (reenter_kprobe(p, regs, kcb))
                                return;
                } else {
                        /* Probe hit */
                        set_current_kprobe(p);
                        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

                        /*
                         * If we have no pre-handler or it returned 0, we
                         * continue with normal processing. If we have a
                         * pre-handler and it returned non-zero, it prepped
                         * for calling the break_handler below on re-entry,
                         * so get out doing nothing more here.
                         *
                         * pre_handler can hit a breakpoint and single step
                         * before returning; keep the PSTATE D-flag enabled
                         * until pre_handler returns.
                         */
                        if (!p->pre_handler || !p->pre_handler(p, regs)) {
                                setup_singlestep(p, regs, kcb, 0);
                                return;
                        }
                }
        } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
            BRK64_OPCODE_KPROBES) && cur_kprobe) {
                /* We probably hit a jprobe. Call its break handler. */
                if (cur_kprobe->break_handler &&
                    cur_kprobe->break_handler(cur_kprobe, regs)) {
                        setup_singlestep(cur_kprobe, regs, kcb, 0);
                        return;
                }
        }
        /*
         * The breakpoint instruction was removed right
         * after we hit it. Another cpu has removed
         * either a probepoint or a debugger breakpoint
         * at this address. In either case, no further
         * handling of this interrupt is appropriate.
         * Return to the original instruction and continue.
         */
}

static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
        if ((kcb->ss_ctx.ss_pending)
            && (kcb->ss_ctx.match_addr == addr)) {
                clear_ss_context(kcb);  /* clear pending ss */
                return DBG_HOOK_HANDLED;
        }
        /* not ours, kprobes should ignore it */
        return DBG_HOOK_ERROR;
}

static int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        int retval;

        if (user_mode(regs))
                return DBG_HOOK_ERROR;

        /* return error if this is not our step */
        retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

        if (retval == DBG_HOOK_HANDLED) {
                kprobes_restore_local_irqflag(kcb, regs);
                kernel_disable_single_step();

                post_kprobe_handler(kcb, regs);
        }

        return retval;
}

static int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
        if (user_mode(regs))
                return DBG_HOOK_ERROR;

        kprobe_handler(regs);
        return DBG_HOOK_HANDLED;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        kcb->jprobe_saved_regs = *regs;
        /*
         * Since we can't be sure where in the stack frame "stacked"
         * pass-by-value arguments are stored, we just don't try to
         * duplicate any of the stack. Do not use jprobes on functions that
         * use more than 64 bytes (after padding each to an 8 byte boundary)
         * of arguments, or pass individual arguments larger than 16 bytes.
         */

        instruction_pointer_set(regs, (unsigned long) jp->entry);
        preempt_disable();
        pause_graph_tracing();
        return 1;
}

void __kprobes jprobe_return(void)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        /*
         * Jprobe handlers return by taking a break exception, encoded the
         * same way as a kprobe, but with the following differences:
         * - a special PC to distinguish it from other kprobes;
         * - the stack address restored from the saved pt_regs.
         */
        asm volatile("                          mov sp, %0      \n"
                     "jprobe_return_break:      brk %1          \n"
                     :
                     : "r" (kcb->jprobe_saved_regs.sp),
                       "I" (BRK64_ESR_KPROBES)
                     : "memory");

        unreachable();
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        long stack_addr = kcb->jprobe_saved_regs.sp;
        long orig_sp = kernel_stack_pointer(regs);
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        extern const char jprobe_return_break[];

        if (instruction_pointer(regs) != (u64) jprobe_return_break)
                return 0;

        if (orig_sp != stack_addr) {
                struct pt_regs *saved_regs =
                    (struct pt_regs *)kcb->jprobe_saved_regs.sp;
                pr_err("current sp %lx does not match saved sp %lx\n",
                       orig_sp, stack_addr);
                pr_err("Saved registers for jprobe %p\n", jp);
                __show_regs(saved_regs);
                pr_err("Current registers\n");
                __show_regs(regs);
                BUG();
        }
        unpause_graph_tracing();
        *regs = kcb->jprobe_saved_regs;
        preempt_enable_no_resched();
        return 1;
}

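/*
 * Regions where a probe could not be handled safely are blacklisted:
 * the kprobes machinery itself, low-level exception entry, the idmap
 * and hypervisor text, and anything with an exception-table fixup.
 */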
bool arch_within_kprobe_blacklist(unsigned long addr)
{
        if ((addr >= (unsigned long)__kprobes_text_start &&
            addr < (unsigned long)__kprobes_text_end) ||
            (addr >= (unsigned long)__entry_text_start &&
            addr < (unsigned long)__entry_text_end) ||
            (addr >= (unsigned long)__idmap_text_start &&
            addr < (unsigned long)__idmap_text_end) ||
            (addr >= (unsigned long)__hyp_text_start &&
            addr < (unsigned long)__hyp_text_end) ||
            !!search_exception_tables(addr))
                return true;

        if (!is_kernel_in_hyp_mode()) {
                if ((addr >= (unsigned long)__hyp_idmap_text_start &&
                    addr < (unsigned long)__hyp_idmap_text_end))
                        return true;
        }

        return false;
}

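/*
 * Kretprobe trampoline handler: arch_prepare_kretprobe (below) replaces
 * the probed function's return address (x30) with kretprobe_trampoline,
 * so returning lands here; this handler recovers the real return
 * address, runs the user handlers, and resumes at the original caller.
 */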
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address =
                (unsigned long)&kretprobe_trampoline;
        kprobe_opcode_t *correct_ret_addr = NULL;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task either because multiple functions in the call path have
         * return probes installed on them, and/or more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always pushed into the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the (chronologically) first instance's ret_addr
         *       will be the real return address, and all the rest will
         *       point to kretprobe_trampoline.
         */
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack.
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);

        correct_ret_addr = ri->ret_addr;
        hlist_for_each_entry_safe(ri, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
                        __this_cpu_write(current_kprobe, &ri->rp->kp);
                        get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
                        __this_cpu_write(current_kprobe, NULL);
                }

                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address. Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack.
                         */
                        break;
        }

        kretprobe_hash_unlock(current, &flags);

        hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }

        return (void *)orig_ret_address;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

        /* replace return addr (x30) with trampoline */
        regs->regs[30] = (long)&kretprobe_trampoline;
}

/* the trampoline is not handled via a real kprobe on arm64 */
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        return 0;
}

int __init arch_init_kprobes(void)
{
        return 0;
}