GNU Linux-libre 4.14.328-gnu1: arch/powerpc/kernel/kprobes.c
/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *              Probes initial implementation (includes contributions from
 *              Rusty Russell).
 * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *              interface to access function arguments.
 * 2004-Nov     Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *              for PPC64
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/extable.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/code-patching.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/sections.h>
#include <linux/uaccess.h>

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int is_current_kprobe_addr(unsigned long addr)
{
	struct kprobe *p = kprobe_running();
	return (p && (unsigned long)p->addr == addr) ? 1 : 0;
}

bool arch_within_kprobe_blacklist(unsigned long addr)
{
	return  (addr >= (unsigned long)__kprobes_text_start &&
		 addr < (unsigned long)__kprobes_text_end) ||
		(addr >= (unsigned long)_stext &&
		 addr < (unsigned long)__head_end);
}
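
/*
 * Note on the blacklist above: beyond the explicit __kprobes text section,
 * everything from _stext up to __head_end is excluded. That region holds
 * the early head code and exception vectors, which can execute in real
 * mode (MSR_IR/MSR_DR off), where taking a trap for a probe is not safe.
 */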

kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
{
	kprobe_opcode_t *addr;

#ifdef PPC64_ELF_ABI_v2
	/* PPC64 ABIv2 needs local entry point */
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	if (addr && !offset) {
#ifdef CONFIG_KPROBES_ON_FTRACE
		unsigned long faddr;
		/*
		 * Per livepatch.h, ftrace location is always within the first
		 * 16 bytes of a function on powerpc with -mprofile-kernel.
		 */
		faddr = ftrace_location_range((unsigned long)addr,
					      (unsigned long)addr + 16);
		if (faddr)
			addr = (kprobe_opcode_t *)faddr;
		else
#endif
			addr = (kprobe_opcode_t *)ppc_function_entry(addr);
	}
#elif defined(PPC64_ELF_ABI_v1)
	/*
	 * 64bit powerpc ABIv1 uses function descriptors:
	 * - Check for the dot variant of the symbol first.
	 * - If that fails, try looking up the symbol provided.
	 *
	 * This ensures we always get to the actual symbol and not
	 * the descriptor.
	 *
	 * Also handle <module:symbol> format.
	 */
	char dot_name[MODULE_NAME_LEN + 1 + KSYM_NAME_LEN];
	const char *modsym;
	bool dot_appended = false;
	if ((modsym = strchr(name, ':')) != NULL) {
		modsym++;
		if (*modsym != '\0' && *modsym != '.') {
			/* Convert to <module:.symbol> */
			strncpy(dot_name, name, modsym - name);
			dot_name[modsym - name] = '.';
			dot_name[modsym - name + 1] = '\0';
			strncat(dot_name, modsym,
				sizeof(dot_name) - (modsym - name) - 2);
			dot_appended = true;
		} else {
			dot_name[0] = '\0';
			strncat(dot_name, name, sizeof(dot_name) - 1);
		}
	} else if (name[0] != '.') {
		dot_name[0] = '.';
		dot_name[1] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 2);
		dot_appended = true;
	} else {
		dot_name[0] = '\0';
		strncat(dot_name, name, KSYM_NAME_LEN - 1);
	}
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(dot_name);
	if (!addr && dot_appended) {
		/* Let's try the original non-dot symbol lookup */
		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
	}
#else
	addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
#endif

	return addr;
}
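
/*
 * Worked example of the ABIv1 dot-symbol handling above (illustrative,
 * not from the original source): a lookup of "do_fork" is first tried as
 * ".do_fork" (the text entry), falling back to "do_fork" (the function
 * descriptor) only if the dot variant is missing; "ext4:ext4_sync_fs"
 * is rewritten to "ext4:.ext4_sync_fs" before the lookup.
 */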

int arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64.  This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);

void arch_arm_kprobe(struct kprobe *p)
{
	patch_instruction(p->addr, BREAKPOINT_INSTRUCTION);
}
NOKPROBE_SYMBOL(arch_arm_kprobe);
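
/*
 * Arming a probe patches in BREAKPOINT_INSTRUCTION, which on powerpc is
 * an unconditional trap (the "tw 31,0,0" encoding, 0x7fe00008, per
 * asm/probes.h); hitting it raises a program check that is routed into
 * kprobe_handler() below.
 */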

void arch_disarm_kprobe(struct kprobe *p)
{
	patch_instruction(p->addr, p->opcode);
}
NOKPROBE_SYMBOL(arch_disarm_kprobe);

void arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
NOKPROBE_SYMBOL(arch_remove_kprobe);

static nokprobe_inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	enable_single_step(regs);

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, as values in regs could determine whether
	 * the trap is taken or not.
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}
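
/*
 * A note on enable_single_step() (an assumption based on asm/probes.h,
 * not spelled out here): it sets the platform's single-step MSR bits,
 * MSR_SE on classic/server parts and the debug facilities (MSR_DE plus
 * DBCR0 settings) on BookE, which is what the MSR_SINGLESTEP mask used
 * elsewhere in this file covers.
 */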

static nokprobe_inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static nokprobe_inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static nokprobe_inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_msr = regs->msr;
}

bool arch_kprobe_on_func_entry(unsigned long offset)
{
#ifdef PPC64_ELF_ABI_v2
#ifdef CONFIG_KPROBES_ON_FTRACE
	return offset <= 16;
#else
	return offset <= 8;
#endif
#else
	return !offset;
#endif
}
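
/*
 * Rationale for the offsets above: an ABIv2 function has a global entry
 * point that sets up r2 (two instructions, 8 bytes) followed by the local
 * entry point, so anything within the first 8 bytes still counts as
 * function entry; with -mprofile-kernel the ftrace location can
 * additionally sit within the first 16 bytes (see the livepatch.h note
 * quoted earlier in this file).
 */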

void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
NOKPROBE_SYMBOL(arch_prepare_kretprobe);
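
/*
 * In other words: on powerpc the return address lives in the link
 * register at function entry, so saving regs->link and pointing it at
 * kretprobe_trampoline makes the function's eventual "blr" land on the
 * trampoline instead of the real caller.
 */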

int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	/* regs->nip is also adjusted if emulate_step returns 1 */
	ret = emulate_step(regs, insn);
	if (ret > 0) {
		/*
		 * Once this instruction has been boosted
		 * successfully, set the boostable flag
		 */
		if (unlikely(p->ainsn.boostable == 0))
			p->ainsn.boostable = 1;
	} else if (ret < 0) {
		/*
		 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
		 * So, we should never get here... but, it's still
		 * good to catch them, just in case...
		 */
		printk("Can't step on instruction %x\n", insn);
		BUG();
	} else if (ret == 0)
		/* This instruction can't be boosted */
		p->ainsn.boostable = -1;

	return ret;
}
NOKPROBE_SYMBOL(try_to_emulate);
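
/*
 * Summary of the boostable states used here: 0 means emulation has not
 * yet been attempted, 1 means emulate_step() has handled this
 * instruction before (so a hit can be serviced without single-stepping),
 * and -1 means emulation failed once and we always fall back to hardware
 * single-step.
 */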

int kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	if (user_mode(regs))
		return 0;

	if (!IS_ENABLED(CONFIG_BOOKE) &&
	    (!(regs->msr & MSR_IR) || !(regs->msr & MSR_DR)))
		return 0;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kprobes_inc_nmissed_count(p);
			kcb->kprobe_status = KPROBE_REENTER;
			if (p->ainsn.boostable >= 0) {
				ret = try_to_emulate(p, regs);

				if (ret > 0) {
					restore_previous_kprobe(kcb);
					preempt_enable_no_resched();
					return 1;
				}
			}
			prepare_singlestep(p, regs);
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, it does not belong to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit, no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __this_cpu_read(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				if (!skip_singlestep(p, regs, kcb))
					goto ss_probe;
				ret = 1;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it.  Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address.  In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		ret = try_to_emulate(p, regs);

		if (ret > 0) {
			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		}
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
NOKPROBE_SYMBOL(kprobe_handler);
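
/*
 * Rough state machine for the common (non-reentrant) hit, as implemented
 * above: trap -> KPROBE_HIT_ACTIVE -> pre_handler() -> either emulate the
 * instruction in place (boosted path, serviced in a single trap) or
 * single-step a copy -> KPROBE_HIT_SS -> single-step trap ->
 * kprobe_post_handler() -> post_handler() -> KPROBE_HIT_SSDONE -> resume
 * at the instruction after the probe point.
 */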

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *		causes the handlers to fire
 */
asm(".global kretprobe_trampoline\n"
	".type kretprobe_trampoline, @function\n"
	"kretprobe_trampoline:\n"
	"nop\n"
	"blr\n"
	".size kretprobe_trampoline, .-kretprobe_trampoline\n");

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	/*
	 * We get here through one of two paths:
	 * 1. by taking a trap -> kprobe_handler() -> here
	 * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
	 *
	 * When going back through (1), we need regs->nip to be setup properly
	 * as it is used to determine the return address from the trap.
	 * For (2), since nip is not honoured with optprobes, we instead setup
	 * the link register properly so that the subsequent 'blr' in
	 * kretprobe_trampoline jumps back to the right instruction.
	 *
	 * For nip, we should set the address to the previous instruction since
	 * we end up emulating it in kprobe_handler(), which increments the nip
	 * again.
	 */
	regs->nip = orig_ret_address - 4;
	regs->link = orig_ret_address;

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return 0;
}
NOKPROBE_SYMBOL(trampoline_probe_handler);

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the "breakpoint" instruction.
 * To avoid the SMP problems that can occur when we temporarily put back
 * the original opcode to single-step, we single-stepped a copy of the
 * instruction.  The address of this copy is p->ainsn.insn.
 */
int kprobe_post_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur || user_mode(regs))
		return 0;

	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Adjust nip to after the single-stepped instruction */
	regs->nip = (unsigned long)cur->addr + 4;
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
NOKPROBE_SYMBOL(kprobe_post_handler);

int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the nip back to the probe address,
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = extable_fixup(entry);
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
NOKPROBE_SYMBOL(kprobe_fault_handler);

unsigned long arch_deref_entry_point(void *entry)
{
#ifdef PPC64_ELF_ABI_v1
	if (!kernel_text_address((unsigned long)entry))
		return ppc_global_function_entry(entry);
	else
#endif
		return (unsigned long)entry;
}
NOKPROBE_SYMBOL(arch_deref_entry_point);
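
/*
 * What arch_deref_entry_point() handles: under ABIv1 a C function
 * pointer refers to a function descriptor (entry address, TOC pointer,
 * environment) in the data segment rather than to text, so if 'entry'
 * is not already a kernel text address the real entry point is read out
 * of the descriptor via ppc_global_function_entry().
 */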

int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef PPC64_ELF_ABI_v2
	regs->gpr[12] = (unsigned long)jp->entry;
#elif defined(PPC64_ELF_ABI_v1)
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	/*
	 * jprobes use jprobe_return() which skips the normal return
	 * path of the function, and this messes up the accounting of the
	 * function graph tracer.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();

	return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
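
/*
 * The two register fixups above mirror the ABIs' calling conventions:
 * an ABIv2 global entry point expects r12 to hold its own address so the
 * prologue can derive r2 (the TOC pointer) from it, while ABIv1 expects
 * r2 to be loaded with the TOC value straight from the function
 * descriptor.
 */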

void __used jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}
NOKPROBE_SYMBOL(jprobe_return);
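
/*
 * Control flow of a jprobe hit: setjmp_pre_handler() redirects nip to
 * the user's handler, the handler ends by calling jprobe_return(), and
 * the "trap" above re-enters kprobe_handler(), whose break_handler path
 * invokes longjmp_break_handler() below to restore the saved registers.
 */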

static void __used jprobe_return_end(void)
{
}
NOKPROBE_SYMBOL(jprobe_return_end);

int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here because
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* It's OK to start function graph tracing again */
	unpause_graph_tracing();
	preempt_enable_no_resched();
	return 1;
}
NOKPROBE_SYMBOL(longjmp_break_handler);

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}

int arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(arch_trampoline_kprobe);