GNU Linux-libre 5.10.217-gnu1: kernel/kprobes.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  Kernel Probes (KProbes)
4  *  kernel/kprobes.c
5  *
6  * Copyright (C) IBM Corporation, 2002, 2004
7  *
8  * 2002-Oct     Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
9  *              Probes initial implementation (includes suggestions from
10  *              Rusty Russell).
11  * 2004-Aug     Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
12  *              hlists and exceptions notifier as suggested by Andi Kleen.
13  * 2004-July    Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
14  *              interface to access function arguments.
15  * 2004-Sep     Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
16  *              exceptions notifier to be first on the priority list.
17  * 2005-May     Hien Nguyen <hien@us.ibm.com>, Jim Keniston
18  *              <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
19  *              <prasanna@in.ibm.com> added function-return probes.
20  */
21 #include <linux/kprobes.h>
22 #include <linux/hash.h>
23 #include <linux/init.h>
24 #include <linux/slab.h>
25 #include <linux/stddef.h>
26 #include <linux/export.h>
27 #include <linux/moduleloader.h>
28 #include <linux/kallsyms.h>
29 #include <linux/freezer.h>
30 #include <linux/seq_file.h>
31 #include <linux/debugfs.h>
32 #include <linux/sysctl.h>
33 #include <linux/kdebug.h>
34 #include <linux/memory.h>
35 #include <linux/ftrace.h>
36 #include <linux/cpu.h>
37 #include <linux/jump_label.h>
38 #include <linux/perf_event.h>
39 #include <linux/static_call.h>
40
41 #include <asm/sections.h>
42 #include <asm/cacheflush.h>
43 #include <asm/errno.h>
44 #include <linux/uaccess.h>
45
46 #define KPROBE_HASH_BITS 6
47 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
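/* I.e. 64 hash buckets, sizing both kprobe_table and kretprobe_inst_table. */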
48
49
50 static int kprobes_initialized;
51 /* kprobe_table can be accessed by:
52  * - normal hlist traversal and RCU add/del while kprobe_mutex is held,
53  * or
54  * - RCU hlist traversal with preemption disabled (breakpoint handlers).
55  */
56 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
57 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
58
59 /* NOTE: change this value only with kprobe_mutex held */
60 static bool kprobes_all_disarmed;
61
62 /* This protects kprobe_table and optimizing_list */
63 static DEFINE_MUTEX(kprobe_mutex);
64 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
65 static struct {
66         raw_spinlock_t lock ____cacheline_aligned_in_smp;
67 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
68
69 kprobe_opcode_t * __weak kprobe_lookup_name(const char *name,
70                                         unsigned int __unused)
71 {
72         return ((kprobe_opcode_t *)(kallsyms_lookup_name(name)));
73 }
74
75 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
76 {
77         return &(kretprobe_table_locks[hash].lock);
78 }
79
80 /* Blacklist -- list of struct kprobe_blacklist_entry */
81 static LIST_HEAD(kprobe_blacklist);
82
83 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
84 /*
85  * kprobe->ainsn.insn points to the copy of the instruction to be
86  * single-stepped. x86_64, POWER4 and above have no-exec support and
87  * stepping on the instruction on a vmalloced/kmalloced/data page
88  * is a recipe for disaster
89  */
90 struct kprobe_insn_page {
91         struct list_head list;
92         kprobe_opcode_t *insns;         /* Page of instruction slots */
93         struct kprobe_insn_cache *cache;
94         int nused;
95         int ngarbage;
96         char slot_used[];
97 };
98
99 #define KPROBE_INSN_PAGE_SIZE(slots)                    \
100         (offsetof(struct kprobe_insn_page, slot_used) + \
101          (sizeof(char) * (slots)))
102
103 static int slots_per_page(struct kprobe_insn_cache *c)
104 {
105         return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
106 }
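/*
 * Worked example (illustrative, not part of the original source): on x86,
 * kprobe_opcode_t is one byte and MAX_INSN_SIZE is 16, so with 4096-byte
 * pages slots_per_page() == 4096 / (16 * 1) == 256, and
 * KPROBE_INSN_PAGE_SIZE(256) is offsetof(struct kprobe_insn_page,
 * slot_used) plus 256 bytes of per-slot state.
 */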
107
108 enum kprobe_slot_state {
109         SLOT_CLEAN = 0,
110         SLOT_DIRTY = 1,
111         SLOT_USED = 2,
112 };
113
114 void __weak *alloc_insn_page(void)
115 {
116         return module_alloc(PAGE_SIZE);
117 }
118
119 void __weak free_insn_page(void *page)
120 {
121         module_memfree(page);
122 }
123
124 struct kprobe_insn_cache kprobe_insn_slots = {
125         .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
126         .alloc = alloc_insn_page,
127         .free = free_insn_page,
128         .sym = KPROBE_INSN_PAGE_SYM,
129         .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
130         .insn_size = MAX_INSN_SIZE,
131         .nr_garbage = 0,
132 };
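/*
 * Arch code normally reaches this cache through the get_insn_slot() and
 * free_insn_slot() wrappers generated by DEFINE_INSN_CACHE_OPS(insn) in
 * <linux/kprobes.h>, which pass &kprobe_insn_slots to the __ functions below.
 */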
133 static int collect_garbage_slots(struct kprobe_insn_cache *c);
134
135 /**
136  * __get_insn_slot() - Find a slot on an executable page for an instruction.
137  * We allocate an executable page if there's no room on existing ones.
138  */
139 kprobe_opcode_t *__get_insn_slot(struct kprobe_insn_cache *c)
140 {
141         struct kprobe_insn_page *kip;
142         kprobe_opcode_t *slot = NULL;
143
144         /* Since the slot array is not protected by rcu, we need a mutex */
145         mutex_lock(&c->mutex);
146  retry:
147         rcu_read_lock();
148         list_for_each_entry_rcu(kip, &c->pages, list) {
149                 if (kip->nused < slots_per_page(c)) {
150                         int i;
151                         for (i = 0; i < slots_per_page(c); i++) {
152                                 if (kip->slot_used[i] == SLOT_CLEAN) {
153                                         kip->slot_used[i] = SLOT_USED;
154                                         kip->nused++;
155                                         slot = kip->insns + (i * c->insn_size);
156                                         rcu_read_unlock();
157                                         goto out;
158                                 }
159                         }
160                         /* kip->nused is broken. Fix it. */
161                         kip->nused = slots_per_page(c);
162                         WARN_ON(1);
163                 }
164         }
165         rcu_read_unlock();
166
167         /* If there are any garbage slots, collect them and try again. */
168         if (c->nr_garbage && collect_garbage_slots(c) == 0)
169                 goto retry;
170
171         /* All out of space.  Need to allocate a new page. */
172         kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
173         if (!kip)
174                 goto out;
175
176         /*
177          * Use module_alloc so this page is within +/- 2GB of where the
178          * kernel image and loaded module images reside. This is required
179          * so x86_64 can correctly handle the %rip-relative fixups.
180          */
181         kip->insns = c->alloc();
182         if (!kip->insns) {
183                 kfree(kip);
184                 goto out;
185         }
186         INIT_LIST_HEAD(&kip->list);
187         memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
188         kip->slot_used[0] = SLOT_USED;
189         kip->nused = 1;
190         kip->ngarbage = 0;
191         kip->cache = c;
192         list_add_rcu(&kip->list, &c->pages);
193         slot = kip->insns;
194
195         /* Record the perf ksymbol register event after adding the page */
196         perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL, (unsigned long)kip->insns,
197                            PAGE_SIZE, false, c->sym);
198 out:
199         mutex_unlock(&c->mutex);
200         return slot;
201 }
202
203 /* Return 1 if all garbage slots are collected, otherwise 0. */
204 static int collect_one_slot(struct kprobe_insn_page *kip, int idx)
205 {
206         kip->slot_used[idx] = SLOT_CLEAN;
207         kip->nused--;
208         if (kip->nused == 0) {
209                 /*
210                  * Page is no longer in use.  Free it unless
211                  * it's the last one.  We keep the last one
212                  * so as not to have to set it up again the
213                  * next time somebody inserts a probe.
214                  */
215                 if (!list_is_singular(&kip->list)) {
216                         /*
217                          * Record perf ksymbol unregister event before removing
218                          * the page.
219                          */
220                         perf_event_ksymbol(PERF_RECORD_KSYMBOL_TYPE_OOL,
221                                            (unsigned long)kip->insns, PAGE_SIZE, true,
222                                            kip->cache->sym);
223                         list_del_rcu(&kip->list);
224                         synchronize_rcu();
225                         kip->cache->free(kip->insns);
226                         kfree(kip);
227                 }
228                 return 1;
229         }
230         return 0;
231 }
232
233 static int collect_garbage_slots(struct kprobe_insn_cache *c)
234 {
235         struct kprobe_insn_page *kip, *next;
236
237         /* Ensure no one is still running on the garbage slots */
238         synchronize_rcu();
239
240         list_for_each_entry_safe(kip, next, &c->pages, list) {
241                 int i;
242                 if (kip->ngarbage == 0)
243                         continue;
244                 kip->ngarbage = 0;      /* we will collect all garbage slots */
245                 for (i = 0; i < slots_per_page(c); i++) {
246                         if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot(kip, i))
247                                 break;
248                 }
249         }
250         c->nr_garbage = 0;
251         return 0;
252 }
253
254 void __free_insn_slot(struct kprobe_insn_cache *c,
255                       kprobe_opcode_t *slot, int dirty)
256 {
257         struct kprobe_insn_page *kip;
258         long idx;
259
260         mutex_lock(&c->mutex);
261         rcu_read_lock();
262         list_for_each_entry_rcu(kip, &c->pages, list) {
263                 idx = ((long)slot - (long)kip->insns) /
264                         (c->insn_size * sizeof(kprobe_opcode_t));
265                 if (idx >= 0 && idx < slots_per_page(c))
266                         goto out;
267         }
268         /* Could not find this slot. */
269         WARN_ON(1);
270         kip = NULL;
271 out:
272         rcu_read_unlock();
273         /* Mark and sweep: this may sleep */
274         if (kip) {
275                 /* Check double free */
276                 WARN_ON(kip->slot_used[idx] != SLOT_USED);
277                 if (dirty) {
278                         kip->slot_used[idx] = SLOT_DIRTY;
279                         kip->ngarbage++;
280                         if (++c->nr_garbage > slots_per_page(c))
281                                 collect_garbage_slots(c);
282                 } else {
283                         collect_one_slot(kip, idx);
284                 }
285         }
286         mutex_unlock(&c->mutex);
287 }
288
289 /*
290  * Check whether the given address is on a page of kprobe instruction
291  * slots. This is used when checking whether an address on the stack
292  * is in a text area or not.
293  */
294 bool __is_insn_slot_addr(struct kprobe_insn_cache *c, unsigned long addr)
295 {
296         struct kprobe_insn_page *kip;
297         bool ret = false;
298
299         rcu_read_lock();
300         list_for_each_entry_rcu(kip, &c->pages, list) {
301                 if (addr >= (unsigned long)kip->insns &&
302                     addr < (unsigned long)kip->insns + PAGE_SIZE) {
303                         ret = true;
304                         break;
305                 }
306         }
307         rcu_read_unlock();
308
309         return ret;
310 }
311
312 int kprobe_cache_get_kallsym(struct kprobe_insn_cache *c, unsigned int *symnum,
313                              unsigned long *value, char *type, char *sym)
314 {
315         struct kprobe_insn_page *kip;
316         int ret = -ERANGE;
317
318         rcu_read_lock();
319         list_for_each_entry_rcu(kip, &c->pages, list) {
320                 if ((*symnum)--)
321                         continue;
322                 strlcpy(sym, c->sym, KSYM_NAME_LEN);
323                 *type = 't';
324                 *value = (unsigned long)kip->insns;
325                 ret = 0;
326                 break;
327         }
328         rcu_read_unlock();
329
330         return ret;
331 }
332
333 #ifdef CONFIG_OPTPROBES
334 /* For optimized_kprobe buffer */
335 struct kprobe_insn_cache kprobe_optinsn_slots = {
336         .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
337         .alloc = alloc_insn_page,
338         .free = free_insn_page,
339         .sym = KPROBE_OPTINSN_PAGE_SYM,
340         .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
341         /* .insn_size is initialized later */
342         .nr_garbage = 0,
343 };
344 #endif
345 #endif
346
347 /* We have preemption disabled, so it is safe to use the __ versions */
348 static inline void set_kprobe_instance(struct kprobe *kp)
349 {
350         __this_cpu_write(kprobe_instance, kp);
351 }
352
353 static inline void reset_kprobe_instance(void)
354 {
355         __this_cpu_write(kprobe_instance, NULL);
356 }
357
358 /*
359  * This routine is called either:
360  *      - under the kprobe_mutex - during kprobe_[un]register()
361  *                              OR
362  *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
363  */
364 struct kprobe *get_kprobe(void *addr)
365 {
366         struct hlist_head *head;
367         struct kprobe *p;
368
369         head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
370         hlist_for_each_entry_rcu(p, head, hlist,
371                                  lockdep_is_held(&kprobe_mutex)) {
372                 if (p->addr == addr)
373                         return p;
374         }
375
376         return NULL;
377 }
378 NOKPROBE_SYMBOL(get_kprobe);
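/*
 * Usage sketch (illustrative only): a process-context lookup must be
 * serialized against (un)registration by kprobe_mutex, e.g.:
 *
 *	mutex_lock(&kprobe_mutex);
 *	p = get_kprobe(addr);
 *	if (p)
 *		...inspect the probe...
 *	mutex_unlock(&kprobe_mutex);
 *
 * Breakpoint handlers instead rely on RCU with preemption disabled.
 */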
379
380 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
381
382 /* Return true if the kprobe is an aggregator */
383 static inline int kprobe_aggrprobe(struct kprobe *p)
384 {
385         return p->pre_handler == aggr_pre_handler;
386 }
387
388 /* Return true(!0) if the kprobe is unused */
389 static inline int kprobe_unused(struct kprobe *p)
390 {
391         return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
392                list_empty(&p->list);
393 }
394
395 /*
396  * Keep all fields in the kprobe consistent
397  */
398 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
399 {
400         memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
401         memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
402 }
403
404 #ifdef CONFIG_OPTPROBES
405 /* NOTE: change this value only with kprobe_mutex held */
406 static bool kprobes_allow_optimization;
407
408 /*
409  * Call all the pre_handlers on the list, but ignore their return values.
410  * This must be called from the arch-dependent optimized caller.
411  */
412 void opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
413 {
414         struct kprobe *kp;
415
416         list_for_each_entry_rcu(kp, &p->list, list) {
417                 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
418                         set_kprobe_instance(kp);
419                         kp->pre_handler(kp, regs);
420                 }
421                 reset_kprobe_instance();
422         }
423 }
424 NOKPROBE_SYMBOL(opt_pre_handler);
425
426 /* Free optimized instructions and optimized_kprobe */
427 static void free_aggr_kprobe(struct kprobe *p)
428 {
429         struct optimized_kprobe *op;
430
431         op = container_of(p, struct optimized_kprobe, kp);
432         arch_remove_optimized_kprobe(op);
433         arch_remove_kprobe(p);
434         kfree(op);
435 }
436
437 /* Return true(!0) if the kprobe is ready for optimization. */
438 static inline int kprobe_optready(struct kprobe *p)
439 {
440         struct optimized_kprobe *op;
441
442         if (kprobe_aggrprobe(p)) {
443                 op = container_of(p, struct optimized_kprobe, kp);
444                 return arch_prepared_optinsn(&op->optinsn);
445         }
446
447         return 0;
448 }
449
450 /* Return true if the kprobe is disarmed. Note: p must be on hash list */
451 bool kprobe_disarmed(struct kprobe *p)
452 {
453         struct optimized_kprobe *op;
454
455         /* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
456         if (!kprobe_aggrprobe(p))
457                 return kprobe_disabled(p);
458
459         op = container_of(p, struct optimized_kprobe, kp);
460
461         return kprobe_disabled(p) && list_empty(&op->list);
462 }
463
464 /* Return true(!0) if the probe is queued on (un)optimizing lists */
465 static int kprobe_queued(struct kprobe *p)
466 {
467         struct optimized_kprobe *op;
468
469         if (kprobe_aggrprobe(p)) {
470                 op = container_of(p, struct optimized_kprobe, kp);
471                 if (!list_empty(&op->list))
472                         return 1;
473         }
474         return 0;
475 }
476
477 /*
478  * Return an optimized kprobe whose optimizing code replaces
479  * instructions including addr (excluding the breakpoint itself).
480  */
481 static struct kprobe *get_optimized_kprobe(unsigned long addr)
482 {
483         int i;
484         struct kprobe *p = NULL;
485         struct optimized_kprobe *op;
486
487         /* Don't check i == 0, since that is a breakpoint case. */
488         for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
489                 p = get_kprobe((void *)(addr - i));
490
491         if (p && kprobe_optready(p)) {
492                 op = container_of(p, struct optimized_kprobe, kp);
493                 if (arch_within_optimized_kprobe(op, addr))
494                         return p;
495         }
496
497         return NULL;
498 }
499
500 /* Optimization staging list, protected by kprobe_mutex */
501 static LIST_HEAD(optimizing_list);
502 static LIST_HEAD(unoptimizing_list);
503 static LIST_HEAD(freeing_list);
504
505 static void kprobe_optimizer(struct work_struct *work);
506 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
507 #define OPTIMIZE_DELAY 5
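/*
 * OPTIMIZE_DELAY is in jiffies; kick_kprobe_optimizer() below uses it with
 * schedule_delayed_work(), so bursts of (un)registrations are batched into
 * a single optimizer pass.
 */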
508
509 /*
510  * Optimize (replace a breakpoint with a jump) kprobes listed on
511  * optimizing_list.
512  */
513 static void do_optimize_kprobes(void)
514 {
515         lockdep_assert_held(&text_mutex);
516         /*
517          * Optimization and unoptimization refer to online_cpus via
518          * stop_machine(), while cpu-hotplug modifies online_cpus. At the
519          * same time, text_mutex is held both here and during cpu-hotplug.
520          * This combination can cause a deadlock (cpu-hotplug tries to lock
521          * text_mutex, but stop_machine() cannot proceed because online_cpus
522          * has changed). To avoid this deadlock, the caller must hold the
523          * cpu-hotplug lock to prevent cpu-hotplug from running outside of
524          * the text_mutex critical section.
525          */
526         lockdep_assert_cpus_held();
527
528         /* Optimization is never done while kprobes are disarmed */
529         if (kprobes_all_disarmed || !kprobes_allow_optimization ||
530             list_empty(&optimizing_list))
531                 return;
532
533         arch_optimize_kprobes(&optimizing_list);
534 }
535
536 /*
537  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
538  * if needed) kprobes listed on unoptimizing_list.
539  */
540 static void do_unoptimize_kprobes(void)
541 {
542         struct optimized_kprobe *op, *tmp;
543
544         lockdep_assert_held(&text_mutex);
545         /* See comment in do_optimize_kprobes() */
546         lockdep_assert_cpus_held();
547
548         if (!list_empty(&unoptimizing_list))
549                 arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
550
551         /* Loop on 'freeing_list' for disarming and removing from kprobe hash list */
552         list_for_each_entry_safe(op, tmp, &freeing_list, list) {
553                 /* Switching from detour code to origin */
554                 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
555                 /* Disarm probes if marked disabled and not gone */
556                 if (kprobe_disabled(&op->kp) && !kprobe_gone(&op->kp))
557                         arch_disarm_kprobe(&op->kp);
558                 if (kprobe_unused(&op->kp)) {
559                         /*
560                          * Remove unused probes from hash list. After waiting
561                          * for synchronization, these probes are reclaimed.
562                          * (reclaiming is done by do_free_cleaned_kprobes.)
563                          */
564                         hlist_del_rcu(&op->kp.hlist);
565                 } else
566                         list_del_init(&op->list);
567         }
568 }
569
570 /* Reclaim all kprobes on freeing_list */
571 static void do_free_cleaned_kprobes(void)
572 {
573         struct optimized_kprobe *op, *tmp;
574
575         list_for_each_entry_safe(op, tmp, &freeing_list, list) {
576                 list_del_init(&op->list);
577                 if (WARN_ON_ONCE(!kprobe_unused(&op->kp))) {
578                         /*
579                          * This must not happen, but if there is a kprobe
580                          * still in use, keep it on kprobes hash list.
581                          */
582                         continue;
583                 }
584                 free_aggr_kprobe(&op->kp);
585         }
586 }
587
588 /* Start optimizer after OPTIMIZE_DELAY passed */
589 static void kick_kprobe_optimizer(void)
590 {
591         schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
592 }
593
594 /* Kprobe jump optimizer */
595 static void kprobe_optimizer(struct work_struct *work)
596 {
597         mutex_lock(&kprobe_mutex);
598         cpus_read_lock();
599         mutex_lock(&text_mutex);
600
601         /*
602          * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
603          * kprobes before waiting for the quiescence period.
604          */
605         do_unoptimize_kprobes();
606
607         /*
608          * Step 2: Wait for the quiescence period to ensure that all
609          * potentially preempted tasks have been scheduled normally. Because
610          * an optprobe may modify multiple instructions, a task may be
611          * preempted on the Nth instruction and later return into the
612          * 2nd-Nth byte of the jump instruction. This wait avoids that.
613          * Note that on a non-preemptive kernel, this is transparently
614          * converted to synchronize_sched() to wait for all interrupts.
615          */
616         synchronize_rcu_tasks();
617
618         /* Step 3: Optimize kprobes after the quiescence period */
619         do_optimize_kprobes();
620
621         /* Step 4: Free cleaned kprobes after the quiescence period */
622         do_free_cleaned_kprobes();
623
624         mutex_unlock(&text_mutex);
625         cpus_read_unlock();
626
627         /* Step 5: Kick optimizer again if needed */
628         if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
629                 kick_kprobe_optimizer();
630
631         mutex_unlock(&kprobe_mutex);
632 }
633
634 /* Wait for optimization and unoptimization to complete */
635 void wait_for_kprobe_optimizer(void)
636 {
637         mutex_lock(&kprobe_mutex);
638
639         while (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) {
640                 mutex_unlock(&kprobe_mutex);
641
642                 /* this will also make optimizing_work execute immediately */
643                 flush_delayed_work(&optimizing_work);
644                 /* @optimizing_work might not have been queued yet, relax */
645                 cpu_relax();
646
647                 mutex_lock(&kprobe_mutex);
648         }
649
650         mutex_unlock(&kprobe_mutex);
651 }
652
653 bool optprobe_queued_unopt(struct optimized_kprobe *op)
654 {
655         struct optimized_kprobe *_op;
656
657         list_for_each_entry(_op, &unoptimizing_list, list) {
658                 if (op == _op)
659                         return true;
660         }
661
662         return false;
663 }
664
665 /* Optimize kprobe if p is ready to be optimized */
666 static void optimize_kprobe(struct kprobe *p)
667 {
668         struct optimized_kprobe *op;
669
670         /* Check if the kprobe is disabled or not ready for optimization. */
671         if (!kprobe_optready(p) || !kprobes_allow_optimization ||
672             (kprobe_disabled(p) || kprobes_all_disarmed))
673                 return;
674
675         /* kprobes with a post_handler cannot be optimized */
676         if (p->post_handler)
677                 return;
678
679         op = container_of(p, struct optimized_kprobe, kp);
680
681         /* Check that there are no other kprobes at the optimized instructions */
682         if (arch_check_optimized_kprobe(op) < 0)
683                 return;
684
685         /* Check if it is already optimized. */
686         if (op->kp.flags & KPROBE_FLAG_OPTIMIZED) {
687                 if (optprobe_queued_unopt(op)) {
688                         /* This is being unoptimized. Just dequeue the probe */
689                         list_del_init(&op->list);
690                 }
691                 return;
692         }
693         op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
694
695         /* On unoptimizing/optimizing_list, op must have OPTIMIZED flag */
696         if (WARN_ON_ONCE(!list_empty(&op->list)))
697                 return;
698
699         list_add(&op->list, &optimizing_list);
700         kick_kprobe_optimizer();
701 }
702
703 /* Shortcut to direct unoptimizing */
704 static void force_unoptimize_kprobe(struct optimized_kprobe *op)
705 {
706         lockdep_assert_cpus_held();
707         arch_unoptimize_kprobe(op);
708         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
709 }
710
711 /* Unoptimize a kprobe if p is optimized */
712 static void unoptimize_kprobe(struct kprobe *p, bool force)
713 {
714         struct optimized_kprobe *op;
715
716         if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
717                 return; /* This is neither an optprobe nor optimized */
718
719         op = container_of(p, struct optimized_kprobe, kp);
720         if (!kprobe_optimized(p))
721                 return;
722
723         if (!list_empty(&op->list)) {
724                 if (optprobe_queued_unopt(op)) {
725                         /* Queued in unoptimizing queue */
726                         if (force) {
727                                 /*
728                                  * Forcibly unoptimize the kprobe here, and queue it
729                                  * in the freeing list for release afterwards.
730                                  */
731                                 force_unoptimize_kprobe(op);
732                                 list_move(&op->list, &freeing_list);
733                         }
734                 } else {
735                         /* Dequeue from the optimizing queue */
736                         list_del_init(&op->list);
737                         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
738                 }
739                 return;
740         }
741
742         /* Optimized kprobe case */
743         if (force) {
744                 /* Forcibly update the code: this is a special case */
745                 force_unoptimize_kprobe(op);
746         } else {
747                 list_add(&op->list, &unoptimizing_list);
748                 kick_kprobe_optimizer();
749         }
750 }
751
752 /* Cancel unoptimizing so the kprobe can be reused */
753 static int reuse_unused_kprobe(struct kprobe *ap)
754 {
755         struct optimized_kprobe *op;
756
757         /*
758          * An unused kprobe MUST be in the middle of delayed unoptimizing
759          * (meaning the relative jump is still in place) and disabled.
760          */
761         op = container_of(ap, struct optimized_kprobe, kp);
762         WARN_ON_ONCE(list_empty(&op->list));
763         /* Enable the probe again */
764         ap->flags &= ~KPROBE_FLAG_DISABLED;
765         /* Optimize it again (remove from op->list) */
766         if (!kprobe_optready(ap))
767                 return -EINVAL;
768
769         optimize_kprobe(ap);
770         return 0;
771 }
772
773 /* Remove optimized instructions */
774 static void kill_optimized_kprobe(struct kprobe *p)
775 {
776         struct optimized_kprobe *op;
777
778         op = container_of(p, struct optimized_kprobe, kp);
779         if (!list_empty(&op->list))
780                 /* Dequeue from the (un)optimization queue */
781                 list_del_init(&op->list);
782         op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
783
784         if (kprobe_unused(p)) {
785                 /*
786                  * Unused kprobe is on unoptimizing or freeing list. We move it
787                  * to freeing_list and let the kprobe_optimizer() remove it from
788                  * the kprobe hash list and free it.
789                  */
790                 if (optprobe_queued_unopt(op))
791                         list_move(&op->list, &freeing_list);
792         }
793
794         /* Don't touch the code, because it is already freed. */
795         arch_remove_optimized_kprobe(op);
796 }
797
798 static inline
799 void __prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
800 {
801         if (!kprobe_ftrace(p))
802                 arch_prepare_optimized_kprobe(op, p);
803 }
804
805 /* Try to prepare optimized instructions */
806 static void prepare_optimized_kprobe(struct kprobe *p)
807 {
808         struct optimized_kprobe *op;
809
810         op = container_of(p, struct optimized_kprobe, kp);
811         __prepare_optimized_kprobe(op, p);
812 }
813
814 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
815 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
816 {
817         struct optimized_kprobe *op;
818
819         op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
820         if (!op)
821                 return NULL;
822
823         INIT_LIST_HEAD(&op->list);
824         op->kp.addr = p->addr;
825         __prepare_optimized_kprobe(op, p);
826
827         return &op->kp;
828 }
829
830 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
831
832 /*
833  * Prepare an optimized_kprobe and optimize it
834  * NOTE: p must be a normal registered kprobe
835  */
836 static void try_to_optimize_kprobe(struct kprobe *p)
837 {
838         struct kprobe *ap;
839         struct optimized_kprobe *op;
840
841         /* Impossible to optimize ftrace-based kprobe */
842         if (kprobe_ftrace(p))
843                 return;
844
845         /* For preparing optimization, jump_label_text_reserved() is called */
846         cpus_read_lock();
847         jump_label_lock();
848         mutex_lock(&text_mutex);
849
850         ap = alloc_aggr_kprobe(p);
851         if (!ap)
852                 goto out;
853
854         op = container_of(ap, struct optimized_kprobe, kp);
855         if (!arch_prepared_optinsn(&op->optinsn)) {
856                 /* If setting up the optimization failed, fall back to a plain kprobe */
857                 arch_remove_optimized_kprobe(op);
858                 kfree(op);
859                 goto out;
860         }
861
862         init_aggr_kprobe(ap, p);
863         optimize_kprobe(ap);    /* This just kicks optimizer thread */
864
865 out:
866         mutex_unlock(&text_mutex);
867         jump_label_unlock();
868         cpus_read_unlock();
869 }
870
871 static void optimize_all_kprobes(void)
872 {
873         struct hlist_head *head;
874         struct kprobe *p;
875         unsigned int i;
876
877         mutex_lock(&kprobe_mutex);
878         /* If optimization is already allowed, just return */
879         if (kprobes_allow_optimization)
880                 goto out;
881
882         cpus_read_lock();
883         kprobes_allow_optimization = true;
884         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
885                 head = &kprobe_table[i];
886                 hlist_for_each_entry(p, head, hlist)
887                         if (!kprobe_disabled(p))
888                                 optimize_kprobe(p);
889         }
890         cpus_read_unlock();
891         printk(KERN_INFO "Kprobes globally optimized\n");
892 out:
893         mutex_unlock(&kprobe_mutex);
894 }
895
896 #ifdef CONFIG_SYSCTL
897 static void unoptimize_all_kprobes(void)
898 {
899         struct hlist_head *head;
900         struct kprobe *p;
901         unsigned int i;
902
903         mutex_lock(&kprobe_mutex);
904         /* If optimization is already prohibited, just return */
905         if (!kprobes_allow_optimization) {
906                 mutex_unlock(&kprobe_mutex);
907                 return;
908         }
909
910         cpus_read_lock();
911         kprobes_allow_optimization = false;
912         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
913                 head = &kprobe_table[i];
914                 hlist_for_each_entry(p, head, hlist) {
915                         if (!kprobe_disabled(p))
916                                 unoptimize_kprobe(p, false);
917                 }
918         }
919         cpus_read_unlock();
920         mutex_unlock(&kprobe_mutex);
921
922         /* Wait for unoptimizing completion */
923         wait_for_kprobe_optimizer();
924         printk(KERN_INFO "Kprobes globally unoptimized\n");
925 }
926
927 static DEFINE_MUTEX(kprobe_sysctl_mutex);
928 int sysctl_kprobes_optimization;
929 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
930                                       void *buffer, size_t *length,
931                                       loff_t *ppos)
932 {
933         int ret;
934
935         mutex_lock(&kprobe_sysctl_mutex);
936         sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
937         ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
938
939         if (sysctl_kprobes_optimization)
940                 optimize_all_kprobes();
941         else
942                 unoptimize_all_kprobes();
943         mutex_unlock(&kprobe_sysctl_mutex);
944
945         return ret;
946 }
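/*
 * Userspace sketch (assuming the "debug.kprobes-optimization" sysctl entry
 * in kernel/sysctl.c that is wired to the handler above):
 *
 *	# echo 0 > /proc/sys/debug/kprobes-optimization		<- unoptimize all
 *	# echo 1 > /proc/sys/debug/kprobes-optimization		<- optimize again
 */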
947 #endif /* CONFIG_SYSCTL */
948
949 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
950 static void __arm_kprobe(struct kprobe *p)
951 {
952         struct kprobe *_p;
953
954         /* Check collision with other optimized kprobes */
955         _p = get_optimized_kprobe((unsigned long)p->addr);
956         if (unlikely(_p))
957                 /* Fallback to unoptimized kprobe */
958                 unoptimize_kprobe(_p, true);
959
960         arch_arm_kprobe(p);
961         optimize_kprobe(p);     /* Try to optimize (add kprobe to a list) */
962 }
963
964 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
965 static void __disarm_kprobe(struct kprobe *p, bool reopt)
966 {
967         struct kprobe *_p;
968
969         /* Try to unoptimize */
970         unoptimize_kprobe(p, kprobes_all_disarmed);
971
972         if (!kprobe_queued(p)) {
973                 arch_disarm_kprobe(p);
974                 /* If another kprobe was blocked, optimize it. */
975                 _p = get_optimized_kprobe((unsigned long)p->addr);
976                 if (unlikely(_p) && reopt)
977                         optimize_kprobe(_p);
978         }
979         /* TODO: reoptimize others after unoptimized this probe */
980 }
981
982 #else /* !CONFIG_OPTPROBES */
983
984 #define optimize_kprobe(p)                      do {} while (0)
985 #define unoptimize_kprobe(p, f)                 do {} while (0)
986 #define kill_optimized_kprobe(p)                do {} while (0)
987 #define prepare_optimized_kprobe(p)             do {} while (0)
988 #define try_to_optimize_kprobe(p)               do {} while (0)
989 #define __arm_kprobe(p)                         arch_arm_kprobe(p)
990 #define __disarm_kprobe(p, o)                   arch_disarm_kprobe(p)
991 #define kprobe_disarmed(p)                      kprobe_disabled(p)
992 #define wait_for_kprobe_optimizer()             do {} while (0)
993
994 static int reuse_unused_kprobe(struct kprobe *ap)
995 {
996         /*
997          * If optimized kprobes are NOT supported, the aggr kprobe is
998          * released at the same time that the last aggregated kprobe is
999          * unregistered.
1000          * Thus there should be no chance to reuse an unused kprobe.
1001          */
1002         printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
1003         return -EINVAL;
1004 }
1005
1006 static void free_aggr_kprobe(struct kprobe *p)
1007 {
1008         arch_remove_kprobe(p);
1009         kfree(p);
1010 }
1011
1012 static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
1013 {
1014         return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
1015 }
1016 #endif /* CONFIG_OPTPROBES */
1017
1018 #ifdef CONFIG_KPROBES_ON_FTRACE
1019 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
1020         .func = kprobe_ftrace_handler,
1021         .flags = FTRACE_OPS_FL_SAVE_REGS,
1022 };
1023
1024 static struct ftrace_ops kprobe_ipmodify_ops __read_mostly = {
1025         .func = kprobe_ftrace_handler,
1026         .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
1027 };
1028
1029 static int kprobe_ipmodify_enabled;
1030 static int kprobe_ftrace_enabled;
1031
1032 /* The caller must ensure p->addr is really on ftrace */
1033 static int prepare_kprobe(struct kprobe *p)
1034 {
1035         if (!kprobe_ftrace(p))
1036                 return arch_prepare_kprobe(p);
1037
1038         return arch_prepare_kprobe_ftrace(p);
1039 }
1040
1041 /* Caller must lock kprobe_mutex */
1042 static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1043                                int *cnt)
1044 {
1045         int ret = 0;
1046
1047         ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
1048         if (ret) {
1049                 pr_debug("Failed to arm kprobe-ftrace at %pS (%d)\n",
1050                          p->addr, ret);
1051                 return ret;
1052         }
1053
1054         if (*cnt == 0) {
1055                 ret = register_ftrace_function(ops);
1056                 if (ret) {
1057                         pr_debug("Failed to init kprobe-ftrace (%d)\n", ret);
1058                         goto err_ftrace;
1059                 }
1060         }
1061
1062         (*cnt)++;
1063         return ret;
1064
1065 err_ftrace:
1066         /*
1067          * At this point, since ops is not registered, we should be safe from
1068          * registering an empty filter.
1069          */
1070         ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1071         return ret;
1072 }
1073
1074 static int arm_kprobe_ftrace(struct kprobe *p)
1075 {
1076         bool ipmodify = (p->post_handler != NULL);
1077
1078         return __arm_kprobe_ftrace(p,
1079                 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1080                 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1081 }
1082
1083 /* Caller must lock kprobe_mutex */
1084 static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
1085                                   int *cnt)
1086 {
1087         int ret = 0;
1088
1089         if (*cnt == 1) {
1090                 ret = unregister_ftrace_function(ops);
1091                 if (WARN(ret < 0, "Failed to unregister kprobe-ftrace (%d)\n", ret))
1092                         return ret;
1093         }
1094
1095         (*cnt)--;
1096
1097         ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 1, 0);
1098         WARN_ONCE(ret < 0, "Failed to disarm kprobe-ftrace at %pS (%d)\n",
1099                   p->addr, ret);
1100         return ret;
1101 }
1102
1103 static int disarm_kprobe_ftrace(struct kprobe *p)
1104 {
1105         bool ipmodify = (p->post_handler != NULL);
1106
1107         return __disarm_kprobe_ftrace(p,
1108                 ipmodify ? &kprobe_ipmodify_ops : &kprobe_ftrace_ops,
1109                 ipmodify ? &kprobe_ipmodify_enabled : &kprobe_ftrace_enabled);
1110 }
1111 #else   /* !CONFIG_KPROBES_ON_FTRACE */
1112 static inline int prepare_kprobe(struct kprobe *p)
1113 {
1114         return arch_prepare_kprobe(p);
1115 }
1116
1117 static inline int arm_kprobe_ftrace(struct kprobe *p)
1118 {
1119         return -ENODEV;
1120 }
1121
1122 static inline int disarm_kprobe_ftrace(struct kprobe *p)
1123 {
1124         return -ENODEV;
1125 }
1126 #endif
1127
1128 /* Arm a kprobe with text_mutex */
1129 static int arm_kprobe(struct kprobe *kp)
1130 {
1131         if (unlikely(kprobe_ftrace(kp)))
1132                 return arm_kprobe_ftrace(kp);
1133
1134         cpus_read_lock();
1135         mutex_lock(&text_mutex);
1136         __arm_kprobe(kp);
1137         mutex_unlock(&text_mutex);
1138         cpus_read_unlock();
1139
1140         return 0;
1141 }
1142
1143 /* Disarm a kprobe with text_mutex */
1144 static int disarm_kprobe(struct kprobe *kp, bool reopt)
1145 {
1146         if (unlikely(kprobe_ftrace(kp)))
1147                 return disarm_kprobe_ftrace(kp);
1148
1149         cpus_read_lock();
1150         mutex_lock(&text_mutex);
1151         __disarm_kprobe(kp, reopt);
1152         mutex_unlock(&text_mutex);
1153         cpus_read_unlock();
1154
1155         return 0;
1156 }
1157
1158 /*
1159  * Aggregate handlers for multiple kprobes support - these handlers
1160  * take care of invoking the individual kprobe handlers on p->list
1161  */
1162 static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
1163 {
1164         struct kprobe *kp;
1165
1166         list_for_each_entry_rcu(kp, &p->list, list) {
1167                 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
1168                         set_kprobe_instance(kp);
1169                         if (kp->pre_handler(kp, regs))
1170                                 return 1;
1171                 }
1172                 reset_kprobe_instance();
1173         }
1174         return 0;
1175 }
1176 NOKPROBE_SYMBOL(aggr_pre_handler);
1177
1178 static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
1179                               unsigned long flags)
1180 {
1181         struct kprobe *kp;
1182
1183         list_for_each_entry_rcu(kp, &p->list, list) {
1184                 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
1185                         set_kprobe_instance(kp);
1186                         kp->post_handler(kp, regs, flags);
1187                         reset_kprobe_instance();
1188                 }
1189         }
1190 }
1191 NOKPROBE_SYMBOL(aggr_post_handler);
1192
1193 static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
1194                               int trapnr)
1195 {
1196         struct kprobe *cur = __this_cpu_read(kprobe_instance);
1197
1198         /*
1199          * If we faulted "during" the execution of a user-specified
1200          * probe handler, invoke just that probe's fault handler.
1201          */
1202         if (cur && cur->fault_handler) {
1203                 if (cur->fault_handler(cur, regs, trapnr))
1204                         return 1;
1205         }
1206         return 0;
1207 }
1208 NOKPROBE_SYMBOL(aggr_fault_handler);
1209
1210 /* Walks the list and increments nmissed count for multiprobe case */
1211 void kprobes_inc_nmissed_count(struct kprobe *p)
1212 {
1213         struct kprobe *kp;
1214         if (!kprobe_aggrprobe(p)) {
1215                 p->nmissed++;
1216         } else {
1217                 list_for_each_entry_rcu(kp, &p->list, list)
1218                         kp->nmissed++;
1219         }
1220         return;
1221 }
1222 NOKPROBE_SYMBOL(kprobes_inc_nmissed_count);
1223
1224 static void recycle_rp_inst(struct kretprobe_instance *ri)
1225 {
1226         struct kretprobe *rp = ri->rp;
1227
1228         /* remove the rp inst from the kretprobe_inst_table */
1229         hlist_del(&ri->hlist);
1230         INIT_HLIST_NODE(&ri->hlist);
1231         if (likely(rp)) {
1232                 raw_spin_lock(&rp->lock);
1233                 hlist_add_head(&ri->hlist, &rp->free_instances);
1234                 raw_spin_unlock(&rp->lock);
1235         } else
1236                 kfree_rcu(ri, rcu);
1237 }
1238 NOKPROBE_SYMBOL(recycle_rp_inst);
1239
1240 static void kretprobe_hash_lock(struct task_struct *tsk,
1241                          struct hlist_head **head, unsigned long *flags)
1242 __acquires(hlist_lock)
1243 {
1244         unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1245         raw_spinlock_t *hlist_lock;
1246
1247         *head = &kretprobe_inst_table[hash];
1248         hlist_lock = kretprobe_table_lock_ptr(hash);
1249         /*
1250          * Nested is a workaround that will soon not be needed.
1251          * There are other protections that make sure the same lock
1252          * is not taken on the same CPU, which lockdep is unaware of.
1253          * Differentiate when it is taken in NMI context.
1254          */
1255         raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
1256 }
1257 NOKPROBE_SYMBOL(kretprobe_hash_lock);
1258
1259 static void kretprobe_table_lock(unsigned long hash,
1260                                  unsigned long *flags)
1261 __acquires(hlist_lock)
1262 {
1263         raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1264         /*
1265          * Nested is a workaround that will soon not be needed.
1266          * There are other protections that make sure the same lock
1267          * is not taken on the same CPU, which lockdep is unaware of.
1268          * Differentiate when it is taken in NMI context.
1269          */
1270         raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi());
1271 }
1272 NOKPROBE_SYMBOL(kretprobe_table_lock);
1273
1274 static void kretprobe_hash_unlock(struct task_struct *tsk,
1275                            unsigned long *flags)
1276 __releases(hlist_lock)
1277 {
1278         unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1279         raw_spinlock_t *hlist_lock;
1280
1281         hlist_lock = kretprobe_table_lock_ptr(hash);
1282         raw_spin_unlock_irqrestore(hlist_lock, *flags);
1283 }
1284 NOKPROBE_SYMBOL(kretprobe_hash_unlock);
1285
1286 static void kretprobe_table_unlock(unsigned long hash,
1287                                    unsigned long *flags)
1288 __releases(hlist_lock)
1289 {
1290         raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1291         raw_spin_unlock_irqrestore(hlist_lock, *flags);
1292 }
1293 NOKPROBE_SYMBOL(kretprobe_table_unlock);
1294
1295 static struct kprobe kprobe_busy = {
1296         .addr = (void *) get_kprobe,
1297 };
1298
1299 void kprobe_busy_begin(void)
1300 {
1301         struct kprobe_ctlblk *kcb;
1302
1303         preempt_disable();
1304         __this_cpu_write(current_kprobe, &kprobe_busy);
1305         kcb = get_kprobe_ctlblk();
1306         kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1307 }
1308
1309 void kprobe_busy_end(void)
1310 {
1311         __this_cpu_write(current_kprobe, NULL);
1312         preempt_enable();
1313 }
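/*
 * Callers bracket code that must not recurse into kretprobe handling with
 * this pair, as kprobe_flush_task() does below:
 *
 *	kprobe_busy_begin();
 *	...touch kretprobe bookkeeping...
 *	kprobe_busy_end();
 */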
1314
1315 /*
1316  * This function is called from finish_task_switch() when task tk becomes
1317  * dead, so that we can recycle any function-return probe instances
1318  * associated with this task. These left-over instances represent probed
1319  * functions that have been called but will never return.
1320  */
1321 void kprobe_flush_task(struct task_struct *tk)
1322 {
1323         struct kretprobe_instance *ri;
1324         struct hlist_head *head;
1325         struct hlist_node *tmp;
1326         unsigned long hash, flags = 0;
1327
1328         if (unlikely(!kprobes_initialized))
1329                 /* Early boot.  kretprobe_table_locks not yet initialized. */
1330                 return;
1331
1332         kprobe_busy_begin();
1333
1334         hash = hash_ptr(tk, KPROBE_HASH_BITS);
1335         head = &kretprobe_inst_table[hash];
1336         kretprobe_table_lock(hash, &flags);
1337         hlist_for_each_entry_safe(ri, tmp, head, hlist) {
1338                 if (ri->task == tk)
1339                         recycle_rp_inst(ri);
1340         }
1341         kretprobe_table_unlock(hash, &flags);
1342
1343         kprobe_busy_end();
1344 }
1345 NOKPROBE_SYMBOL(kprobe_flush_task);
1346
1347 static inline void free_rp_inst(struct kretprobe *rp)
1348 {
1349         struct kretprobe_instance *ri;
1350         struct hlist_node *next;
1351
1352         hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
1353                 hlist_del(&ri->hlist);
1354                 kfree(ri);
1355         }
1356 }
1357
1358 static void cleanup_rp_inst(struct kretprobe *rp)
1359 {
1360         unsigned long flags, hash;
1361         struct kretprobe_instance *ri;
1362         struct hlist_node *next;
1363         struct hlist_head *head;
1364
1365         /* To avoid recursive kretprobe by NMI, set kprobe busy here */
1366         kprobe_busy_begin();
1367         for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1368                 kretprobe_table_lock(hash, &flags);
1369                 head = &kretprobe_inst_table[hash];
1370                 hlist_for_each_entry_safe(ri, next, head, hlist) {
1371                         if (ri->rp == rp)
1372                                 ri->rp = NULL;
1373                 }
1374                 kretprobe_table_unlock(hash, &flags);
1375         }
1376         kprobe_busy_end();
1377
1378         free_rp_inst(rp);
1379 }
1380 NOKPROBE_SYMBOL(cleanup_rp_inst);
1381
1382 /* Add the new probe to ap->list */
1383 static int add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1384 {
1385         if (p->post_handler)
1386                 unoptimize_kprobe(ap, true);    /* Fall back to normal kprobe */
1387
1388         list_add_rcu(&p->list, &ap->list);
1389         if (p->post_handler && !ap->post_handler)
1390                 ap->post_handler = aggr_post_handler;
1391
1392         return 0;
1393 }
1394
1395 /*
1396  * Fill in the required fields of the "manager kprobe". Replace the
1397  * earlier kprobe in the hlist with the manager kprobe
1398  */
1399 static void init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1400 {
1401         /* Copy p's insn slot to ap */
1402         copy_kprobe(p, ap);
1403         flush_insn_slot(ap);
1404         ap->addr = p->addr;
1405         ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1406         ap->pre_handler = aggr_pre_handler;
1407         ap->fault_handler = aggr_fault_handler;
1408         /* We don't care about a kprobe which has gone. */
1409         if (p->post_handler && !kprobe_gone(p))
1410                 ap->post_handler = aggr_post_handler;
1411
1412         INIT_LIST_HEAD(&ap->list);
1413         INIT_HLIST_NODE(&ap->hlist);
1414
1415         list_add_rcu(&p->list, &ap->list);
1416         hlist_replace_rcu(&p->hlist, &ap->hlist);
1417 }
1418
1419 /*
1420  * This is the second or subsequent kprobe at the address - handle
1421  * the intricacies
1422  */
1423 static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
1424 {
1425         int ret = 0;
1426         struct kprobe *ap = orig_p;
1427
1428         cpus_read_lock();
1429
1430         /* For preparing optimization, jump_label_text_reserved() is called */
1431         jump_label_lock();
1432         mutex_lock(&text_mutex);
1433
1434         if (!kprobe_aggrprobe(orig_p)) {
1435                 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1436                 ap = alloc_aggr_kprobe(orig_p);
1437                 if (!ap) {
1438                         ret = -ENOMEM;
1439                         goto out;
1440                 }
1441                 init_aggr_kprobe(ap, orig_p);
1442         } else if (kprobe_unused(ap)) {
1443                 /* This probe is going to die. Rescue it */
1444                 ret = reuse_unused_kprobe(ap);
1445                 if (ret)
1446                         goto out;
1447         }
1448
1449         if (kprobe_gone(ap)) {
1450                 /*
1451                  * Attempting to insert new probe at the same location that
1452                  * had a probe in the module vaddr area which already
1453                  * freed. So, the instruction slot has already been
1454                  * released. We need a new slot for the new probe.
1455                  */
1456                 ret = arch_prepare_kprobe(ap);
1457                 if (ret)
1458                         /*
1459                          * Even if we fail to allocate a new slot, we don't
1460                          * need to free the aggr_probe. It will be used next
1461                          * time, or be freed by unregister_kprobe().
1462                          */
1463                         goto out;
1464
1465                 /* Prepare optimized instructions if possible. */
1466                 prepare_optimized_kprobe(ap);
1467
1468                 /*
1469                  * Clear gone flag to prevent allocating new slot again, and
1470                  * set disabled flag because it is not armed yet.
1471                  */
1472                 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1473                             | KPROBE_FLAG_DISABLED;
1474         }
1475
1476         /* Copy ap's insn slot to p */
1477         copy_kprobe(ap, p);
1478         ret = add_new_kprobe(ap, p);
1479
1480 out:
1481         mutex_unlock(&text_mutex);
1482         jump_label_unlock();
1483         cpus_read_unlock();
1484
1485         if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
1486                 ap->flags &= ~KPROBE_FLAG_DISABLED;
1487                 if (!kprobes_all_disarmed) {
1488                         /* Arm the breakpoint again. */
1489                         ret = arm_kprobe(ap);
1490                         if (ret) {
1491                                 ap->flags |= KPROBE_FLAG_DISABLED;
1492                                 list_del_rcu(&p->list);
1493                                 synchronize_rcu();
1494                         }
1495                 }
1496         }
1497         return ret;
1498 }
1499
1500 bool __weak arch_within_kprobe_blacklist(unsigned long addr)
1501 {
1502         /* The __kprobes marked functions and entry code must not be probed */
1503         return addr >= (unsigned long)__kprobes_text_start &&
1504                addr < (unsigned long)__kprobes_text_end;
1505 }
1506
1507 static bool __within_kprobe_blacklist(unsigned long addr)
1508 {
1509         struct kprobe_blacklist_entry *ent;
1510
1511         if (arch_within_kprobe_blacklist(addr))
1512                 return true;
1513         /*
1514          * If there exists a kprobe_blacklist, verify and
1515          * fail any probe registration in the prohibited area
1516          */
1517         list_for_each_entry(ent, &kprobe_blacklist, list) {
1518                 if (addr >= ent->start_addr && addr < ent->end_addr)
1519                         return true;
1520         }
1521         return false;
1522 }
1523
1524 bool within_kprobe_blacklist(unsigned long addr)
1525 {
1526         char symname[KSYM_NAME_LEN], *p;
1527
1528         if (__within_kprobe_blacklist(addr))
1529                 return true;
1530
1531         /* Check if the address is on a suffixed symbol */
1532         if (!lookup_symbol_name(addr, symname)) {
1533                 p = strchr(symname, '.');
1534                 if (!p)
1535                         return false;
1536                 *p = '\0';
1537                 addr = (unsigned long)kprobe_lookup_name(symname, 0);
1538                 if (addr)
1539                         return __within_kprobe_blacklist(addr);
1540         }
1541         return false;
1542 }
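/*
 * For example (illustrative): a compiler-suffixed symbol such as
 * "foo.isra.0" is truncated at the '.' and re-checked as "foo", so probes
 * on optimized clones of blacklisted functions are refused as well.
 */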
1543
1544 /*
1545  * If we have a symbol_name argument, look it up and add the offset field
1546  * to it. This way, we can specify a relative address to a symbol.
1547  * This returns encoded errors if it fails to look up the symbol or if an
1548  * invalid combination of parameters is given.
1549  */
1550 static kprobe_opcode_t *_kprobe_addr(kprobe_opcode_t *addr,
1551                         const char *symbol_name, unsigned int offset)
1552 {
1553         if ((symbol_name && addr) || (!symbol_name && !addr))
1554                 goto invalid;
1555
1556         if (symbol_name) {
1557                 addr = kprobe_lookup_name(symbol_name, offset);
1558                 if (!addr)
1559                         return ERR_PTR(-ENOENT);
1560         }
1561
1562         addr = (kprobe_opcode_t *)(((char *)addr) + offset);
1563         if (addr)
1564                 return addr;
1565
1566 invalid:
1567         return ERR_PTR(-EINVAL);
1568 }
1569
1570 static kprobe_opcode_t *kprobe_addr(struct kprobe *p)
1571 {
1572         return _kprobe_addr(p->addr, p->symbol_name, p->offset);
1573 }
1574
1575 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
1576 static struct kprobe *__get_valid_kprobe(struct kprobe *p)
1577 {
1578         struct kprobe *ap, *list_p;
1579
1580         lockdep_assert_held(&kprobe_mutex);
1581
1582         ap = get_kprobe(p->addr);
1583         if (unlikely(!ap))
1584                 return NULL;
1585
1586         if (p != ap) {
1587                 list_for_each_entry(list_p, &ap->list, list)
1588                         if (list_p == p)
1589                                 /* kprobe p is a valid probe */
1590                                 goto valid;
1591                 return NULL;
1592         }
1593 valid:
1594         return ap;
1595 }
1596
1597 /* Return error if the kprobe is being re-registered */
1598 static inline int check_kprobe_rereg(struct kprobe *p)
1599 {
1600         int ret = 0;
1601
1602         mutex_lock(&kprobe_mutex);
1603         if (__get_valid_kprobe(p))
1604                 ret = -EINVAL;
1605         mutex_unlock(&kprobe_mutex);
1606
1607         return ret;
1608 }
1609
1610 int __weak arch_check_ftrace_location(struct kprobe *p)
1611 {
1612         unsigned long ftrace_addr;
1613
1614         ftrace_addr = ftrace_location((unsigned long)p->addr);
1615         if (ftrace_addr) {
1616 #ifdef CONFIG_KPROBES_ON_FTRACE
1617                 /* Reject if the given address is not on the instruction boundary */
1618                 if ((unsigned long)p->addr != ftrace_addr)
1619                         return -EILSEQ;
1620                 p->flags |= KPROBE_FLAG_FTRACE;
1621 #else   /* !CONFIG_KPROBES_ON_FTRACE */
1622                 return -EINVAL;
1623 #endif
1624         }
1625         return 0;
1626 }
1627
1628 static bool is_cfi_preamble_symbol(unsigned long addr)
1629 {
1630         char symbuf[KSYM_NAME_LEN];
1631
1632         if (lookup_symbol_name(addr, symbuf))
1633                 return false;
1634
1635         return str_has_prefix(symbuf, "__cfi_") ||
1636                 str_has_prefix(symbuf, "__pfx_");
1637 }
1638
1639 static int check_kprobe_address_safe(struct kprobe *p,
1640                                      struct module **probed_mod)
1641 {
1642         int ret;
1643
1644         ret = arch_check_ftrace_location(p);
1645         if (ret)
1646                 return ret;
1647         jump_label_lock();
1648         preempt_disable();
1649
1650         /* Ensure the address is in a text area, and find the module if it exists. */
1651         *probed_mod = NULL;
1652         if (!core_kernel_text((unsigned long) p->addr)) {
1653                 *probed_mod = __module_text_address((unsigned long) p->addr);
1654                 if (!(*probed_mod)) {
1655                         ret = -EINVAL;
1656                         goto out;
1657                 }
1658         }
1659         /* Ensure it is not in reserved area. */
1660         if (in_gate_area_no_mm((unsigned long) p->addr) ||
1661             within_kprobe_blacklist((unsigned long) p->addr) ||
1662             jump_label_text_reserved(p->addr, p->addr) ||
1663             static_call_text_reserved(p->addr, p->addr) ||
1664             find_bug((unsigned long)p->addr) ||
1665             is_cfi_preamble_symbol((unsigned long)p->addr)) {
1666                 ret = -EINVAL;
1667                 goto out;
1668         }
1669
1670         /* Get module refcount and reject __init functions for loaded modules. */
1671         if (*probed_mod) {
1672                 /*
1673                  * We must hold a refcount of the probed module while updating
1674                  * its code to prohibit unexpected unloading.
1675                  */
1676                 if (unlikely(!try_module_get(*probed_mod))) {
1677                         ret = -ENOENT;
1678                         goto out;
1679                 }
1680
1681                 /*
1682                  * If the module has already freed .init.text, we can't
1683                  * insert kprobes there.
1684                  */
1685                 if (within_module_init((unsigned long)p->addr, *probed_mod) &&
1686                     (*probed_mod)->state != MODULE_STATE_COMING) {
1687                         module_put(*probed_mod);
1688                         *probed_mod = NULL;
1689                         ret = -ENOENT;
1690                 }
1691         }
1692 out:
1693         preempt_enable();
1694         jump_label_unlock();
1695
1696         return ret;
1697 }
1698
1699 int register_kprobe(struct kprobe *p)
1700 {
1701         int ret;
1702         struct kprobe *old_p;
1703         struct module *probed_mod;
1704         kprobe_opcode_t *addr;
1705
1706         /* Adjust probe address from symbol */
1707         addr = kprobe_addr(p);
1708         if (IS_ERR(addr))
1709                 return PTR_ERR(addr);
1710         p->addr = addr;
1711
1712         ret = check_kprobe_rereg(p);
1713         if (ret)
1714                 return ret;
1715
1716         /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1717         p->flags &= KPROBE_FLAG_DISABLED;
1718         p->nmissed = 0;
1719         INIT_LIST_HEAD(&p->list);
1720
1721         ret = check_kprobe_address_safe(p, &probed_mod);
1722         if (ret)
1723                 return ret;
1724
1725         mutex_lock(&kprobe_mutex);
1726
1727         old_p = get_kprobe(p->addr);
1728         if (old_p) {
1729                 /* Since this may unoptimize old_p, text_mutex is locked inside. */
1730                 ret = register_aggr_kprobe(old_p, p);
1731                 goto out;
1732         }
1733
1734         cpus_read_lock();
1735         /* Prevent text modification */
1736         mutex_lock(&text_mutex);
1737         ret = prepare_kprobe(p);
1738         mutex_unlock(&text_mutex);
1739         cpus_read_unlock();
1740         if (ret)
1741                 goto out;
1742
1743         INIT_HLIST_NODE(&p->hlist);
1744         hlist_add_head_rcu(&p->hlist,
1745                        &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1746
1747         if (!kprobes_all_disarmed && !kprobe_disabled(p)) {
1748                 ret = arm_kprobe(p);
1749                 if (ret) {
1750                         hlist_del_rcu(&p->hlist);
1751                         synchronize_rcu();
1752                         goto out;
1753                 }
1754         }
1755
1756         /* Try to optimize kprobe */
1757         try_to_optimize_kprobe(p);
1758 out:
1759         mutex_unlock(&kprobe_mutex);
1760
1761         if (probed_mod)
1762                 module_put(probed_mod);
1763
1764         return ret;
1765 }
1766 EXPORT_SYMBOL_GPL(register_kprobe);
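
/*
 * A minimal usage sketch, modelled on samples/kprobes/kprobe_example.c
 * (the handler name and probed symbol below are hypothetical):
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("hit at %pS\n", p->addr);
 *		return 0;	// 0: single-step the original instruction
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "kernel_clone",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	ret = register_kprobe(&my_kp);	// 0 on success, -errno on failure
 *	...
 *	unregister_kprobe(&my_kp);
 */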
1767
1768 /* Check if all probes on the aggrprobe are disabled */
1769 static int aggr_kprobe_disabled(struct kprobe *ap)
1770 {
1771         struct kprobe *kp;
1772
1773         lockdep_assert_held(&kprobe_mutex);
1774
1775         list_for_each_entry(kp, &ap->list, list)
1776                 if (!kprobe_disabled(kp))
1777                         /*
1778                          * There is an active probe on the list.
1779                          * We can't disable this ap.
1780                          */
1781                         return 0;
1782
1783         return 1;
1784 }
1785
1786 /* Disable one kprobe: must be called with kprobe_mutex held */
1787 static struct kprobe *__disable_kprobe(struct kprobe *p)
1788 {
1789         struct kprobe *orig_p;
1790         int ret;
1791
1792         /* Get the original kprobe to return */
1793         orig_p = __get_valid_kprobe(p);
1794         if (unlikely(orig_p == NULL))
1795                 return ERR_PTR(-EINVAL);
1796
1797         if (!kprobe_disabled(p)) {
1798                 /* Disable probe if it is a child probe */
1799                 if (p != orig_p)
1800                         p->flags |= KPROBE_FLAG_DISABLED;
1801
1802                 /* Try to disarm and disable this/parent probe */
1803                 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1804                         /*
1805                          * Don't be lazy here.  Even if 'kprobes_all_disarmed'
1806                          * is false, 'orig_p' might not have been armed yet.
1807                          * Note arm_all_kprobes() __tries__ to arm all kprobes
1808                          * on a best-effort basis.
1809                          */
1810                         if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
1811                                 ret = disarm_kprobe(orig_p, true);
1812                                 if (ret) {
1813                                         p->flags &= ~KPROBE_FLAG_DISABLED;
1814                                         return ERR_PTR(ret);
1815                                 }
1816                         }
1817                         orig_p->flags |= KPROBE_FLAG_DISABLED;
1818                 }
1819         }
1820
1821         return orig_p;
1822 }
1823
1824 /*
1825  * Unregister a kprobe without scheduler synchronization.
1826  */
1827 static int __unregister_kprobe_top(struct kprobe *p)
1828 {
1829         struct kprobe *ap, *list_p;
1830
1831         /* Disable kprobe. This will disarm it if needed. */
1832         ap = __disable_kprobe(p);
1833         if (IS_ERR(ap))
1834                 return PTR_ERR(ap);
1835
1836         if (ap == p)
1837                 /*
1838                  * This probe is an independent (and non-optimized) kprobe
1839                  * (not an aggrprobe). Remove from the hash list.
1840                  */
1841                 goto disarmed;
1842
1843         /* The following code expects this probe to be an aggrprobe */
1844         WARN_ON(!kprobe_aggrprobe(ap));
1845
1846         if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1847                 /*
1848                  * '!disarmed' can happen if the probe is under delayed
1849                  * unoptimizing.
1850                  */
1851                 goto disarmed;
1852         else {
1853                 /* If the probe being disabled has special handlers, update the aggrprobe */
1854                 if (p->post_handler && !kprobe_gone(p)) {
1855                         list_for_each_entry(list_p, &ap->list, list) {
1856                                 if ((list_p != p) && (list_p->post_handler))
1857                                         goto noclean;
1858                         }
1859                         /*
1860                          * For the kprobe-on-ftrace case, we keep the
1861                          * post_handler setting to identify this aggrprobe
1862                          * armed with kprobe_ipmodify_ops.
1863                          */
1864                         if (!kprobe_ftrace(ap))
1865                                 ap->post_handler = NULL;
1866                 }
1867 noclean:
1868                 /*
1869                  * Remove from the aggrprobe: this path will do nothing in
1870                  * __unregister_kprobe_bottom().
1871                  */
1872                 list_del_rcu(&p->list);
1873                 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1874                         /*
1875                          * Try to optimize this probe again, because post
1876                          * handler may have been changed.
1877                          */
1878                         optimize_kprobe(ap);
1879         }
1880         return 0;
1881
1882 disarmed:
1883         hlist_del_rcu(&ap->hlist);
1884         return 0;
1885 }
1886
1887 static void __unregister_kprobe_bottom(struct kprobe *p)
1888 {
1889         struct kprobe *ap;
1890
1891         if (list_empty(&p->list))
1892                 /* This is an independent kprobe */
1893                 arch_remove_kprobe(p);
1894         else if (list_is_singular(&p->list)) {
1895                 /* This is the last child of an aggrprobe */
1896                 ap = list_entry(p->list.next, struct kprobe, list);
1897                 list_del(&p->list);
1898                 free_aggr_kprobe(ap);
1899         }
1900         /* Otherwise, do nothing. */
1901 }
1902
1903 int register_kprobes(struct kprobe **kps, int num)
1904 {
1905         int i, ret = 0;
1906
1907         if (num <= 0)
1908                 return -EINVAL;
1909         for (i = 0; i < num; i++) {
1910                 ret = register_kprobe(kps[i]);
1911                 if (ret < 0) {
1912                         if (i > 0)
1913                                 unregister_kprobes(kps, i);
1914                         break;
1915                 }
1916         }
1917         return ret;
1918 }
1919 EXPORT_SYMBOL_GPL(register_kprobes);
1920
1921 void unregister_kprobe(struct kprobe *p)
1922 {
1923         unregister_kprobes(&p, 1);
1924 }
1925 EXPORT_SYMBOL_GPL(unregister_kprobe);
1926
1927 void unregister_kprobes(struct kprobe **kps, int num)
1928 {
1929         int i;
1930
1931         if (num <= 0)
1932                 return;
1933         mutex_lock(&kprobe_mutex);
1934         for (i = 0; i < num; i++)
1935                 if (__unregister_kprobe_top(kps[i]) < 0)
1936                         kps[i]->addr = NULL;
1937         mutex_unlock(&kprobe_mutex);
1938
1939         synchronize_rcu();
1940         for (i = 0; i < num; i++)
1941                 if (kps[i]->addr)
1942                         __unregister_kprobe_bottom(kps[i]);
1943 }
1944 EXPORT_SYMBOL_GPL(unregister_kprobes);
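
/*
 * Batch registration sketch (the probes are hypothetical): if any probe
 * fails to register, register_kprobes() unregisters the ones it already
 * installed, so the caller never has to unwind a partial batch by hand.
 *
 *	static struct kprobe *my_kps[] = { &kp_a, &kp_b, &kp_c };
 *
 *	ret = register_kprobes(my_kps, ARRAY_SIZE(my_kps));
 *	...
 *	unregister_kprobes(my_kps, ARRAY_SIZE(my_kps));
 */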
1945
1946 int __weak kprobe_exceptions_notify(struct notifier_block *self,
1947                                         unsigned long val, void *data)
1948 {
1949         return NOTIFY_DONE;
1950 }
1951 NOKPROBE_SYMBOL(kprobe_exceptions_notify);
1952
1953 static struct notifier_block kprobe_exceptions_nb = {
1954         .notifier_call = kprobe_exceptions_notify,
1955         .priority = 0x7fffffff /* we need to be notified first */
1956 };
1957
1958 unsigned long __weak arch_deref_entry_point(void *entry)
1959 {
1960         return (unsigned long)entry;
1961 }
1962
1963 #ifdef CONFIG_KRETPROBES
1964
1965 unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
1966                                              void *trampoline_address,
1967                                              void *frame_pointer)
1968 {
1969         struct kretprobe_instance *ri = NULL, *last = NULL;
1970         struct hlist_head *head;
1971         struct hlist_node *tmp;
1972         unsigned long flags;
1973         kprobe_opcode_t *correct_ret_addr = NULL;
1974         bool skipped = false;
1975
1976         kretprobe_hash_lock(current, &head, &flags);
1977
1978         /*
1979          * It is possible to have multiple instances associated with a given
1980          * task either because multiple functions in the call path have
1981          * return probes installed on them, and/or more than one
1982          * return probe was registered for a target function.
1983          *
1984          * We can handle this because:
1985          *     - instances are always pushed into the head of the list
1986          *     - when multiple return probes are registered for the same
1987          *       function, the (chronologically) first instance's ret_addr
1988          *       will be the real return address, and all the rest will
1989          *       point to kretprobe_trampoline.
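         *
         *       For example (a sketch): if f1() and f2() both have
         *       return probes and f1() calls f2(), this task's entries
         *       are, from the head:
         *         [f2 instance] ret_addr = resume address inside f1
         *         [f1 instance] ret_addr = f1's real caller
         *       so popping from the head matches the actual return order.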
1990          */
1991         hlist_for_each_entry(ri, head, hlist) {
1992                 if (ri->task != current)
1993                         /* another task is sharing our hash bucket */
1994                         continue;
1995                 /*
1996                  * Return probes must be pushed onto this hash list in the
1997                  * correct order (same as the return order) so that they
1998                  * can be popped correctly. However, if we find an entry
1999                  * pushed in the wrong order, it means we found a function
2000                  * which should not be probed: the out-of-order entry was
2001                  * pushed while another kretprobe was itself being processed.
2002                  */
2003                 if (ri->fp != frame_pointer) {
2004                         if (!skipped)
2005                                 pr_warn("kretprobe is stacked incorrectly. Trying to fix it up.\n");
2006                         skipped = true;
2007                         continue;
2008                 }
2009
2010                 correct_ret_addr = ri->ret_addr;
2011                 if (skipped)
2012                         pr_warn("%ps must be blacklisted because of incorrect kretprobe order\n",
2013                                 ri->rp->kp.addr);
2014
2015                 if (correct_ret_addr != trampoline_address)
2016                         /*
2017                          * This is the real return address. Any other
2018                          * instances associated with this task are for
2019                          * other calls deeper on the call stack
2020                          */
2021                         break;
2022         }
2023
2024         BUG_ON(!correct_ret_addr || (correct_ret_addr == trampoline_address));
2025         last = ri;
2026
2027         hlist_for_each_entry_safe(ri, tmp, head, hlist) {
2028                 if (ri->task != current)
2029                         /* another task is sharing our hash bucket */
2030                         continue;
2031                 if (ri->fp != frame_pointer)
2032                         continue;
2033
2034                 if (ri->rp && ri->rp->handler) {
2035                         struct kprobe *prev = kprobe_running();
2036
2037                         __this_cpu_write(current_kprobe, &ri->rp->kp);
2038                         ri->ret_addr = correct_ret_addr;
2039                         ri->rp->handler(ri, regs);
2040                         __this_cpu_write(current_kprobe, prev);
2041                 }
2042
2043                 recycle_rp_inst(ri);
2044
2045                 if (ri == last)
2046                         break;
2047         }
2048
2049         kretprobe_hash_unlock(current, &flags);
2050
2051         return (unsigned long)correct_ret_addr;
2052 }
2053 NOKPROBE_SYMBOL(__kretprobe_trampoline_handler);
2054
2055 /*
2056  * This kprobe pre_handler is registered with every kretprobe. When the
2057  * probe hits, it will set up the return probe.
2058  */
2059 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2060 {
2061         struct kretprobe *rp = container_of(p, struct kretprobe, kp);
2062         unsigned long hash, flags = 0;
2063         struct kretprobe_instance *ri;
2064
2065         /* TODO: consider swapping the RA only after the last pre_handler has fired */
2066         hash = hash_ptr(current, KPROBE_HASH_BITS);
2067         /*
2068          * The nested annotation is a workaround that will soon not be
2069          * needed: other protections, which lockdep is unaware of, ensure
2070          * the same lock is not taken twice on the same CPU.
2071          */
2072         raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
2073         if (!hlist_empty(&rp->free_instances)) {
2074                 ri = hlist_entry(rp->free_instances.first,
2075                                 struct kretprobe_instance, hlist);
2076                 hlist_del(&ri->hlist);
2077                 raw_spin_unlock_irqrestore(&rp->lock, flags);
2078
2079                 ri->rp = rp;
2080                 ri->task = current;
2081
2082                 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
2083                         raw_spin_lock_irqsave_nested(&rp->lock, flags, 1);
2084                         hlist_add_head(&ri->hlist, &rp->free_instances);
2085                         raw_spin_unlock_irqrestore(&rp->lock, flags);
2086                         return 0;
2087                 }
2088
2089                 arch_prepare_kretprobe(ri, regs);
2090
2091                 /* XXX(hch): why is there no hlist_move_head? */
2092                 INIT_HLIST_NODE(&ri->hlist);
2093                 kretprobe_table_lock(hash, &flags);
2094                 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
2095                 kretprobe_table_unlock(hash, &flags);
2096         } else {
2097                 rp->nmissed++;
2098                 raw_spin_unlock_irqrestore(&rp->lock, flags);
2099         }
2100         return 0;
2101 }
2102 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2103
2104 bool __weak arch_kprobe_on_func_entry(unsigned long offset)
2105 {
2106         return !offset;
2107 }
2108
2109 /**
2110  * kprobe_on_func_entry() -- check whether given address is function entry
2111  * @addr: Target address
2112  * @sym:  Target symbol name
2113  * @offset: The offset from the symbol or the address
2114  *
2115  * This checks whether the given @addr+@offset or @sym+@offset is on the
2116  * function entry address or not.
2117  * It returns 0 if it is the function entry, or -EINVAL if it is not.
2118  * It also returns -ENOENT if the symbol or address lookup fails.
2119  * The caller must pass either @addr or @sym (the other must be NULL),
2120  * or this returns -EINVAL.
2121  */
2122 int kprobe_on_func_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset)
2123 {
2124         kprobe_opcode_t *kp_addr = _kprobe_addr(addr, sym, offset);
2125
2126         if (IS_ERR(kp_addr))
2127                 return PTR_ERR(kp_addr);
2128
2129         if (!kallsyms_lookup_size_offset((unsigned long)kp_addr, NULL, &offset))
2130                 return -ENOENT;
2131
2132         if (!arch_kprobe_on_func_entry(offset))
2133                 return -EINVAL;
2134
2135         return 0;
2136 }
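
/*
 * For example (a sketch; 'vfs_read' is only an illustrative symbol):
 *
 *	kprobe_on_func_entry(NULL, "vfs_read", 0)	returns 0
 *	kprobe_on_func_entry(NULL, "no_such_sym", 0)	returns -ENOENT
 *	kprobe_on_func_entry(NULL, "vfs_read", 4)	returns -EINVAL on
 *	architectures whose arch_kprobe_on_func_entry() accepts offset 0 only.
 */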
2137
2138 int register_kretprobe(struct kretprobe *rp)
2139 {
2140         int ret;
2141         struct kretprobe_instance *inst;
2142         int i;
2143         void *addr;
2144
2145         ret = kprobe_on_func_entry(rp->kp.addr, rp->kp.symbol_name, rp->kp.offset);
2146         if (ret)
2147                 return ret;
2148
2149         /* If only rp->kp.addr is specified, check whether it is being re-registered */
2150         if (rp->kp.addr && check_kprobe_rereg(&rp->kp))
2151                 return -EINVAL;
2152
2153         if (kretprobe_blacklist_size) {
2154                 addr = kprobe_addr(&rp->kp);
2155                 if (IS_ERR(addr))
2156                         return PTR_ERR(addr);
2157
2158                 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2159                         if (kretprobe_blacklist[i].addr == addr)
2160                                 return -EINVAL;
2161                 }
2162         }
2163
2164         if (rp->data_size > KRETPROBE_MAX_DATA_SIZE)
2165                 return -E2BIG;
2166
2167         rp->kp.pre_handler = pre_handler_kretprobe;
2168         rp->kp.post_handler = NULL;
2169         rp->kp.fault_handler = NULL;
2170
2171         /* Pre-allocate memory for max kretprobe instances */
2172         if (rp->maxactive <= 0) {
2173 #ifdef CONFIG_PREEMPTION
2174                 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
2175 #else
2176                 rp->maxactive = num_possible_cpus();
2177 #endif
2178         }
2179         raw_spin_lock_init(&rp->lock);
2180         INIT_HLIST_HEAD(&rp->free_instances);
2181         for (i = 0; i < rp->maxactive; i++) {
2182                 inst = kmalloc(sizeof(struct kretprobe_instance) +
2183                                rp->data_size, GFP_KERNEL);
2184                 if (inst == NULL) {
2185                         free_rp_inst(rp);
2186                         return -ENOMEM;
2187                 }
2188                 INIT_HLIST_NODE(&inst->hlist);
2189                 hlist_add_head(&inst->hlist, &rp->free_instances);
2190         }
2191
2192         rp->nmissed = 0;
2193         /* Establish function entry probe point */
2194         ret = register_kprobe(&rp->kp);
2195         if (ret != 0)
2196                 free_rp_inst(rp);
2197         return ret;
2198 }
2199 EXPORT_SYMBOL_GPL(register_kretprobe);
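
/*
 * A minimal usage sketch, modelled on samples/kprobes/kretprobe_example.c
 * (the handler name and probed symbol below are hypothetical):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
 *			regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "kernel_clone",
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,	// instances to pre-allocate
 *	};
 *
 *	ret = register_kretprobe(&my_rp);
 *	...
 *	unregister_kretprobe(&my_rp);
 */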
2200
2201 int register_kretprobes(struct kretprobe **rps, int num)
2202 {
2203         int ret = 0, i;
2204
2205         if (num <= 0)
2206                 return -EINVAL;
2207         for (i = 0; i < num; i++) {
2208                 ret = register_kretprobe(rps[i]);
2209                 if (ret < 0) {
2210                         if (i > 0)
2211                                 unregister_kretprobes(rps, i);
2212                         break;
2213                 }
2214         }
2215         return ret;
2216 }
2217 EXPORT_SYMBOL_GPL(register_kretprobes);
2218
2219 void unregister_kretprobe(struct kretprobe *rp)
2220 {
2221         unregister_kretprobes(&rp, 1);
2222 }
2223 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2224
2225 void unregister_kretprobes(struct kretprobe **rps, int num)
2226 {
2227         int i;
2228
2229         if (num <= 0)
2230                 return;
2231         mutex_lock(&kprobe_mutex);
2232         for (i = 0; i < num; i++)
2233                 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
2234                         rps[i]->kp.addr = NULL;
2235         mutex_unlock(&kprobe_mutex);
2236
2237         synchronize_rcu();
2238         for (i = 0; i < num; i++) {
2239                 if (rps[i]->kp.addr) {
2240                         __unregister_kprobe_bottom(&rps[i]->kp);
2241                         cleanup_rp_inst(rps[i]);
2242                 }
2243         }
2244 }
2245 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2246
2247 #else /* CONFIG_KRETPROBES */
2248 int register_kretprobe(struct kretprobe *rp)
2249 {
2250         return -ENOSYS;
2251 }
2252 EXPORT_SYMBOL_GPL(register_kretprobe);
2253
2254 int register_kretprobes(struct kretprobe **rps, int num)
2255 {
2256         return -ENOSYS;
2257 }
2258 EXPORT_SYMBOL_GPL(register_kretprobes);
2259
2260 void unregister_kretprobe(struct kretprobe *rp)
2261 {
2262 }
2263 EXPORT_SYMBOL_GPL(unregister_kretprobe);
2264
2265 void unregister_kretprobes(struct kretprobe **rps, int num)
2266 {
2267 }
2268 EXPORT_SYMBOL_GPL(unregister_kretprobes);
2269
2270 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
2271 {
2272         return 0;
2273 }
2274 NOKPROBE_SYMBOL(pre_handler_kretprobe);
2275
2276 #endif /* CONFIG_KRETPROBES */
2277
2278 /* Mark the kprobe as gone and remove its instruction buffer. */
2279 static void kill_kprobe(struct kprobe *p)
2280 {
2281         struct kprobe *kp;
2282
2283         lockdep_assert_held(&kprobe_mutex);
2284
2285         if (WARN_ON_ONCE(kprobe_gone(p)))
2286                 return;
2287
2288         p->flags |= KPROBE_FLAG_GONE;
2289         if (kprobe_aggrprobe(p)) {
2290                 /*
2291                  * If this is an aggr_kprobe, we have to list all the
2292                  * chained probes and mark them GONE.
2293                  */
2294                 list_for_each_entry(kp, &p->list, list)
2295                         kp->flags |= KPROBE_FLAG_GONE;
2296                 p->post_handler = NULL;
2297                 kill_optimized_kprobe(p);
2298         }
2299         /*
2300          * Here, we can remove insn_slot safely, because no thread calls
2301          * the original probed function (which will be freed soon) any more.
2302          */
2303         arch_remove_kprobe(p);
2304
2305         /*
2306          * The module is going away. We should disarm the kprobe which
2307          * is using ftrace, because the ftrace framework is still
2308          * available at the MODULE_STATE_GOING notification.
2309          */
2310         if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed)
2311                 disarm_kprobe_ftrace(p);
2312 }
2313
2314 /* Disable one kprobe */
2315 int disable_kprobe(struct kprobe *kp)
2316 {
2317         int ret = 0;
2318         struct kprobe *p;
2319
2320         mutex_lock(&kprobe_mutex);
2321
2322         /* Disable this kprobe */
2323         p = __disable_kprobe(kp);
2324         if (IS_ERR(p))
2325                 ret = PTR_ERR(p);
2326
2327         mutex_unlock(&kprobe_mutex);
2328         return ret;
2329 }
2330 EXPORT_SYMBOL_GPL(disable_kprobe);
2331
2332 /* Enable one kprobe */
2333 int enable_kprobe(struct kprobe *kp)
2334 {
2335         int ret = 0;
2336         struct kprobe *p;
2337
2338         mutex_lock(&kprobe_mutex);
2339
2340         /* Check whether specified probe is valid. */
2341         p = __get_valid_kprobe(kp);
2342         if (unlikely(p == NULL)) {
2343                 ret = -EINVAL;
2344                 goto out;
2345         }
2346
2347         if (kprobe_gone(kp)) {
2348                 /* This kprobe has gone; we can't enable it. */
2349                 ret = -EINVAL;
2350                 goto out;
2351         }
2352
2353         if (p != kp)
2354                 kp->flags &= ~KPROBE_FLAG_DISABLED;
2355
2356         if (!kprobes_all_disarmed && kprobe_disabled(p)) {
2357                 p->flags &= ~KPROBE_FLAG_DISABLED;
2358                 ret = arm_kprobe(p);
2359                 if (ret) {
2360                         p->flags |= KPROBE_FLAG_DISABLED;
2361                         if (p != kp)
2362                                 kp->flags |= KPROBE_FLAG_DISABLED;
2363                 }
2364         }
2365 out:
2366         mutex_unlock(&kprobe_mutex);
2367         return ret;
2368 }
2369 EXPORT_SYMBOL_GPL(enable_kprobe);
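
/*
 * Sketch (with a hypothetical, already-registered 'my_kp'): a probe can
 * be parked and resumed without paying the register/unregister cost:
 *
 *	disable_kprobe(&my_kp);		// disarmed, but still registered
 *	...				// this stretch runs probe-free
 *	ret = enable_kprobe(&my_kp);	// re-armed; -EINVAL if it has gone
 */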
2370
2371 /* Callers must NOT use this in the usual path; it is only for critical cases */
2372 void dump_kprobe(struct kprobe *kp)
2373 {
2374         pr_err("Dumping kprobe:\n");
2375         pr_err("Name: %s\nOffset: %x\nAddress: %pS\n",
2376                kp->symbol_name, kp->offset, kp->addr);
2377 }
2378 NOKPROBE_SYMBOL(dump_kprobe);
2379
2380 int kprobe_add_ksym_blacklist(unsigned long entry)
2381 {
2382         struct kprobe_blacklist_entry *ent;
2383         unsigned long offset = 0, size = 0;
2384
2385         if (!kernel_text_address(entry) ||
2386             !kallsyms_lookup_size_offset(entry, &size, &offset))
2387                 return -EINVAL;
2388
2389         ent = kmalloc(sizeof(*ent), GFP_KERNEL);
2390         if (!ent)
2391                 return -ENOMEM;
2392         ent->start_addr = entry;
2393         ent->end_addr = entry + size;
2394         INIT_LIST_HEAD(&ent->list);
2395         list_add_tail(&ent->list, &kprobe_blacklist);
2396
2397         return (int)size;
2398 }
2399
2400 /* Add all symbols in the given area to the kprobe blacklist */
2401 int kprobe_add_area_blacklist(unsigned long start, unsigned long end)
2402 {
2403         unsigned long entry;
2404         int ret = 0;
2405
2406         for (entry = start; entry < end; entry += ret) {
2407                 ret = kprobe_add_ksym_blacklist(entry);
2408                 if (ret < 0)
2409                         return ret;
2410                 if (ret == 0)   /* In case of alias symbol */
2411                         ret = 1;
2412         }
2413         return 0;
2414 }
2415
2416 /* Remove all symbols in the given area from the kprobe blacklist */
2417 static void kprobe_remove_area_blacklist(unsigned long start, unsigned long end)
2418 {
2419         struct kprobe_blacklist_entry *ent, *n;
2420
2421         list_for_each_entry_safe(ent, n, &kprobe_blacklist, list) {
2422                 if (ent->start_addr < start || ent->start_addr >= end)
2423                         continue;
2424                 list_del(&ent->list);
2425                 kfree(ent);
2426         }
2427 }
2428
2429 static void kprobe_remove_ksym_blacklist(unsigned long entry)
2430 {
2431         kprobe_remove_area_blacklist(entry, entry + 1);
2432 }
2433
2434 int __weak arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
2435                                    char *type, char *sym)
2436 {
2437         return -ERANGE;
2438 }
2439
2440 int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
2441                        char *sym)
2442 {
2443 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
2444         if (!kprobe_cache_get_kallsym(&kprobe_insn_slots, &symnum, value, type, sym))
2445                 return 0;
2446 #ifdef CONFIG_OPTPROBES
2447         if (!kprobe_cache_get_kallsym(&kprobe_optinsn_slots, &symnum, value, type, sym))
2448                 return 0;
2449 #endif
2450 #endif
2451         if (!arch_kprobe_get_kallsym(&symnum, value, type, sym))
2452                 return 0;
2453         return -ERANGE;
2454 }
2455
2456 int __init __weak arch_populate_kprobe_blacklist(void)
2457 {
2458         return 0;
2459 }
2460
2461 /*
2462  * Lookup and populate the kprobe_blacklist.
2463  *
2464  * Unlike the kretprobe blacklist, we'll need to determine
2465  * the range of addresses that belong to these functions,
2466  * since a kprobe need not necessarily be at the beginning
2467  * of a function.
2468  */
2469 static int __init populate_kprobe_blacklist(unsigned long *start,
2470                                              unsigned long *end)
2471 {
2472         unsigned long entry;
2473         unsigned long *iter;
2474         int ret;
2475
2476         for (iter = start; iter < end; iter++) {
2477                 entry = arch_deref_entry_point((void *)*iter);
2478                 ret = kprobe_add_ksym_blacklist(entry);
2479                 if (ret == -EINVAL)
2480                         continue;
2481                 if (ret < 0)
2482                         return ret;
2483         }
2484
2485         /* Symbols in __kprobes_text are blacklisted */
2486         ret = kprobe_add_area_blacklist((unsigned long)__kprobes_text_start,
2487                                         (unsigned long)__kprobes_text_end);
2488         if (ret)
2489                 return ret;
2490
2491         /* Symbols in noinstr section are blacklisted */
2492         ret = kprobe_add_area_blacklist((unsigned long)__noinstr_text_start,
2493                                         (unsigned long)__noinstr_text_end);
2494
2495         return ret ? : arch_populate_kprobe_blacklist();
2496 }
2497
2498 static void add_module_kprobe_blacklist(struct module *mod)
2499 {
2500         unsigned long start, end;
2501         int i;
2502
2503         if (mod->kprobe_blacklist) {
2504                 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2505                         kprobe_add_ksym_blacklist(mod->kprobe_blacklist[i]);
2506         }
2507
2508         start = (unsigned long)mod->kprobes_text_start;
2509         if (start) {
2510                 end = start + mod->kprobes_text_size;
2511                 kprobe_add_area_blacklist(start, end);
2512         }
2513
2514         start = (unsigned long)mod->noinstr_text_start;
2515         if (start) {
2516                 end = start + mod->noinstr_text_size;
2517                 kprobe_add_area_blacklist(start, end);
2518         }
2519 }
2520
2521 static void remove_module_kprobe_blacklist(struct module *mod)
2522 {
2523         unsigned long start, end;
2524         int i;
2525
2526         if (mod->kprobe_blacklist) {
2527                 for (i = 0; i < mod->num_kprobe_blacklist; i++)
2528                         kprobe_remove_ksym_blacklist(mod->kprobe_blacklist[i]);
2529         }
2530
2531         start = (unsigned long)mod->kprobes_text_start;
2532         if (start) {
2533                 end = start + mod->kprobes_text_size;
2534                 kprobe_remove_area_blacklist(start, end);
2535         }
2536
2537         start = (unsigned long)mod->noinstr_text_start;
2538         if (start) {
2539                 end = start + mod->noinstr_text_size;
2540                 kprobe_remove_area_blacklist(start, end);
2541         }
2542 }
2543
2544 /* Module notifier callback, checking kprobes on the module */
2545 static int kprobes_module_callback(struct notifier_block *nb,
2546                                    unsigned long val, void *data)
2547 {
2548         struct module *mod = data;
2549         struct hlist_head *head;
2550         struct kprobe *p;
2551         unsigned int i;
2552         int checkcore = (val == MODULE_STATE_GOING);
2553
2554         if (val == MODULE_STATE_COMING) {
2555                 mutex_lock(&kprobe_mutex);
2556                 add_module_kprobe_blacklist(mod);
2557                 mutex_unlock(&kprobe_mutex);
2558         }
2559         if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
2560                 return NOTIFY_DONE;
2561
2562         /*
2563          * When MODULE_STATE_GOING is notified, both the module's .text and
2564          * .init.text sections will be freed. When MODULE_STATE_LIVE is
2565          * notified, only the .init.text section is freed. We need to
2566          * kill the kprobes that have been inserted in those sections.
2567          */
2568         mutex_lock(&kprobe_mutex);
2569         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2570                 head = &kprobe_table[i];
2571                 hlist_for_each_entry(p, head, hlist) {
2572                         if (kprobe_gone(p))
2573                                 continue;
2574
2575                         if (within_module_init((unsigned long)p->addr, mod) ||
2576                             (checkcore &&
2577                              within_module_core((unsigned long)p->addr, mod))) {
2578                                 /*
2579                                  * The vaddr this probe is installed at will
2580                                  * soon be vfreed, but is not synced to disk.
2581                                  * Hence, disarming the breakpoint isn't needed.
2582                                  *
2583                                  * Note, this will also move any optimized probes
2584                                  * that are pending to be removed from their
2585                                  * corresponding lists to the freeing_list and
2586                                  * will not be touched by the delayed
2587                                  * kprobe_optimizer work handler.
2588                                  */
2589                                 kill_kprobe(p);
2590                         }
2591                 }
2592         }
2593         if (val == MODULE_STATE_GOING)
2594                 remove_module_kprobe_blacklist(mod);
2595         mutex_unlock(&kprobe_mutex);
2596         return NOTIFY_DONE;
2597 }
2598
2599 static struct notifier_block kprobe_module_nb = {
2600         .notifier_call = kprobes_module_callback,
2601         .priority = 0
2602 };
2603
2604 /* Markers of _kprobe_blacklist section */
2605 extern unsigned long __start_kprobe_blacklist[];
2606 extern unsigned long __stop_kprobe_blacklist[];
2607
2608 void kprobe_free_init_mem(void)
2609 {
2610         void *start = (void *)(&__init_begin);
2611         void *end = (void *)(&__init_end);
2612         struct hlist_head *head;
2613         struct kprobe *p;
2614         int i;
2615
2616         mutex_lock(&kprobe_mutex);
2617
2618         /* Kill all kprobes on initmem */
2619         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2620                 head = &kprobe_table[i];
2621                 hlist_for_each_entry(p, head, hlist) {
2622                         if (start <= (void *)p->addr && (void *)p->addr < end)
2623                                 kill_kprobe(p);
2624                 }
2625         }
2626
2627         mutex_unlock(&kprobe_mutex);
2628 }
2629
2630 static int __init init_kprobes(void)
2631 {
2632         int i, err = 0;
2633
2634         /* FIXME allocate the probe table, currently defined statically */
2635         /* initialize all list heads */
2636         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2637                 INIT_HLIST_HEAD(&kprobe_table[i]);
2638                 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
2639                 raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
2640         }
2641
2642         err = populate_kprobe_blacklist(__start_kprobe_blacklist,
2643                                         __stop_kprobe_blacklist);
2644         if (err) {
2645                 pr_err("kprobes: failed to populate blacklist: %d\n", err);
2646                 pr_err("Please take care when using kprobes.\n");
2647         }
2648
2649         if (kretprobe_blacklist_size) {
2650                 /* lookup the function address from its name */
2651                 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2652                         kretprobe_blacklist[i].addr =
2653                                 kprobe_lookup_name(kretprobe_blacklist[i].name, 0);
2654                         if (!kretprobe_blacklist[i].addr)
2655                                 pr_err("kretprobe: lookup failed: %s\n",
2656                                        kretprobe_blacklist[i].name);
2657                 }
2658         }
2659
2660         /* By default, kprobes are armed */
2661         kprobes_all_disarmed = false;
2662
2663 #if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2664         /* Init kprobe_optinsn_slots for allocation */
2665         kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2666 #endif
2667
2668         err = arch_init_kprobes();
2669         if (!err)
2670                 err = register_die_notifier(&kprobe_exceptions_nb);
2671         if (!err)
2672                 err = register_module_notifier(&kprobe_module_nb);
2673
2674         kprobes_initialized = (err == 0);
2675
2676         if (!err)
2677                 init_test_probes();
2678         return err;
2679 }
2680 early_initcall(init_kprobes);
2681
2682 #if defined(CONFIG_OPTPROBES)
2683 static int __init init_optprobes(void)
2684 {
2685         /*
2686          * Enable kprobe optimization - this kicks the optimizer which
2687          * depends on synchronize_rcu_tasks() and ksoftirqd, that is
2688          * not spawned in early initcall. So delay the optimization.
2689          */
2690         optimize_all_kprobes();
2691
2692         return 0;
2693 }
2694 subsys_initcall(init_optprobes);
2695 #endif
2696
2697 #ifdef CONFIG_DEBUG_FS
2698 static void report_probe(struct seq_file *pi, struct kprobe *p,
2699                 const char *sym, int offset, char *modname, struct kprobe *pp)
2700 {
2701         char *kprobe_type;
2702         void *addr = p->addr;
2703
2704         if (p->pre_handler == pre_handler_kretprobe)
2705                 kprobe_type = "r";
2706         else
2707                 kprobe_type = "k";
2708
2709         if (!kallsyms_show_value(pi->file->f_cred))
2710                 addr = NULL;
2711
2712         if (sym)
2713                 seq_printf(pi, "%px  %s  %s+0x%x  %s ",
2714                         addr, kprobe_type, sym, offset,
2715                         (modname ? modname : " "));
2716         else    /* try to use %pS */
2717                 seq_printf(pi, "%px  %s  %pS ",
2718                         addr, kprobe_type, p->addr);
2719
2720         if (!pp)
2721                 pp = p;
2722         seq_printf(pi, "%s%s%s%s\n",
2723                 (kprobe_gone(p) ? "[GONE]" : ""),
2724                 ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
2725                 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
2726                 (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
2727 }
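
/*
 * Example output lines in /sys/kernel/debug/kprobes/list (a sketch; the
 * addresses and symbols are illustrative, and the address column is
 * zeroed when the opener may not see kernel addresses):
 *
 *	ffffffff81234560  k  vfs_read+0x0    [DISABLED]
 *	0000000000000000  r  kernel_clone+0x0
 */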
2728
2729 static void *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2730 {
2731         return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2732 }
2733
2734 static void *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2735 {
2736         (*pos)++;
2737         if (*pos >= KPROBE_TABLE_SIZE)
2738                 return NULL;
2739         return pos;
2740 }
2741
2742 static void kprobe_seq_stop(struct seq_file *f, void *v)
2743 {
2744         /* Nothing to do */
2745 }
2746
2747 static int show_kprobe_addr(struct seq_file *pi, void *v)
2748 {
2749         struct hlist_head *head;
2750         struct kprobe *p, *kp;
2751         const char *sym = NULL;
2752         unsigned int i = *(loff_t *) v;
2753         unsigned long offset = 0;
2754         char *modname, namebuf[KSYM_NAME_LEN];
2755
2756         head = &kprobe_table[i];
2757         preempt_disable();
2758         hlist_for_each_entry_rcu(p, head, hlist) {
2759                 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2760                                         &offset, &modname, namebuf);
2761                 if (kprobe_aggrprobe(p)) {
2762                         list_for_each_entry_rcu(kp, &p->list, list)
2763                                 report_probe(pi, kp, sym, offset, modname, p);
2764                 } else
2765                         report_probe(pi, p, sym, offset, modname, NULL);
2766         }
2767         preempt_enable();
2768         return 0;
2769 }
2770
2771 static const struct seq_operations kprobes_sops = {
2772         .start = kprobe_seq_start,
2773         .next  = kprobe_seq_next,
2774         .stop  = kprobe_seq_stop,
2775         .show  = show_kprobe_addr
2776 };
2777
2778 DEFINE_SEQ_ATTRIBUTE(kprobes);
2779
2780 /* kprobes/blacklist -- shows which functions cannot be probed */
2781 static void *kprobe_blacklist_seq_start(struct seq_file *m, loff_t *pos)
2782 {
2783         mutex_lock(&kprobe_mutex);
2784         return seq_list_start(&kprobe_blacklist, *pos);
2785 }
2786
2787 static void *kprobe_blacklist_seq_next(struct seq_file *m, void *v, loff_t *pos)
2788 {
2789         return seq_list_next(v, &kprobe_blacklist, pos);
2790 }
2791
2792 static int kprobe_blacklist_seq_show(struct seq_file *m, void *v)
2793 {
2794         struct kprobe_blacklist_entry *ent =
2795                 list_entry(v, struct kprobe_blacklist_entry, list);
2796
2797         /*
2798          * If /proc/kallsyms is not showing kernel addresses, we won't
2799          * show them here either.
2800          */
2801         if (!kallsyms_show_value(m->file->f_cred))
2802                 seq_printf(m, "0x%px-0x%px\t%ps\n", NULL, NULL,
2803                            (void *)ent->start_addr);
2804         else
2805                 seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr,
2806                            (void *)ent->end_addr, (void *)ent->start_addr);
2807         return 0;
2808 }
2809
2810 static void kprobe_blacklist_seq_stop(struct seq_file *f, void *v)
2811 {
2812         mutex_unlock(&kprobe_mutex);
2813 }
2814
2815 static const struct seq_operations kprobe_blacklist_sops = {
2816         .start = kprobe_blacklist_seq_start,
2817         .next  = kprobe_blacklist_seq_next,
2818         .stop  = kprobe_blacklist_seq_stop,
2819         .show  = kprobe_blacklist_seq_show,
2820 };
2821 DEFINE_SEQ_ATTRIBUTE(kprobe_blacklist);
2822
2823 static int arm_all_kprobes(void)
2824 {
2825         struct hlist_head *head;
2826         struct kprobe *p;
2827         unsigned int i, total = 0, errors = 0;
2828         int err, ret = 0;
2829
2830         mutex_lock(&kprobe_mutex);
2831
2832         /* If kprobes are armed, just return */
2833         if (!kprobes_all_disarmed)
2834                 goto already_enabled;
2835
2836         /*
2837          * optimize_kprobe() called by arm_kprobe() checks
2838          * kprobes_all_disarmed, so set kprobes_all_disarmed before
2839          * arm_kprobe.
2840          */
2841         kprobes_all_disarmed = false;
2842         /* Arming a kprobe doesn't optimize the kprobe itself */
2843         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2844                 head = &kprobe_table[i];
2845                 /* Arm all kprobes on a best-effort basis */
2846                 hlist_for_each_entry(p, head, hlist) {
2847                         if (!kprobe_disabled(p)) {
2848                                 err = arm_kprobe(p);
2849                                 if (err)  {
2850                                         errors++;
2851                                         ret = err;
2852                                 }
2853                                 total++;
2854                         }
2855                 }
2856         }
2857
2858         if (errors)
2859                 pr_warn("Kprobes globally enabled, but failed to arm %d out of %d probes\n",
2860                         errors, total);
2861         else
2862                 pr_info("Kprobes globally enabled\n");
2863
2864 already_enabled:
2865         mutex_unlock(&kprobe_mutex);
2866         return ret;
2867 }
2868
2869 static int disarm_all_kprobes(void)
2870 {
2871         struct hlist_head *head;
2872         struct kprobe *p;
2873         unsigned int i, total = 0, errors = 0;
2874         int err, ret = 0;
2875
2876         mutex_lock(&kprobe_mutex);
2877
2878         /* If kprobes are already disarmed, just return */
2879         if (kprobes_all_disarmed) {
2880                 mutex_unlock(&kprobe_mutex);
2881                 return 0;
2882         }
2883
2884         kprobes_all_disarmed = true;
2885
2886         for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2887                 head = &kprobe_table[i];
2888                 /* Disarm all kprobes on a best-effort basis */
2889                 hlist_for_each_entry(p, head, hlist) {
2890                         if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) {
2891                                 err = disarm_kprobe(p, false);
2892                                 if (err) {
2893                                         errors++;
2894                                         ret = err;
2895                                 }
2896                                 total++;
2897                         }
2898                 }
2899         }
2900
2901         if (errors)
2902                 pr_warn("Kprobes globally disabled, but failed to disarm %d out of %d probes\n",
2903                         errors, total);
2904         else
2905                 pr_info("Kprobes globally disabled\n");
2906
2907         mutex_unlock(&kprobe_mutex);
2908
2909         /* Wait for the optimizer to finish disarming all kprobes */
2910         wait_for_kprobe_optimizer();
2911
2912         return ret;
2913 }
2914
2915 /*
2916  * XXX: The debugfs bool file interface doesn't allow for callbacks
2917  * when the bool state is switched. We can reuse that facility when
2918  * available.
2919  */
2920 static ssize_t read_enabled_file_bool(struct file *file,
2921                char __user *user_buf, size_t count, loff_t *ppos)
2922 {
2923         char buf[3];
2924
2925         if (!kprobes_all_disarmed)
2926                 buf[0] = '1';
2927         else
2928                 buf[0] = '0';
2929         buf[1] = '\n';
2930         buf[2] = 0x00;
2931         return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2932 }
2933
2934 static ssize_t write_enabled_file_bool(struct file *file,
2935                const char __user *user_buf, size_t count, loff_t *ppos)
2936 {
2937         char buf[32];
2938         size_t buf_size;
2939         int ret = 0;
2940
2941         buf_size = min(count, (sizeof(buf)-1));
2942         if (copy_from_user(buf, user_buf, buf_size))
2943                 return -EFAULT;
2944
2945         buf[buf_size] = '\0';
2946         switch (buf[0]) {
2947         case 'y':
2948         case 'Y':
2949         case '1':
2950                 ret = arm_all_kprobes();
2951                 break;
2952         case 'n':
2953         case 'N':
2954         case '0':
2955                 ret = disarm_all_kprobes();
2956                 break;
2957         default:
2958                 return -EINVAL;
2959         }
2960
2961         if (ret)
2962                 return ret;
2963
2964         return count;
2965 }
2966
2967 static const struct file_operations fops_kp = {
2968         .read =         read_enabled_file_bool,
2969         .write =        write_enabled_file_bool,
2970         .llseek =       default_llseek,
2971 };
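
/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	# cat /sys/kernel/debug/kprobes/enabled
 *	1
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	# disarm all kprobes
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	# re-arm them
 */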
2972
2973 static int __init debugfs_kprobe_init(void)
2974 {
2975         struct dentry *dir;
2976
2977         dir = debugfs_create_dir("kprobes", NULL);
2978
2979         debugfs_create_file("list", 0400, dir, NULL, &kprobes_fops);
2980
2981         debugfs_create_file("enabled", 0600, dir, NULL, &fops_kp);
2982
2983         debugfs_create_file("blacklist", 0400, dir, NULL,
2984                             &kprobe_blacklist_fops);
2985
2986         return 0;
2987 }
2988
2989 late_initcall(debugfs_kprobe_init);
2990 #endif /* CONFIG_DEBUG_FS */