GNU Linux-libre 4.19.207-gnu1
arch/x86/kernel/unwind_orc.c
1 #include <linux/module.h>
2 #include <linux/sort.h>
3 #include <asm/ptrace.h>
4 #include <asm/stacktrace.h>
5 #include <asm/unwind.h>
6 #include <asm/orc_types.h>
7 #include <asm/orc_lookup.h>
8
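/*
 * Note: printk_deferred_once() is presumably used because the unwinder can be
 * called from within printk() or scheduler paths where taking the console
 * locks again would be unsafe, and the "_once" keeps a single bad ORC table
 * from flooding the log.
 */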
9 #define orc_warn(fmt, ...) \
10         printk_deferred_once(KERN_WARNING pr_fmt("WARNING: " fmt), ##__VA_ARGS__)
11
12 extern int __start_orc_unwind_ip[];
13 extern int __stop_orc_unwind_ip[];
14 extern struct orc_entry __start_orc_unwind[];
15 extern struct orc_entry __stop_orc_unwind[];
16
17 static DEFINE_MUTEX(sort_mutex);
18 int *cur_orc_ip_table = __start_orc_unwind_ip;
19 struct orc_entry *cur_orc_table = __start_orc_unwind;
20
21 unsigned int lookup_num_blocks;
22 bool orc_init;
23
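/*
 * The .orc_unwind_ip and .orc_unwind sections are parallel arrays: entry i of
 * the ip table gives the text address at which entry i of the orc table takes
 * effect.  Each ip table slot holds a signed 32-bit offset relative to the
 * slot's own address, which keeps the table position-independent; orc_ip()
 * converts a slot back to an absolute address:
 *
 *	addr = (unsigned long)&ip_table[i] + ip_table[i];
 */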
24 static inline unsigned long orc_ip(const int *ip)
25 {
26         return (unsigned long)ip + *ip;
27 }
28
29 static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
30                                     unsigned int num_entries, unsigned long ip)
31 {
32         int *first = ip_table;
33         int *last = ip_table + num_entries - 1;
34         int *mid = first, *found = first;
35
36         if (!num_entries)
37                 return NULL;
38
39         /*
40          * Do a binary range search to find the rightmost duplicate of a given
41          * starting address.  Some entries are section terminators which are
42          * "weak" entries for ensuring there are no gaps.  They should be
43          * ignored when they conflict with a real entry.
44          */
45         while (first <= last) {
46                 mid = first + ((last - first) / 2);
47
48                 if (orc_ip(mid) <= ip) {
49                         found = mid;
50                         first = mid + 1;
51                 } else
52                         last = mid - 1;
53         }
54
55         return u_table + (found - ip_table);
56 }
57
58 #ifdef CONFIG_MODULES
59 static struct orc_entry *orc_module_find(unsigned long ip)
60 {
61         struct module *mod;
62
63         mod = __module_address(ip);
64         if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
65                 return NULL;
66         return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind,
67                           mod->arch.num_orcs, ip);
68 }
69 #else
70 static struct orc_entry *orc_module_find(unsigned long ip)
71 {
72         return NULL;
73 }
74 #endif
75
76 #ifdef CONFIG_DYNAMIC_FTRACE
77 static struct orc_entry *orc_find(unsigned long ip);
78
79 /*
80  * Ftrace dynamic trampolines do not have orc entries of their own.
81  * But they are copies of the static ftrace entries defined in
82  * ftrace_*.S, which do have orc entries.
83  *
84  * If the unwinder comes across an ftrace trampoline, then find the
85  * ftrace function that was used to create it, and use that ftrace
86  * function's orc entry, as the placement of the return address on
87  * the stack will be identical.
88  */
89 static struct orc_entry *orc_ftrace_find(unsigned long ip)
90 {
91         struct ftrace_ops *ops;
92         unsigned long caller;
93
94         ops = ftrace_ops_trampoline(ip);
95         if (!ops)
96                 return NULL;
97
98         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
99                 caller = (unsigned long)ftrace_regs_call;
100         else
101                 caller = (unsigned long)ftrace_call;
102
103         /* Prevent unlikely recursion */
104         if (ip == caller)
105                 return NULL;
106
107         return orc_find(caller);
108 }
109 #else
110 static struct orc_entry *orc_ftrace_find(unsigned long ip)
111 {
112         return NULL;
113 }
114 #endif
115
116 /*
117  * If we crash with IP==0, the last successfully executed instruction
118  * was probably an indirect function call with a NULL function pointer,
119  * and we don't have unwind information for NULL.
120  * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
121  * pointer into its parent and then continue normally from there.
122  */
123 static struct orc_entry null_orc_entry = {
124         .sp_offset = sizeof(long),
125         .sp_reg = ORC_REG_SP,
126         .bp_reg = ORC_REG_UNDEFINED,
127         .type = ORC_TYPE_CALL
128 };
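/*
 * With this entry, the ORC_TYPE_CALL handling in unwind_next_frame() does
 * roughly the following (a sketch, not the exact code):
 *
 *	prev_sp   = state->sp + sizeof(long);		(.sp_offset)
 *	state->ip = *(unsigned long *)state->sp;	(saved return address)
 *
 * i.e. it pops the return address that the faulting indirect CALL pushed and
 * resumes unwinding in the caller.
 */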
129
130 static struct orc_entry *orc_find(unsigned long ip)
131 {
132         struct orc_entry *orc;
133
134         if (ip == 0)
135                 return &null_orc_entry;
136
137         /* For non-init vmlinux addresses, use the fast lookup table: */
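        /*
         * orc_lookup[] (see asm/orc_lookup.h) splits the non-init kernel text
         * into LOOKUP_BLOCK_SIZE-sized blocks; slot idx holds the index of
         * the ORC entry in effect at that block's start.  The [start, stop)
         * pair below therefore bounds the binary search in __orc_find() to
         * the few entries that can cover this block instead of the whole
         * table.
         */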
138         if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
139                 unsigned int idx, start, stop;
140
141                 idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;
142
143                 if (unlikely((idx >= lookup_num_blocks-1))) {
144                         orc_warn("bad lookup idx: idx=%u num=%u ip=%pB\n",
145                                  idx, lookup_num_blocks, (void *)ip);
146                         return NULL;
147                 }
148
149                 start = orc_lookup[idx];
150                 stop = orc_lookup[idx + 1] + 1;
151
152                 if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
153                              (__start_orc_unwind + stop > __stop_orc_unwind))) {
154                         orc_warn("bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
155                                  idx, lookup_num_blocks, start, stop, (void *)ip);
156                         return NULL;
157                 }
158
159                 return __orc_find(__start_orc_unwind_ip + start,
160                                   __start_orc_unwind + start, stop - start, ip);
161         }
162
163         /* vmlinux .init slow lookup: */
164         if (init_kernel_text(ip))
165                 return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
166                                   __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);
167
168         /* Module lookup: */
169         orc = orc_module_find(ip);
170         if (orc)
171                 return orc;
172
173         return orc_ftrace_find(ip);
174 }
175
176 static void orc_sort_swap(void *_a, void *_b, int size)
177 {
178         struct orc_entry *orc_a, *orc_b;
179         struct orc_entry orc_tmp;
180         int *a = _a, *b = _b, tmp;
181         int delta = _b - _a;
182
183         /* Swap the .orc_unwind_ip entries: */
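        /*
         * Each ip table value is relative to its own slot (see orc_ip()), so
         * a value moved between slots must be re-biased by the distance
         * between them to keep decoding to the same absolute address:
         *
         *	new_val = old_val + (old_slot - new_slot)
         */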
184         tmp = *a;
185         *a = *b + delta;
186         *b = tmp - delta;
187
188         /* Swap the corresponding .orc_unwind entries: */
189         orc_a = cur_orc_table + (a - cur_orc_ip_table);
190         orc_b = cur_orc_table + (b - cur_orc_ip_table);
191         orc_tmp = *orc_a;
192         *orc_a = *orc_b;
193         *orc_b = orc_tmp;
194 }
195
196 static int orc_sort_cmp(const void *_a, const void *_b)
197 {
198         struct orc_entry *orc_a;
199         const int *a = _a, *b = _b;
200         unsigned long a_val = orc_ip(a);
201         unsigned long b_val = orc_ip(b);
202
203         if (a_val > b_val)
204                 return 1;
205         if (a_val < b_val)
206                 return -1;
207
208         /*
209          * The "weak" section terminator entries need to always be on the left
210          * to ensure the lookup code skips them in favor of real entries.
211          * These terminator entries exist to handle any gaps created by
212          * whitelisted .o files which didn't get objtool generation.
213          */
214         orc_a = cur_orc_table + (a - cur_orc_ip_table);
215         return orc_a->sp_reg == ORC_REG_UNDEFINED && !orc_a->end ? -1 : 1;
216 }
217
218 #ifdef CONFIG_MODULES
219 void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
220                         void *_orc, size_t orc_size)
221 {
222         int *orc_ip = _orc_ip;
223         struct orc_entry *orc = _orc;
224         unsigned int num_entries = orc_ip_size / sizeof(int);
225
226         WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
227                      orc_size % sizeof(*orc) != 0 ||
228                      num_entries != orc_size / sizeof(*orc));
229
230         /*
231          * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
232          * associate an .orc_unwind_ip table entry with its corresponding
233          * .orc_unwind entry so they can both be swapped.
234          */
235         mutex_lock(&sort_mutex);
236         cur_orc_ip_table = orc_ip;
237         cur_orc_table = orc;
238         sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
239         mutex_unlock(&sort_mutex);
240
241         mod->arch.orc_unwind_ip = orc_ip;
242         mod->arch.orc_unwind = orc;
243         mod->arch.num_orcs = num_entries;
244 }
245 #endif
246
247 void __init unwind_init(void)
248 {
249         size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
250         size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
251         size_t num_entries = orc_ip_size / sizeof(int);
252         struct orc_entry *orc;
253         int i;
254
255         if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
256             orc_size % sizeof(struct orc_entry) != 0 ||
257             num_entries != orc_size / sizeof(struct orc_entry)) {
258                 orc_warn("Bad or missing .orc_unwind table.  Disabling unwinder.\n");
259                 return;
260         }
261
262         /* Sort the .orc_unwind and .orc_unwind_ip tables: */
263         sort(__start_orc_unwind_ip, num_entries, sizeof(int), orc_sort_cmp,
264              orc_sort_swap);
265
266         /* Initialize the fast lookup table: */
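        /*
         * Fill each orc_lookup[i] with the index of the ORC entry in effect
         * at the start of block i (LOOKUP_START_IP + i * LOOKUP_BLOCK_SIZE).
         * The final slot, filled further below, covers LOOKUP_STOP_IP so that
         * orc_find() can always use orc_lookup[idx + 1] as an upper bound.
         */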
267         lookup_num_blocks = orc_lookup_end - orc_lookup;
268         for (i = 0; i < lookup_num_blocks-1; i++) {
269                 orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
270                                  num_entries,
271                                  LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
272                 if (!orc) {
273                         orc_warn("Corrupt .orc_unwind table.  Disabling unwinder.\n");
274                         return;
275                 }
276
277                 orc_lookup[i] = orc - __start_orc_unwind;
278         }
279
280         /* Initialize the ending block: */
281         orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries,
282                          LOOKUP_STOP_IP);
283         if (!orc) {
284                 orc_warn("Corrupt .orc_unwind table.  Disabling unwinder.\n");
285                 return;
286         }
287         orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;
288
289         orc_init = true;
290 }
291
292 unsigned long unwind_get_return_address(struct unwind_state *state)
293 {
294         if (unwind_done(state))
295                 return 0;
296
297         return __kernel_text_address(state->ip) ? state->ip : 0;
298 }
299 EXPORT_SYMBOL_GPL(unwind_get_return_address);
300
301 unsigned long *unwind_get_return_address_ptr(struct unwind_state *state)
302 {
303         if (unwind_done(state))
304                 return NULL;
305
306         if (state->regs)
307                 return &state->regs->ip;
308
309         if (state->sp)
310                 return (unsigned long *)state->sp - 1;
311
312         return NULL;
313 }
314
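/*
 * Check that [addr, addr + len) lies on a stack the unwinder is allowed to
 * read.  If the range isn't on the stack recorded in state->stack_info,
 * get_stack_info() switches to whichever valid stack (task, IRQ, exception)
 * the address is on; state->stack_mask remembers stacks already visited so a
 * corrupt frame can't send the unwinder in circles.
 */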
315 static bool stack_access_ok(struct unwind_state *state, unsigned long _addr,
316                             size_t len)
317 {
318         struct stack_info *info = &state->stack_info;
319         void *addr = (void *)_addr;
320
321         if (!on_stack(info, addr, len) &&
322             (get_stack_info(addr, state->task, info, &state->stack_mask)))
323                 return false;
324
325         return true;
326 }
327
328 static bool deref_stack_reg(struct unwind_state *state, unsigned long addr,
329                             unsigned long *val)
330 {
331         if (!stack_access_ok(state, addr, sizeof(long)))
332                 return false;
333
334         *val = READ_ONCE_NOCHECK(*(unsigned long *)addr);
335         return true;
336 }
337
338 static bool deref_stack_regs(struct unwind_state *state, unsigned long addr,
339                              unsigned long *ip, unsigned long *sp)
340 {
341         struct pt_regs *regs = (struct pt_regs *)addr;
342
343         /* x86-32 support will be more complicated due to the &regs->sp hack */
344         BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_32));
345
346         if (!stack_access_ok(state, addr, sizeof(struct pt_regs)))
347                 return false;
348
349         *ip = READ_ONCE_NOCHECK(regs->ip);
350         *sp = READ_ONCE_NOCHECK(regs->sp);
351         return true;
352 }
353
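/*
 * An iret frame contains only the tail end of pt_regs (ip, cs, flags, sp,
 * ss), so back the pointer up by IRET_FRAME_OFFSET to make regs->ip and
 * regs->sp land on the right slots; only the IRET_FRAME_SIZE bytes that
 * actually exist on the stack are range-checked and read.
 */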
354 static bool deref_stack_iret_regs(struct unwind_state *state, unsigned long addr,
355                                   unsigned long *ip, unsigned long *sp)
356 {
357         struct pt_regs *regs = (void *)addr - IRET_FRAME_OFFSET;
358
359         if (!stack_access_ok(state, addr, IRET_FRAME_SIZE))
360                 return false;
361
362         *ip = READ_ONCE_NOCHECK(regs->ip);
363         *sp = READ_ONCE_NOCHECK(regs->sp);
364         return true;
365 }
366
367 /*
368  * If state->regs is non-NULL and points to a full pt_regs, just get the reg
369  * value from state->regs.
370  *
371  * Otherwise, if state->regs just points to IRET regs, and the previous frame
372  * had full regs, it's safe to get the value from the previous regs.  This can
373  * happen when early/late IRQ entry code gets interrupted by an NMI.
374  */
375 static bool get_reg(struct unwind_state *state, unsigned int reg_off,
376                     unsigned long *val)
377 {
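        /*
         * reg_off is a byte offset into struct pt_regs (from offsetof());
         * each saved register slot is 8 bytes on x86-64, so divide to get an
         * index into the register array.
         */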
378         unsigned int reg = reg_off/8;
379
380         if (!state->regs)
381                 return false;
382
383         if (state->full_regs) {
384                 *val = READ_ONCE_NOCHECK(((unsigned long *)state->regs)[reg]);
385                 return true;
386         }
387
388         if (state->prev_regs) {
389                 *val = READ_ONCE_NOCHECK(((unsigned long *)state->prev_regs)[reg]);
390                 return true;
391         }
392
393         return false;
394 }
395
396 bool unwind_next_frame(struct unwind_state *state)
397 {
398         unsigned long ip_p, sp, tmp, orig_ip = state->ip, prev_sp = state->sp;
399         enum stack_type prev_type = state->stack_info.type;
400         struct orc_entry *orc;
401         bool indirect = false;
402
403         if (unwind_done(state))
404                 return false;
405
406         /* Don't let modules unload while we're reading their ORC data. */
407         preempt_disable();
408
409         /* End-of-stack check for user tasks: */
410         if (state->regs && user_mode(state->regs))
411                 goto the_end;
412
413         /*
414          * Find the orc_entry associated with the text address.
415          *
416          * For a call frame (as opposed to a signal frame), state->ip points to
417          * the instruction after the call.  That instruction's stack layout
418          * could be different from the call instruction's layout, for example
419          * if the call was to a noreturn function.  So get the ORC data for the
420          * call instruction itself.
421          */
422         orc = orc_find(state->signal ? state->ip : state->ip - 1);
423         if (!orc)
424                 goto err;
425
426         /* End-of-stack check for kernel threads: */
427         if (orc->sp_reg == ORC_REG_UNDEFINED) {
428                 if (!orc->end)
429                         goto err;
430
431                 goto the_end;
432         }
433
434         /* Find the previous frame's stack: */
435         switch (orc->sp_reg) {
436         case ORC_REG_SP:
437                 sp = state->sp + orc->sp_offset;
438                 break;
439
440         case ORC_REG_BP:
441                 sp = state->bp + orc->sp_offset;
442                 break;
443
444         case ORC_REG_SP_INDIRECT:
445                 sp = state->sp + orc->sp_offset;
446                 indirect = true;
447                 break;
448
449         case ORC_REG_BP_INDIRECT:
450                 sp = state->bp + orc->sp_offset;
451                 indirect = true;
452                 break;
453
454         case ORC_REG_R10:
455                 if (!get_reg(state, offsetof(struct pt_regs, r10), &sp)) {
456                         orc_warn("missing regs for base reg R10 at ip %pB\n",
457                                  (void *)state->ip);
458                         goto err;
459                 }
460                 break;
461
462         case ORC_REG_R13:
463                 if (!get_reg(state, offsetof(struct pt_regs, r13), &sp)) {
464                         orc_warn("missing regs for base reg R13 at ip %pB\n",
465                                  (void *)state->ip);
466                         goto err;
467                 }
468                 break;
469
470         case ORC_REG_DI:
471                 if (!get_reg(state, offsetof(struct pt_regs, di), &sp)) {
472                         orc_warn("missing regs for base reg DI at ip %pB\n",
473                                  (void *)state->ip);
474                         goto err;
475                 }
476                 break;
477
478         case ORC_REG_DX:
479                 if (!get_reg(state, offsetof(struct pt_regs, dx), &sp)) {
480                         orc_warn("missing regs for base reg DX at ip %pB\n",
481                                  (void *)state->ip);
482                         goto err;
483                 }
484                 break;
485
486         default:
487                 orc_warn("unknown SP base reg %d for ip %pB\n",
488                          orc->sp_reg, (void *)state->ip);
489                 goto err;
490         }
491
492         if (indirect) {
493                 if (!deref_stack_reg(state, sp, &sp))
494                         goto err;
495         }
496
497         /* Find IP, SP and possibly regs: */
498         switch (orc->type) {
499         case ORC_TYPE_CALL:
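                /*
                 * 'sp' is the caller's stack pointer from before the CALL
                 * pushed the return address, so the return address itself
                 * sits one word below it.
                 */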
500                 ip_p = sp - sizeof(long);
501
502                 if (!deref_stack_reg(state, ip_p, &state->ip))
503                         goto err;
504
505                 state->ip = ftrace_graph_ret_addr(state->task, &state->graph_idx,
506                                                   state->ip, (void *)ip_p);
507
508                 state->sp = sp;
509                 state->regs = NULL;
510                 state->prev_regs = NULL;
511                 state->signal = false;
512                 break;
513
514         case ORC_TYPE_REGS:
515                 if (!deref_stack_regs(state, sp, &state->ip, &state->sp)) {
516                         orc_warn("can't dereference registers at %p for ip %pB\n",
517                                  (void *)sp, (void *)orig_ip);
518                         goto err;
519                 }
520
521                 state->regs = (struct pt_regs *)sp;
522                 state->prev_regs = NULL;
523                 state->full_regs = true;
524                 state->signal = true;
525                 break;
526
527         case ORC_TYPE_REGS_IRET:
528                 if (!deref_stack_iret_regs(state, sp, &state->ip, &state->sp)) {
529                         orc_warn("can't dereference iret registers at %p for ip %pB\n",
530                                  (void *)sp, (void *)orig_ip);
531                         goto err;
532                 }
533
534                 if (state->full_regs)
535                         state->prev_regs = state->regs;
536                 state->regs = (void *)sp - IRET_FRAME_OFFSET;
537                 state->full_regs = false;
538                 state->signal = true;
539                 break;
540
541         default:
542                 orc_warn("unknown .orc_unwind entry type %d for ip %pB\n",
543                          orc->type, (void *)orig_ip);
544                 goto err;
545         }
546
547         /* Find BP: */
548         switch (orc->bp_reg) {
549         case ORC_REG_UNDEFINED:
550                 if (get_reg(state, offsetof(struct pt_regs, bp), &tmp))
551                         state->bp = tmp;
552                 break;
553
554         case ORC_REG_PREV_SP:
555                 if (!deref_stack_reg(state, sp + orc->bp_offset, &state->bp))
556                         goto err;
557                 break;
558
559         case ORC_REG_BP:
560                 if (!deref_stack_reg(state, state->bp + orc->bp_offset, &state->bp))
561                         goto err;
562                 break;
563
564         default:
565                 orc_warn("unknown BP base reg %d for ip %pB\n",
566                          orc->bp_reg, (void *)orig_ip);
567                 goto err;
568         }
569
570         /* Prevent a recursive loop due to bad ORC data: */
571         if (state->stack_info.type == prev_type &&
572             on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) &&
573             state->sp <= prev_sp) {
574                 orc_warn("stack going in the wrong direction? ip=%pB\n",
575                          (void *)orig_ip);
576                 goto err;
577         }
578
579         preempt_enable();
580         return true;
581
582 err:
583         state->error = true;
584
585 the_end:
586         preempt_enable();
587         state->stack_info.type = STACK_TYPE_UNKNOWN;
588         return false;
589 }
590 EXPORT_SYMBOL_GPL(unwind_next_frame);
591
592 void __unwind_start(struct unwind_state *state, struct task_struct *task,
593                     struct pt_regs *regs, unsigned long *first_frame)
594 {
595         memset(state, 0, sizeof(*state));
596         state->task = task;
597
598         if (!orc_init)
599                 goto err;
600
601         /*
602          * Refuse to unwind the stack of a task while it's executing on another
603          * CPU.  This check is racy, but that's ok: the unwinder has other
604          * checks to prevent it from going off the rails.
605          */
606         if (task_on_another_cpu(task))
607                 goto err;
608
609         if (regs) {
610                 if (user_mode(regs))
611                         goto the_end;
612
613                 state->ip = regs->ip;
614                 state->sp = kernel_stack_pointer(regs);
615                 state->bp = regs->bp;
616                 state->regs = regs;
617                 state->full_regs = true;
618                 state->signal = true;
619
620         } else if (task == current) {
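                /*
                 * Snapshot the running task's own IP/SP/BP: "lea (%rip), %0"
                 * yields the address of the following instruction, so the
                 * unwind effectively starts inside __unwind_start() and the
                 * first_frame skip at the bottom walks up to the caller.
                 */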
621                 asm volatile("lea (%%rip), %0\n\t"
622                              "mov %%rsp, %1\n\t"
623                              "mov %%rbp, %2\n\t"
624                              : "=r" (state->ip), "=r" (state->sp),
625                                "=r" (state->bp));
626
627         } else {
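                /*
                 * The task isn't running: __switch_to_asm() left a struct
                 * inactive_task_frame at task->thread.sp holding its callee-
                 * saved registers and the address it will resume at, so start
                 * unwinding from its last schedule-out point.
                 */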
628                 struct inactive_task_frame *frame = (void *)task->thread.sp;
629
630                 state->sp = task->thread.sp + sizeof(*frame);
631                 state->bp = READ_ONCE_NOCHECK(frame->bp);
632                 state->ip = READ_ONCE_NOCHECK(frame->ret_addr);
633                 state->signal = (void *)state->ip == ret_from_fork;
634         }
635
636         if (get_stack_info((unsigned long *)state->sp, state->task,
637                            &state->stack_info, &state->stack_mask)) {
638                 /*
639                  * We weren't on a valid stack.  It's possible that
640                  * we overflowed a valid stack into a guard page.
641                  * See if the next page up is valid so that we can
642                  * generate some kind of backtrace if this happens.
643                  */
644                 void *next_page = (void *)PAGE_ALIGN((unsigned long)state->sp);
645                 state->error = true;
646                 if (get_stack_info(next_page, state->task, &state->stack_info,
647                                    &state->stack_mask))
648                         return;
649         }
650
651         /*
652          * The caller can provide the address of the first frame directly
653          * (first_frame) or indirectly (regs->sp) to indicate which stack frame
654          * to start unwinding at.  Skip ahead until we reach it.
655          */
656
657         /* When starting from regs, skip the regs frame: */
658         if (regs) {
659                 unwind_next_frame(state);
660                 return;
661         }
662
663         /* Otherwise, skip ahead to the user-specified starting frame: */
664         while (!unwind_done(state) &&
665                (!on_stack(&state->stack_info, first_frame, sizeof(long)) ||
666                         state->sp < (unsigned long)first_frame))
667                 unwind_next_frame(state);
668
669         return;
670
671 err:
672         state->error = true;
673 the_end:
674         state->stack_info.type = STACK_TYPE_UNKNOWN;
675 }
676 EXPORT_SYMBOL_GPL(__unwind_start);