/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#define pr_fmt(fmt) "KGDB: " fmt

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/nmi.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>
#include <linux/irq.h>
#include <linux/security.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/* kgdb_connected - Is a host GDB connected to us? */
int                             kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int                     kgdb_io_module_registered;

/* Guard for recursive entry */
static int                      exception_level;

struct kgdb_io          *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier, a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt         kgdb_break[KGDB_MAX_BREAKPOINTS] = {
        [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};
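
/*
 * Each entry in kgdb_break[] above moves through a simple life cycle,
 * as implemented below: BP_UNDEFINED (free slot) -> BP_SET (requested
 * by the debugger) -> BP_ACTIVE (trap instruction written to memory),
 * back to BP_SET while the debugger is resident, and finally
 * BP_REMOVED or BP_UNDEFINED once the breakpoint is deleted.
 */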

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t                        kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t                 masters_in_kgdb;
static atomic_t                 slaves_in_kgdb;
static atomic_t                 kgdb_break_tasklet_var;
atomic_t                        kgdb_setting_breakpoint;

struct task_struct              *kgdb_usethread;
struct task_struct              *kgdb_contthread;

int                             kgdb_single_step;
static pid_t                    kgdb_sstep_pid;

/* To keep track of the CPU which is doing the single stepping */
atomic_t                        kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
        kgdb_do_roundup = 0;

        return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);
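
/*
 * Example (illustrative): booting with "kgdboc=ttyS0,115200
 * nokgdbroundup" on the kernel command line keeps the other CPUs
 * running while one CPU sits in the debugger.
 */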

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
        int err;

        err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
                                BREAK_INSTR_SIZE);
        if (err)
                return err;
        err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
                                 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
        return err;
}
NOKPROBE_SYMBOL(kgdb_arch_set_breakpoint);

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
        return copy_to_kernel_nofault((char *)bpt->bpt_addr,
                                  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
NOKPROBE_SYMBOL(kgdb_arch_remove_breakpoint);

int __weak kgdb_validate_break_address(unsigned long addr)
{
        struct kgdb_bkpt tmp;
        int err;

        if (kgdb_within_blocklist(addr))
                return -EINVAL;

        /* Validate setting the breakpoint and then removing it.  If the
         * remove fails, the kernel needs to emit a critical error message
         * because we are in deep trouble: we could not put things back
         * the way we found them.
         */
        tmp.bpt_addr = addr;
        err = kgdb_arch_set_breakpoint(&tmp);
        if (err)
                return err;
        err = kgdb_arch_remove_breakpoint(&tmp);
        if (err)
                pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
                       addr);
        return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
        return instruction_pointer(regs);
}
NOKPROBE_SYMBOL(kgdb_arch_pc);

int __weak kgdb_arch_init(void)
{
        return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
        return 0;
}
NOKPROBE_SYMBOL(kgdb_skipexception);

#ifdef CONFIG_SMP

/*
 * Default (weak) implementation for kgdb_roundup_cpus
 */

static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd);

void __weak kgdb_call_nmi_hook(void *ignored)
{
        /*
         * NOTE: get_irq_regs() is supposed to get the registers from
         * before the IPI interrupt happened and so is supposed to
         * show where the processor was.  In some situations it's
         * possible we might be called without an IPI, so it might be
         * safer to figure out how to make kgdb_breakpoint() work
         * properly here.
         */
        kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
NOKPROBE_SYMBOL(kgdb_call_nmi_hook);

void __weak kgdb_roundup_cpus(void)
{
        call_single_data_t *csd;
        int this_cpu = raw_smp_processor_id();
        int cpu;
        int ret;

        for_each_online_cpu(cpu) {
                /* No need to roundup ourselves */
                if (cpu == this_cpu)
                        continue;

                csd = &per_cpu(kgdb_roundup_csd, cpu);

                /*
                 * If it didn't round up last time, don't try again
                 * since smp_call_function_single_async() will block.
                 *
                 * If rounding_up is false then we know that the
                 * previous call must have at least started and that
                 * means smp_call_function_single_async() won't block.
                 */
                if (kgdb_info[cpu].rounding_up)
                        continue;
                kgdb_info[cpu].rounding_up = true;

                csd->func = kgdb_call_nmi_hook;
                ret = smp_call_function_single_async(cpu, csd);
                if (ret)
                        kgdb_info[cpu].rounding_up = false;
        }
}
NOKPROBE_SYMBOL(kgdb_roundup_cpus);

#endif

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
        if (!CACHE_FLUSH_IS_SAFE)
                return;

        if (current->mm) {
                int i;

                for (i = 0; i < VMACACHE_SIZE; i++) {
                        if (!current->vmacache.vmas[i])
                                continue;
                        flush_cache_range(current->vmacache.vmas[i],
                                          addr, addr + BREAK_INSTR_SIZE);
                }
        }

        /* Force flush instruction cache if it was outside the mm */
        flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}
NOKPROBE_SYMBOL(kgdb_flush_swbreak_addr);

/*
 * SW breakpoint management:
 */
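/*
 * dbg_activate_sw_breakpoints() below patches the architecture trap
 * instruction over every BP_SET breakpoint, typically just before the
 * system is resumed, while dbg_deactivate_sw_breakpoints() puts the
 * original instructions back while the debugger is active so that
 * memory reads and single stepping see the real code.
 */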
int dbg_activate_sw_breakpoints(void)
{
        int error;
        int ret = 0;
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state != BP_SET)
                        continue;

                error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
                if (error) {
                        ret = error;
                        pr_info("BP install failed: %lx\n",
                                kgdb_break[i].bpt_addr);
                        continue;
                }

                kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
                kgdb_break[i].state = BP_ACTIVE;
        }
        return ret;
}
NOKPROBE_SYMBOL(dbg_activate_sw_breakpoints);

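/*
 * dbg_set_sw_break() only records the request: it validates the
 * address, reuses a BP_REMOVED slot for the same address if one
 * exists, otherwise claims a free BP_UNDEFINED slot, and marks it
 * BP_SET.  Nothing is written to kernel text until the breakpoints
 * are activated.
 */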
int dbg_set_sw_break(unsigned long addr)
{
        int err = kgdb_validate_break_address(addr);
        int breakno = -1;
        int i;

        if (err)
                return err;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if ((kgdb_break[i].state == BP_SET) &&
                                        (kgdb_break[i].bpt_addr == addr))
                        return -EEXIST;
        }
        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state == BP_REMOVED &&
                                        kgdb_break[i].bpt_addr == addr) {
                        breakno = i;
                        break;
                }
        }

        if (breakno == -1) {
                for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                        if (kgdb_break[i].state == BP_UNDEFINED) {
                                breakno = i;
                                break;
                        }
                }
        }

        if (breakno == -1)
                return -E2BIG;

        kgdb_break[breakno].state = BP_SET;
        kgdb_break[breakno].type = BP_BREAKPOINT;
        kgdb_break[breakno].bpt_addr = addr;

        return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
        int error;
        int ret = 0;
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state != BP_ACTIVE)
                        continue;
                error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error) {
                        pr_info("BP remove failed: %lx\n",
                                kgdb_break[i].bpt_addr);
                        ret = error;
                }

                kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
                kgdb_break[i].state = BP_SET;
        }
        return ret;
}
NOKPROBE_SYMBOL(dbg_deactivate_sw_breakpoints);

int dbg_remove_sw_break(unsigned long addr)
{
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if ((kgdb_break[i].state == BP_SET) &&
                                (kgdb_break[i].bpt_addr == addr)) {
                        kgdb_break[i].state = BP_REMOVED;
                        return 0;
                }
        }
        return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if ((kgdb_break[i].state == BP_REMOVED) &&
                                        (kgdb_break[i].bpt_addr == addr))
                        return 1;
        }
        return 0;
}

int kgdb_has_hit_break(unsigned long addr)
{
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state == BP_ACTIVE &&
                    kgdb_break[i].bpt_addr == addr)
                        return 1;
        }
        return 0;
}

int dbg_remove_all_break(void)
{
        int error;
        int i;

        /* Clear memory breakpoints. */
        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state != BP_ACTIVE)
                        goto setundefined;
                error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
                if (error)
                        pr_err("breakpoint remove failed: %lx\n",
                               kgdb_break[i].bpt_addr);
setundefined:
                kgdb_break[i].state = BP_UNDEFINED;
        }

        /* Clear hardware breakpoints. */
        if (arch_kgdb_ops.remove_all_hw_break)
                arch_kgdb_ops.remove_all_hw_break();

        return 0;
}

void kgdb_free_init_mem(void)
{
        int i;

        /* Clear init memory breakpoints. */
        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
                        kgdb_break[i].state = BP_UNDEFINED;
        }
}

#ifdef CONFIG_KGDB_KDB
void kdb_dump_stack_on_cpu(int cpu)
{
        if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
                dump_stack();
                return;
        }

        if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
                kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
                           cpu);
                return;
        }

        /*
         * In general, architectures don't support dumping the stack of a
         * "running" process that's not the current one.  From the point of
         * view of Linux, kernel processes that are looping in the kgdb
         * slave loop are still "running".  There's also no API (that actually
         * works across all architectures) that can do a stack crawl based
         * on registers passed as a parameter.
         *
         * Solve this conundrum by asking slave CPUs to do the backtrace
         * themselves.
         */
        kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
        while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
                cpu_relax();
}
#endif

/*
 * Return true if there is a valid kgdb I/O module.  Also, if no
 * debugger is attached, a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception(), because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
        if (!dbg_io_ops)
                return 0;
        if (kgdb_connected)
                return 1;
        if (atomic_read(&kgdb_setting_breakpoint))
                return 1;
        if (print_wait) {
#ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
                        pr_crit("waiting... or $3#33 for KDB\n");
#else
                pr_crit("Waiting for remote debugger\n");
#endif
        }
        return 1;
}
NOKPROBE_SYMBOL(kgdb_io_ready);

static int kgdb_reenter_check(struct kgdb_state *ks)
{
        unsigned long addr;

        if (atomic_read(&kgdb_active) != raw_smp_processor_id())
                return 0;

        /* Panic on recursive debugger calls: */
        exception_level++;
        addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
        dbg_deactivate_sw_breakpoints();

        /*
         * If the breakpoint was removed cleanly at the place the
         * exception occurred, try to recover and print a warning to
         * the end user, because the user planted a breakpoint in a
         * place that KGDB needs in order to function.
         */
        if (dbg_remove_sw_break(addr) == 0) {
                exception_level = 0;
                kgdb_skipexception(ks->ex_vector, ks->linux_regs);
                dbg_activate_sw_breakpoints();
                pr_crit("re-enter error: breakpoint removed %lx\n", addr);
                WARN_ON_ONCE(1);

                return 1;
        }
        dbg_remove_all_break();
        kgdb_skipexception(ks->ex_vector, ks->linux_regs);

        if (exception_level > 1) {
                dump_stack();
                kgdb_io_module_registered = false;
                panic("Recursive entry to debugger");
        }

        pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
        /* Allow kdb to debug itself one level */
        return 0;
#endif
        dump_stack();
        panic("Recursive entry to debugger");

        return 1;
}
NOKPROBE_SYMBOL(kgdb_reenter_check);

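/*
 * CPUs can sit in the debugger for a long time with interrupts off,
 * so poke the softlockup, clocksource and RCU stall detectors on the
 * way out to avoid spurious warnings once the system resumes.
 */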
static void dbg_touch_watchdogs(void)
{
        touch_softlockup_watchdog_sync();
        clocksource_touch_watchdog();
        rcu_cpu_stall_reset();
}
NOKPROBE_SYMBOL(dbg_touch_watchdogs);

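/*
 * kgdb_cpu_enter() is the common parking area for both the master CPU
 * (the one that took the exception, DCPU_WANT_MASTER) and the slave
 * CPUs rounded up via kgdb_nmicallback() (DCPU_IS_SLAVE).  The master
 * takes dbg_master_lock and runs the kdb or gdbstub command loop;
 * slaves spin until dbg_slave_lock is released or until they are asked
 * to become the next master.
 */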
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
                int exception_state)
{
        unsigned long flags;
        int sstep_tries = 100;
        int error;
        int cpu;
        int trace_on = 0;
        int online_cpus = num_online_cpus();
        u64 time_left;

        kgdb_info[ks->cpu].enter_kgdb++;
        kgdb_info[ks->cpu].exception_state |= exception_state;

        if (exception_state == DCPU_WANT_MASTER)
                atomic_inc(&masters_in_kgdb);
        else
                atomic_inc(&slaves_in_kgdb);

        if (arch_kgdb_ops.disable_hw_break)
                arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
        rcu_read_lock();
        /*
         * Interrupts will be restored by the 'trap return' code, except when
         * single stepping.
         */
        local_irq_save(flags);

        cpu = ks->cpu;
        kgdb_info[cpu].debuggerinfo = regs;
        kgdb_info[cpu].task = current;
        kgdb_info[cpu].ret_state = 0;
        kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

        /* Make sure the above info reaches the primary CPU */
        smp_mb();

        if (exception_level == 1) {
                if (raw_spin_trylock(&dbg_master_lock))
                        atomic_xchg(&kgdb_active, cpu);
                goto cpu_master_loop;
        }

        /*
         * CPU will loop if it is a slave or request to become a kgdb
         * master cpu and acquire the kgdb_active lock:
         */
        while (1) {
cpu_loop:
                if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
                        kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
                        goto cpu_master_loop;
                } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
                        if (raw_spin_trylock(&dbg_master_lock)) {
                                atomic_xchg(&kgdb_active, cpu);
                                break;
                        }
                } else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
                        dump_stack();
                        kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
                } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
                        if (!raw_spin_is_locked(&dbg_slave_lock))
                                goto return_normal;
                } else {
return_normal:
                        /* Return to normal operation by executing any
                         * hw breakpoint fixup.
                         */
                        if (arch_kgdb_ops.correct_hw_break)
                                arch_kgdb_ops.correct_hw_break();
                        if (trace_on)
                                tracing_on();
                        kgdb_info[cpu].debuggerinfo = NULL;
                        kgdb_info[cpu].task = NULL;
                        kgdb_info[cpu].exception_state &=
                                ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
                        kgdb_info[cpu].enter_kgdb--;
                        smp_mb__before_atomic();
                        atomic_dec(&slaves_in_kgdb);
                        dbg_touch_watchdogs();
                        local_irq_restore(flags);
                        rcu_read_unlock();
                        return 0;
                }
                cpu_relax();
        }

        /*
         * For single stepping, try to only enter on the processor
         * that was single stepping.  To guard against a deadlock, the
         * kernel will only try for the value of sstep_tries before
         * giving up and continuing on.
         */
        if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
            (kgdb_info[cpu].task &&
             kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
                atomic_set(&kgdb_active, -1);
                raw_spin_unlock(&dbg_master_lock);
                dbg_touch_watchdogs();
                local_irq_restore(flags);
                rcu_read_unlock();

                goto acquirelock;
        }

        if (!kgdb_io_ready(1)) {
                kgdb_info[cpu].ret_state = 1;
                goto kgdb_restore; /* No I/O connection, resume the system */
        }

        /*
         * Don't enter if we have hit a removed breakpoint.
         */
        if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
                goto kgdb_restore;

        atomic_inc(&ignore_console_lock_warning);

        /* Call the I/O driver's pre_exception routine */
        if (dbg_io_ops->pre_exception)
                dbg_io_ops->pre_exception();

        /*
         * Get the passive CPU lock which will hold all the non-primary
         * CPUs in a spin state while the debugger is active.
         */
        if (!kgdb_single_step)
                raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
        /* If send_ready set, slaves are already waiting */
        if (ks->send_ready)
                atomic_set(ks->send_ready, 1);

        /* Signal the other CPUs to enter kgdb_wait() */
        else if ((!kgdb_single_step) && kgdb_do_roundup)
                kgdb_roundup_cpus();
#endif

        /*
         * Wait for the other CPUs to be notified and be waiting for us:
         */
        time_left = MSEC_PER_SEC;
        while (kgdb_do_roundup && --time_left &&
               (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
                   online_cpus)
                udelay(1000);
        if (!time_left)
                pr_crit("Timed out waiting for secondary CPUs.\n");

        /*
         * At this point the primary processor is completely
         * in the debugger and all secondary CPUs are quiescent
         */
        dbg_deactivate_sw_breakpoints();
        kgdb_single_step = 0;
        kgdb_contthread = current;
        exception_level = 0;
        trace_on = tracing_is_on();
        if (trace_on)
                tracing_off();

        while (1) {
cpu_master_loop:
                if (dbg_kdb_mode) {
                        kgdb_connected = 1;
                        error = kdb_stub(ks);
                        if (error == -1)
                                continue;
                        kgdb_connected = 0;
                } else {
                        /*
                         * This is a brutal way to interfere with the debugger
                         * and prevent gdb being used to poke at kernel memory.
                         * This could cause trouble if lockdown is applied when
                         * there is already an active gdb session. For now the
                         * answer is simply "don't do that". Typically lockdown
                         * *will* be applied before the debug core gets started
                         * so only developers using kgdb for fairly advanced
                         * early kernel debug can be bitten by this. Hopefully
                         * they are sophisticated enough to take care of
                         * themselves, especially with help from the lockdown
                         * message printed on the console!
                         */
                        if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
                                if (IS_ENABLED(CONFIG_KGDB_KDB)) {
                                        /* Switch back to kdb if possible... */
                                        dbg_kdb_mode = 1;
                                        continue;
                                } else {
                                        /* ... otherwise just bail */
                                        break;
                                }
                        }
                        error = gdb_serial_stub(ks);
                }

                if (error == DBG_PASS_EVENT) {
                        dbg_kdb_mode = !dbg_kdb_mode;
                } else if (error == DBG_SWITCH_CPU_EVENT) {
                        kgdb_info[dbg_switch_cpu].exception_state |=
                                DCPU_NEXT_MASTER;
                        goto cpu_loop;
                } else {
                        kgdb_info[cpu].ret_state = error;
                        break;
                }
        }

        dbg_activate_sw_breakpoints();

        /* Call the I/O driver's post_exception routine */
        if (dbg_io_ops->post_exception)
                dbg_io_ops->post_exception();

        atomic_dec(&ignore_console_lock_warning);

        if (!kgdb_single_step) {
                raw_spin_unlock(&dbg_slave_lock);
                /* Wait till all the CPUs have quit from the debugger. */
                while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
                        cpu_relax();
        }

kgdb_restore:
        if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
                int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
                if (kgdb_info[sstep_cpu].task)
                        kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
                else
                        kgdb_sstep_pid = 0;
        }
        if (arch_kgdb_ops.correct_hw_break)
                arch_kgdb_ops.correct_hw_break();
        if (trace_on)
                tracing_on();

        kgdb_info[cpu].debuggerinfo = NULL;
        kgdb_info[cpu].task = NULL;
        kgdb_info[cpu].exception_state &=
                ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
        kgdb_info[cpu].enter_kgdb--;
        smp_mb__before_atomic();
        atomic_dec(&masters_in_kgdb);
        /* Free kgdb_active */
        atomic_set(&kgdb_active, -1);
        raw_spin_unlock(&dbg_master_lock);
        dbg_touch_watchdogs();
        local_irq_restore(flags);
        rcu_read_unlock();

        return kgdb_info[cpu].ret_state;
}
NOKPROBE_SYMBOL(kgdb_cpu_enter);

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *      interface locks, if any (begin_session)
 *      kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
        struct kgdb_state kgdb_var;
        struct kgdb_state *ks = &kgdb_var;
        int ret = 0;

        if (arch_kgdb_ops.enable_nmi)
                arch_kgdb_ops.enable_nmi(0);
        /*
         * Avoid entering the debugger if we were triggered due to an oops
         * but panic_timeout indicates the system should automatically
         * reboot on panic. We don't want to get stuck waiting for input
         * on such systems, especially if it's "just" an oops.
         */
        if (signo != SIGTRAP && panic_timeout)
                return 1;

        memset(ks, 0, sizeof(struct kgdb_state));
        ks->cpu                 = raw_smp_processor_id();
        ks->ex_vector           = evector;
        ks->signo               = signo;
        ks->err_code            = ecode;
        ks->linux_regs          = regs;

        if (kgdb_reenter_check(ks))
                goto out; /* Ouch, double exception ! */
        if (kgdb_info[ks->cpu].enter_kgdb != 0)
                goto out;

        ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
        if (arch_kgdb_ops.enable_nmi)
                arch_kgdb_ops.enable_nmi(1);
        return ret;
}
NOKPROBE_SYMBOL(kgdb_handle_exception);

/*
 * GDB places a breakpoint at this function to learn about dynamically loaded objects.
 */
static int module_event(struct notifier_block *self, unsigned long val,
        void *data)
{
        return 0;
}

static struct notifier_block dbg_module_load_nb = {
        .notifier_call  = module_event,
};

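/*
 * kgdb_nmicallback() is called, typically from an IPI or NMI handler,
 * on a CPU that is being rounded up.  If a master CPU currently holds
 * dbg_master_lock, the CPU parks itself in kgdb_cpu_enter() as a
 * slave; the function returns 0 when the CPU entered the debugger and
 * 1 when the call was ignored.
 */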
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
        struct kgdb_state kgdb_var;
        struct kgdb_state *ks = &kgdb_var;

        kgdb_info[cpu].rounding_up = false;

        memset(ks, 0, sizeof(struct kgdb_state));
        ks->cpu                 = cpu;
        ks->linux_regs          = regs;

        if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
                        raw_spin_is_locked(&dbg_master_lock)) {
                kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
                return 0;
        }
#endif
        return 1;
}
NOKPROBE_SYMBOL(kgdb_nmicallback);

int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
                                                        atomic_t *send_ready)
{
#ifdef CONFIG_SMP
        if (!kgdb_io_ready(0) || !send_ready)
                return 1;

        if (kgdb_info[cpu].enter_kgdb == 0) {
                struct kgdb_state kgdb_var;
                struct kgdb_state *ks = &kgdb_var;

                memset(ks, 0, sizeof(struct kgdb_state));
                ks->cpu                 = cpu;
                ks->ex_vector           = trapnr;
                ks->signo               = SIGTRAP;
                ks->err_code            = err_code;
                ks->linux_regs          = regs;
                ks->send_ready          = send_ready;
                kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
                return 0;
        }
#endif
        return 1;
}
NOKPROBE_SYMBOL(kgdb_nmicallin);

static void kgdb_console_write(struct console *co, const char *s,
   unsigned count)
{
        unsigned long flags;

        /* If we're debugging, or KGDB has not connected, don't try
         * to print. */
        if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
                return;

        local_irq_save(flags);
        gdbstub_msg_write(s, count);
        local_irq_restore(flags);
}

static struct console kgdbcons = {
        .name           = "kgdb",
        .write          = kgdb_console_write,
        .flags          = CON_PRINTBUFFER | CON_ENABLED,
        .index          = -1,
};

static int __init opt_kgdb_con(char *str)
{
        kgdb_use_con = 1;

        if (kgdb_io_module_registered && !kgdb_con_registered) {
                register_console(&kgdbcons);
                kgdb_con_registered = 1;
        }

        return 0;
}

early_param("kgdbcon", opt_kgdb_con);
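
/*
 * Example (illustrative): booting with "kgdboc=ttyS0,115200 kgdbcon"
 * registers the kgdb console above so kernel messages are mirrored to
 * an attached gdb session once it connects.
 */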

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
        if (!dbg_io_ops) {
                pr_crit("ERROR: No KGDB I/O module available\n");
                return;
        }
        if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
                        pr_crit("KGDB or $3#33 for KDB\n");
#else
                pr_crit("Entering KGDB\n");
#endif
        }

        kgdb_breakpoint();
}

static const struct sysrq_key_op sysrq_dbg_op = {
        .handler        = sysrq_handle_dbg,
        .help_msg       = "debug(g)",
        .action_msg     = "DEBUG",
};
#endif

void kgdb_panic(const char *msg)
{
        if (!kgdb_io_module_registered)
                return;

        /*
         * We don't want to get stuck waiting for input from the user if
         * "panic_timeout" indicates the system should automatically
         * reboot on panic.
         */
        if (panic_timeout)
                return;

        debug_locks_off();
        console_flush_on_panic(CONSOLE_FLUSH_PENDING);

        if (dbg_kdb_mode)
                kdb_printf("PANIC: %s\n", msg);

        kgdb_breakpoint();
}

static void kgdb_initial_breakpoint(void)
{
        kgdb_break_asap = 0;

        pr_crit("Waiting for connection from remote gdb...\n");
        kgdb_breakpoint();
}

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
        dbg_is_early = false;
        if (kgdb_io_module_registered)
                kgdb_arch_late();
        kdb_init(KDB_INIT_FULL);

        if (kgdb_io_module_registered && kgdb_break_asap)
                kgdb_initial_breakpoint();
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
        /*
         * Take the following action on reboot notify depending on value:
         *    1 == Enter debugger
         *    0 == [the default] detach the debug client
         *   -1 == Do nothing... and use this until the board resets
         */
        switch (kgdbreboot) {
        case 1:
                kgdb_breakpoint();
        case -1:
                goto done;
        }
        if (!dbg_kdb_mode)
                gdbstub_exit(code);
done:
        return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
        .notifier_call          = dbg_notify_reboot,
        .next                   = NULL,
        .priority               = INT_MAX,
};

static void kgdb_register_callbacks(void)
{
        if (!kgdb_io_module_registered) {
                kgdb_io_module_registered = 1;
                kgdb_arch_init();
                if (!dbg_is_early)
                        kgdb_arch_late();
                register_module_notifier(&dbg_module_load_nb);
                register_reboot_notifier(&dbg_reboot_notifier);
#ifdef CONFIG_MAGIC_SYSRQ
                register_sysrq_key('g', &sysrq_dbg_op);
#endif
                if (kgdb_use_con && !kgdb_con_registered) {
                        register_console(&kgdbcons);
                        kgdb_con_registered = 1;
                }
        }
}

static void kgdb_unregister_callbacks(void)
{
        /*
         * When this routine is called KGDB should unregister from
         * handlers and clean up, making sure it is not handling any
         * break exceptions at the time.
         */
        if (kgdb_io_module_registered) {
                kgdb_io_module_registered = 0;
                unregister_reboot_notifier(&dbg_reboot_notifier);
                unregister_module_notifier(&dbg_module_load_nb);
                kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
                unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
                if (kgdb_con_registered) {
                        unregister_console(&kgdbcons);
                        kgdb_con_registered = 0;
                }
        }
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
        kgdb_breakpoint();
        atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET_OLD(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt);

void kgdb_schedule_breakpoint(void)
{
        if (atomic_read(&kgdb_break_tasklet_var) ||
                atomic_read(&kgdb_active) != -1 ||
                atomic_read(&kgdb_setting_breakpoint))
                return;
        atomic_inc(&kgdb_break_tasklet_var);
        tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
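
/*
 * Illustrative use: an I/O driver that cannot safely trap in its own
 * interrupt context (for example a network based backend such as
 * kgdboe) calls kgdb_schedule_breakpoint() and the breakpoint is then
 * taken later from the tasklet instead.
 */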

/**
 *      kgdb_register_io_module - register KGDB IO module
 *      @new_dbg_io_ops: the io ops vector
 *
 *      Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
        struct kgdb_io *old_dbg_io_ops;
        int err;

        spin_lock(&kgdb_registration_lock);

        old_dbg_io_ops = dbg_io_ops;
        if (old_dbg_io_ops) {
                if (!old_dbg_io_ops->deinit) {
                        spin_unlock(&kgdb_registration_lock);

                        pr_err("KGDB I/O driver %s can't replace %s.\n",
                                new_dbg_io_ops->name, old_dbg_io_ops->name);
                        return -EBUSY;
                }
                pr_info("Replacing I/O driver %s with %s\n",
                        old_dbg_io_ops->name, new_dbg_io_ops->name);
        }

        if (new_dbg_io_ops->init) {
                err = new_dbg_io_ops->init();
                if (err) {
                        spin_unlock(&kgdb_registration_lock);
                        return err;
                }
        }

        dbg_io_ops = new_dbg_io_ops;

        spin_unlock(&kgdb_registration_lock);

        if (old_dbg_io_ops) {
                old_dbg_io_ops->deinit();
                return 0;
        }

        pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);

        /* Arm KGDB now. */
        kgdb_register_callbacks();

        if (kgdb_break_asap &&
            (!dbg_is_early || IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG)))
                kgdb_initial_breakpoint();

        return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);
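
/*
 * Minimal registration sketch (illustrative only; the full struct
 * kgdb_io field set is defined in <linux/kgdb.h> and the handlers
 * shown here are hypothetical):
 *
 *      static struct kgdb_io my_dbg_io_ops = {
 *              .name           = "my_dbg_io",
 *              .read_char      = my_read_char,
 *              .write_char     = my_write_char,
 *      };
 *
 *      err = kgdb_register_io_module(&my_dbg_io_ops);
 *
 * A later kgdb_unregister_io_module(&my_dbg_io_ops) detaches the
 * driver again.
 */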

/**
 *      kgdb_unregister_io_module - unregister KGDB IO module
 *      @old_dbg_io_ops: the io ops vector
 *
 *      Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
        BUG_ON(kgdb_connected);

        /*
         * KGDB is no longer able to communicate out, so
         * unregister our callbacks and reset state.
         */
        kgdb_unregister_callbacks();

        spin_lock(&kgdb_registration_lock);

        WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
        dbg_io_ops = NULL;

        spin_unlock(&kgdb_registration_lock);

        if (old_dbg_io_ops->deinit)
                old_dbg_io_ops->deinit();

        pr_info("Unregistered I/O driver %s, debugger disabled\n",
                old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

int dbg_io_get_char(void)
{
        int ret = dbg_io_ops->read_char();
        if (ret == NO_POLL_CHAR)
                return -1;
        if (!dbg_kdb_mode)
                return ret;
        if (ret == 127)
                return 8;
        return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
        atomic_inc(&kgdb_setting_breakpoint);
        wmb(); /* Sync point before breakpoint */
        arch_kgdb_breakpoint();
        wmb(); /* Sync point after breakpoint */
        atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
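
/*
 * Illustrative use from driver or test code, assuming KGDB is
 * configured and an I/O module such as kgdboc is registered:
 *
 *      if (something_went_wrong)
 *              kgdb_breakpoint();
 *
 * Execution stops at the compiled-in trap and control passes to kdb
 * or to the attached gdb.
 */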

static int __init opt_kgdb_wait(char *str)
{
        kgdb_break_asap = 1;

        kdb_init(KDB_INIT_EARLY);
        if (kgdb_io_module_registered &&
            IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG))
                kgdb_initial_breakpoint();

        return 0;
}

early_param("kgdbwait", opt_kgdb_wait);
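
/*
 * Example (illustrative): booting with "kgdboc=ttyS0,115200 kgdbwait"
 * stops the kernel at the initial breakpoint early in boot and waits
 * for gdb (or kdb on the console) to attach.
 */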