1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel Debug Core
4  *
5  * Maintainer: Jason Wessel <jason.wessel@windriver.com>
6  *
7  * Copyright (C) 2000-2001 VERITAS Software Corporation.
8  * Copyright (C) 2002-2004 Timesys Corporation
9  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
10  * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
11  * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
12  * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
13  * Copyright (C) 2005-2009 Wind River Systems, Inc.
14  * Copyright (C) 2007 MontaVista Software, Inc.
15  * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
16  *
17  * Contributors at various stages not listed above:
18  *  Jason Wessel ( jason.wessel@windriver.com )
19  *  George Anzinger <george@mvista.com>
20  *  Anurekh Saxena (anurekh.saxena@timesys.com)
21  *  Lake Stevens Instrument Division (Glenn Engel)
22  *  Jim Kingdon, Cygnus Support.
23  *
24  * Original KGDB stub: David Grothe <dave@gcom.com>,
25  * Tigran Aivazian <tigran@sco.com>
26  */
27
28 #define pr_fmt(fmt) "KGDB: " fmt
29
30 #include <linux/pid_namespace.h>
31 #include <linux/clocksource.h>
32 #include <linux/serial_core.h>
33 #include <linux/interrupt.h>
34 #include <linux/spinlock.h>
35 #include <linux/console.h>
36 #include <linux/threads.h>
37 #include <linux/uaccess.h>
38 #include <linux/kernel.h>
39 #include <linux/module.h>
40 #include <linux/ptrace.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/sched.h>
44 #include <linux/sysrq.h>
45 #include <linux/reboot.h>
46 #include <linux/init.h>
47 #include <linux/kgdb.h>
48 #include <linux/kdb.h>
49 #include <linux/nmi.h>
50 #include <linux/pid.h>
51 #include <linux/smp.h>
52 #include <linux/mm.h>
53 #include <linux/vmacache.h>
54 #include <linux/rcupdate.h>
55 #include <linux/irq.h>
56
57 #include <asm/cacheflush.h>
58 #include <asm/byteorder.h>
59 #include <linux/atomic.h>
60
61 #include "debug_core.h"
62
63 static int kgdb_break_asap;
64
65 struct debuggerinfo_struct kgdb_info[NR_CPUS];
66
67 /* kgdb_connected - Is a host GDB connected to us? */
68 int                             kgdb_connected;
69 EXPORT_SYMBOL_GPL(kgdb_connected);
70
71 /* All the KGDB handlers are installed */
72 int                     kgdb_io_module_registered;
73
74 /* Guard for recursive entry */
75 static int                      exception_level;
76
77 struct kgdb_io          *dbg_io_ops;
78 static DEFINE_SPINLOCK(kgdb_registration_lock);
79
80 /* Action for the reboot notifier; a global so kdb can change it */
81 static int kgdbreboot;
82 /* kgdb console driver is loaded */
83 static int kgdb_con_registered;
84 /* determine if kgdb console output should be used */
85 static int kgdb_use_con;
86 /* Flag for alternate operations for early debugging */
87 bool dbg_is_early = true;
88 /* Next cpu to become the master debug core */
89 int dbg_switch_cpu;
90
91 /* Use kdb or gdbserver mode */
92 int dbg_kdb_mode = 1;
93
94 module_param(kgdb_use_con, int, 0644);
95 module_param(kgdbreboot, int, 0644);
96
97 /*
98  * Holds information about breakpoints in a kernel. These breakpoints are
99  * added and removed by gdb.
100  */
101 static struct kgdb_bkpt         kgdb_break[KGDB_MAX_BREAKPOINTS] = {
102         [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
103 };
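
/*
 * Summary of the software breakpoint life cycle implemented by the helpers
 * further down in this file (derived from that code, added here as an
 * editorial aid):
 *
 *	BP_UNDEFINED --dbg_set_sw_break()--------------> BP_SET
 *	BP_SET       --dbg_activate_sw_breakpoints()---> BP_ACTIVE  (trap written)
 *	BP_ACTIVE    --dbg_deactivate_sw_breakpoints()-> BP_SET     (original restored)
 *	BP_SET       --dbg_remove_sw_break()-----------> BP_REMOVED
 *	any state    --dbg_remove_all_break()----------> BP_UNDEFINED
 */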
104
105 /*
106  * The CPU# of the active CPU, or -1 if none:
107  */
108 atomic_t                        kgdb_active = ATOMIC_INIT(-1);
109 EXPORT_SYMBOL_GPL(kgdb_active);
110 static DEFINE_RAW_SPINLOCK(dbg_master_lock);
111 static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
112
113 /*
114  * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
115  * bootup code (which might not have percpu set up yet):
116  */
117 static atomic_t                 masters_in_kgdb;
118 static atomic_t                 slaves_in_kgdb;
119 atomic_t                        kgdb_setting_breakpoint;
120
121 struct task_struct              *kgdb_usethread;
122 struct task_struct              *kgdb_contthread;
123
124 int                             kgdb_single_step;
125 static pid_t                    kgdb_sstep_pid;
126
127 /* To keep track of the CPU which is doing the single stepping */
128 atomic_t                        kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
129
130 /*
131  * If you are debugging a problem where roundup (the collection of
132  * all other CPUs) is a problem [this should be extremely rare],
133  * then use the nokgdbroundup option to avoid roundup. In that case
134  * the other CPUs might interfere with your debugging context, so
135  * use this with care:
136  */
137 static int kgdb_do_roundup = 1;
138
139 static int __init opt_nokgdbroundup(char *str)
140 {
141         kgdb_do_roundup = 0;
142
143         return 0;
144 }
145
146 early_param("nokgdbroundup", opt_nokgdbroundup);
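
/*
 * Illustrative usage (example serial setup, not taken from this file):
 * disable the roundup of secondary CPUs from the kernel command line, e.g.:
 *
 *	kgdboc=ttyS0,115200 nokgdbroundup
 */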
147
148 /*
149  * Finally, some KGDB code :-)
150  */
151
152 /*
153  * Weak aliases for breakpoint management,
154  * can be overridden by architectures when needed:
155  */
156 int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
157 {
158         int err;
159
160         err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
161                                 BREAK_INSTR_SIZE);
162         if (err)
163                 return err;
164         err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
165                                  arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
166         return err;
167 }
168 NOKPROBE_SYMBOL(kgdb_arch_set_breakpoint);
169
170 int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
171 {
172         return copy_to_kernel_nofault((char *)bpt->bpt_addr,
173                                   (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
174 }
175 NOKPROBE_SYMBOL(kgdb_arch_remove_breakpoint);
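
/*
 * Hedged sketch of an architecture override (hypothetical arch; the
 * my_arch_trap_opcode array is made up for illustration only): a port with a
 * fixed-size trap instruction usually mirrors the weak default above, saving
 * the original bytes and then planting its own opcode:
 *
 *	int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 *	{
 *		int err;
 *
 *		err = copy_from_kernel_nofault(bpt->saved_instr,
 *					       (char *)bpt->bpt_addr,
 *					       BREAK_INSTR_SIZE);
 *		if (err)
 *			return err;
 *		return copy_to_kernel_nofault((char *)bpt->bpt_addr,
 *					      my_arch_trap_opcode,
 *					      BREAK_INSTR_SIZE);
 *	}
 */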
176
177 int __weak kgdb_validate_break_address(unsigned long addr)
178 {
179         struct kgdb_bkpt tmp;
180         int err;
181
182         if (kgdb_within_blocklist(addr))
183                 return -EINVAL;
184
185         /* Validate setting the breakpoint and then removing it.  If the
186          * remove fails, the kernel needs to emit an error message because
187          * we are in deep trouble, not being able to put things back the
188          * way we found them.
189          */
190         tmp.bpt_addr = addr;
191         err = kgdb_arch_set_breakpoint(&tmp);
192         if (err)
193                 return err;
194         err = kgdb_arch_remove_breakpoint(&tmp);
195         if (err)
196                 pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
197                        addr);
198         return err;
199 }
200
201 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
202 {
203         return instruction_pointer(regs);
204 }
205 NOKPROBE_SYMBOL(kgdb_arch_pc);
206
207 int __weak kgdb_arch_init(void)
208 {
209         return 0;
210 }
211
212 int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
213 {
214         return 0;
215 }
216 NOKPROBE_SYMBOL(kgdb_skipexception);
217
218 #ifdef CONFIG_SMP
219
220 /*
221  * Default (weak) implementation for kgdb_roundup_cpus
222  */
223
224 void __weak kgdb_call_nmi_hook(void *ignored)
225 {
226         /*
227          * NOTE: get_irq_regs() is supposed to get the registers from
228          * before the IPI interrupt happened and so is supposed to
229          * show where the processor was.  In some situations it's
230          * possible we might be called without an IPI, so it might be
231          * safer to figure out how to make kgdb_breakpoint() work
232          * properly here.
233          */
234         kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
235 }
236 NOKPROBE_SYMBOL(kgdb_call_nmi_hook);
237
238 static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd) =
239         CSD_INIT(kgdb_call_nmi_hook, NULL);
240
241 void __weak kgdb_roundup_cpus(void)
242 {
243         call_single_data_t *csd;
244         int this_cpu = raw_smp_processor_id();
245         int cpu;
246         int ret;
247
248         for_each_online_cpu(cpu) {
249                 /* No need to roundup ourselves */
250                 if (cpu == this_cpu)
251                         continue;
252
253                 csd = &per_cpu(kgdb_roundup_csd, cpu);
254
255                 /*
256                  * If it didn't round up last time, don't try again
257                  * since smp_call_function_single_async() will block.
258                  *
259                  * If rounding_up is false then we know that the
260                  * previous call must have at least started and that
261                  * means smp_call_function_single_async() won't block.
262                  */
263                 if (kgdb_info[cpu].rounding_up)
264                         continue;
265                 kgdb_info[cpu].rounding_up = true;
266
267                 ret = smp_call_function_single_async(cpu, csd);
268                 if (ret)
269                         kgdb_info[cpu].rounding_up = false;
270         }
271 }
272 NOKPROBE_SYMBOL(kgdb_roundup_cpus);
273
274 #endif
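
/*
 * Hedged sketch (x86-style, for illustration only; not part of this file):
 * an architecture with a true NMI can override the weak kgdb_roundup_cpus()
 * above and broadcast an NMI rather than relying on the schedulable IPI used
 * by the default, roughly:
 *
 *	void kgdb_roundup_cpus(void)
 *	{
 *		apic->send_IPI_allbutself(NMI_VECTOR);
 *	}
 */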
275
276 /*
277  * Some architectures need cache flushes when we set/clear a
278  * breakpoint:
279  */
280 static void kgdb_flush_swbreak_addr(unsigned long addr)
281 {
282         if (!CACHE_FLUSH_IS_SAFE)
283                 return;
284
285         if (current->mm) {
286                 int i;
287
288                 for (i = 0; i < VMACACHE_SIZE; i++) {
289                         if (!current->vmacache.vmas[i])
290                                 continue;
291                         flush_cache_range(current->vmacache.vmas[i],
292                                           addr, addr + BREAK_INSTR_SIZE);
293                 }
294         }
295
296         /* Force flush instruction cache if it was outside the mm */
297         flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
298 }
299 NOKPROBE_SYMBOL(kgdb_flush_swbreak_addr);
300
301 /*
302  * SW breakpoint management:
303  */
304 int dbg_activate_sw_breakpoints(void)
305 {
306         int error;
307         int ret = 0;
308         int i;
309
310         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
311                 if (kgdb_break[i].state != BP_SET)
312                         continue;
313
314                 error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
315                 if (error) {
316                         ret = error;
317                         pr_info("BP install failed: %lx\n",
318                                 kgdb_break[i].bpt_addr);
319                         continue;
320                 }
321
322                 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
323                 kgdb_break[i].state = BP_ACTIVE;
324         }
325         return ret;
326 }
327 NOKPROBE_SYMBOL(dbg_activate_sw_breakpoints);
328
329 int dbg_set_sw_break(unsigned long addr)
330 {
331         int err = kgdb_validate_break_address(addr);
332         int breakno = -1;
333         int i;
334
335         if (err)
336                 return err;
337
338         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
339                 if ((kgdb_break[i].state == BP_SET) &&
340                                         (kgdb_break[i].bpt_addr == addr))
341                         return -EEXIST;
342         }
343         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
344                 if (kgdb_break[i].state == BP_REMOVED &&
345                                         kgdb_break[i].bpt_addr == addr) {
346                         breakno = i;
347                         break;
348                 }
349         }
350
351         if (breakno == -1) {
352                 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
353                         if (kgdb_break[i].state == BP_UNDEFINED) {
354                                 breakno = i;
355                                 break;
356                         }
357                 }
358         }
359
360         if (breakno == -1)
361                 return -E2BIG;
362
363         kgdb_break[breakno].state = BP_SET;
364         kgdb_break[breakno].type = BP_BREAKPOINT;
365         kgdb_break[breakno].bpt_addr = addr;
366
367         return 0;
368 }
369
370 int dbg_deactivate_sw_breakpoints(void)
371 {
372         int error;
373         int ret = 0;
374         int i;
375
376         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
377                 if (kgdb_break[i].state != BP_ACTIVE)
378                         continue;
379                 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
380                 if (error) {
381                         pr_info("BP remove failed: %lx\n",
382                                 kgdb_break[i].bpt_addr);
383                         ret = error;
384                 }
385
386                 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
387                 kgdb_break[i].state = BP_SET;
388         }
389         return ret;
390 }
391 NOKPROBE_SYMBOL(dbg_deactivate_sw_breakpoints);
392
393 int dbg_remove_sw_break(unsigned long addr)
394 {
395         int i;
396
397         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
398                 if ((kgdb_break[i].state == BP_SET) &&
399                                 (kgdb_break[i].bpt_addr == addr)) {
400                         kgdb_break[i].state = BP_REMOVED;
401                         return 0;
402                 }
403         }
404         return -ENOENT;
405 }
406
407 int kgdb_isremovedbreak(unsigned long addr)
408 {
409         int i;
410
411         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
412                 if ((kgdb_break[i].state == BP_REMOVED) &&
413                                         (kgdb_break[i].bpt_addr == addr))
414                         return 1;
415         }
416         return 0;
417 }
418
419 int kgdb_has_hit_break(unsigned long addr)
420 {
421         int i;
422
423         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
424                 if (kgdb_break[i].state == BP_ACTIVE &&
425                     kgdb_break[i].bpt_addr == addr)
426                         return 1;
427         }
428         return 0;
429 }
430
431 int dbg_remove_all_break(void)
432 {
433         int error;
434         int i;
435
436         /* Clear memory breakpoints. */
437         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
438                 if (kgdb_break[i].state != BP_ACTIVE)
439                         goto setundefined;
440                 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
441                 if (error)
442                         pr_err("breakpoint remove failed: %lx\n",
443                                kgdb_break[i].bpt_addr);
444 setundefined:
445                 kgdb_break[i].state = BP_UNDEFINED;
446         }
447
448         /* Clear hardware breakpoints. */
449         if (arch_kgdb_ops.remove_all_hw_break)
450                 arch_kgdb_ops.remove_all_hw_break();
451
452         return 0;
453 }
454
455 void kgdb_free_init_mem(void)
456 {
457         int i;
458
459         /* Clear init memory breakpoints. */
460         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
461                 if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
462                         kgdb_break[i].state = BP_UNDEFINED;
463         }
464 }
465
466 #ifdef CONFIG_KGDB_KDB
467 void kdb_dump_stack_on_cpu(int cpu)
468 {
469         if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
470                 dump_stack();
471                 return;
472         }
473
474         if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
475                 kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
476                            cpu);
477                 return;
478         }
479
480         /*
481          * In general, architectures don't support dumping the stack of a
482          * "running" process that's not the current one.  From the point of
483          * view of Linux, kernel processes that are looping in the kgdb
484          * slave loop are still "running".  There's also no API (that actually
485          * works across all architectures) that can do a stack crawl based
486          * on registers passed as a parameter.
487          *
488          * Solve this conundrum by asking slave CPUs to do the backtrace
489          * themselves.
490          */
491         kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
492         while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
493                 cpu_relax();
494 }
495 #endif
496
497 /*
498  * Return true if there is a valid kgdb I/O module.  Also, if no
499  * debugger is attached, a message can be printed to the console about
500  * waiting for the debugger to attach.
501  *
502  * The print_wait argument should only be true when called from inside
503  * the core kgdb_handle_exception(), because that path will wait for
504  * the debugger to attach.
505  */
506 static int kgdb_io_ready(int print_wait)
507 {
508         if (!dbg_io_ops)
509                 return 0;
510         if (kgdb_connected)
511                 return 1;
512         if (atomic_read(&kgdb_setting_breakpoint))
513                 return 1;
514         if (print_wait) {
515 #ifdef CONFIG_KGDB_KDB
516                 if (!dbg_kdb_mode)
517                         pr_crit("waiting... or $3#33 for KDB\n");
518 #else
519                 pr_crit("Waiting for remote debugger\n");
520 #endif
521         }
522         return 1;
523 }
524 NOKPROBE_SYMBOL(kgdb_io_ready);
525
526 static int kgdb_reenter_check(struct kgdb_state *ks)
527 {
528         unsigned long addr;
529
530         if (atomic_read(&kgdb_active) != raw_smp_processor_id())
531                 return 0;
532
533         /* Panic on recursive debugger calls: */
534         exception_level++;
535         addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
536         dbg_deactivate_sw_breakpoints();
537
538         /*
539          * If the breakpoint was removed successfully at the place the
540          * exception occurred, try to recover and print a warning to the
541          * end user, because the user planted a breakpoint in a place
542          * that KGDB needs in order to function.
543          */
544         if (dbg_remove_sw_break(addr) == 0) {
545                 exception_level = 0;
546                 kgdb_skipexception(ks->ex_vector, ks->linux_regs);
547                 dbg_activate_sw_breakpoints();
548                 pr_crit("re-enter error: breakpoint removed %lx\n", addr);
549                 WARN_ON_ONCE(1);
550
551                 return 1;
552         }
553         dbg_remove_all_break();
554         kgdb_skipexception(ks->ex_vector, ks->linux_regs);
555
556         if (exception_level > 1) {
557                 dump_stack();
558                 kgdb_io_module_registered = false;
559                 panic("Recursive entry to debugger");
560         }
561
562         pr_crit("re-enter exception: ALL breakpoints killed\n");
563 #ifdef CONFIG_KGDB_KDB
564         /* Allow kdb to debug itself one level */
565         return 0;
566 #endif
567         dump_stack();
568         panic("Recursive entry to debugger");
569
570         return 1;
571 }
572 NOKPROBE_SYMBOL(kgdb_reenter_check);
573
574 static void dbg_touch_watchdogs(void)
575 {
576         touch_softlockup_watchdog_sync();
577         clocksource_touch_watchdog();
578         rcu_cpu_stall_reset();
579 }
580 NOKPROBE_SYMBOL(dbg_touch_watchdogs);
581
582 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
583                 int exception_state)
584 {
585         unsigned long flags;
586         int sstep_tries = 100;
587         int error;
588         int cpu;
589         int trace_on = 0;
590         int online_cpus = num_online_cpus();
591         u64 time_left;
592
593         kgdb_info[ks->cpu].enter_kgdb++;
594         kgdb_info[ks->cpu].exception_state |= exception_state;
595
596         if (exception_state == DCPU_WANT_MASTER)
597                 atomic_inc(&masters_in_kgdb);
598         else
599                 atomic_inc(&slaves_in_kgdb);
600
601         if (arch_kgdb_ops.disable_hw_break)
602                 arch_kgdb_ops.disable_hw_break(regs);
603
604 acquirelock:
605         rcu_read_lock();
606         /*
607          * Interrupts will be restored by the 'trap return' code, except when
608          * single stepping.
609          */
610         local_irq_save(flags);
611
612         cpu = ks->cpu;
613         kgdb_info[cpu].debuggerinfo = regs;
614         kgdb_info[cpu].task = current;
615         kgdb_info[cpu].ret_state = 0;
616         kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
617
618         /* Make sure the above info reaches the primary CPU */
619         smp_mb();
620
621         if (exception_level == 1) {
622                 if (raw_spin_trylock(&dbg_master_lock))
623                         atomic_xchg(&kgdb_active, cpu);
624                 goto cpu_master_loop;
625         }
626
627         /*
628          * The CPU will loop if it is a slave, or will request to become
629          * the kgdb master CPU and acquire the kgdb_active lock:
630          */
631         while (1) {
632 cpu_loop:
633                 if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
634                         kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
635                         goto cpu_master_loop;
636                 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
637                         if (raw_spin_trylock(&dbg_master_lock)) {
638                                 atomic_xchg(&kgdb_active, cpu);
639                                 break;
640                         }
641                 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
642                         dump_stack();
643                         kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
644                 } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
645                         if (!raw_spin_is_locked(&dbg_slave_lock))
646                                 goto return_normal;
647                 } else {
648 return_normal:
649                         /* Return to normal operation by executing any
650                          * hw breakpoint fixup.
651                          */
652                         if (arch_kgdb_ops.correct_hw_break)
653                                 arch_kgdb_ops.correct_hw_break();
654                         if (trace_on)
655                                 tracing_on();
656                         kgdb_info[cpu].debuggerinfo = NULL;
657                         kgdb_info[cpu].task = NULL;
658                         kgdb_info[cpu].exception_state &=
659                                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
660                         kgdb_info[cpu].enter_kgdb--;
661                         smp_mb__before_atomic();
662                         atomic_dec(&slaves_in_kgdb);
663                         dbg_touch_watchdogs();
664                         local_irq_restore(flags);
665                         rcu_read_unlock();
666                         return 0;
667                 }
668                 cpu_relax();
669         }
670
671         /*
672          * For single stepping, try to only enter on the processor
673          * that was single stepping.  To guard against a deadlock, the
674          * kernel will only retry sstep_tries times before giving up
675          * and continuing on.
676          */
677         if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
678             (kgdb_info[cpu].task &&
679              kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
680                 atomic_set(&kgdb_active, -1);
681                 raw_spin_unlock(&dbg_master_lock);
682                 dbg_touch_watchdogs();
683                 local_irq_restore(flags);
684                 rcu_read_unlock();
685
686                 goto acquirelock;
687         }
688
689         if (!kgdb_io_ready(1)) {
690                 kgdb_info[cpu].ret_state = 1;
691                 goto kgdb_restore; /* No I/O connection, resume the system */
692         }
693
694         /*
695          * Don't enter if we have hit a removed breakpoint.
696          */
697         if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
698                 goto kgdb_restore;
699
700         atomic_inc(&ignore_console_lock_warning);
701
702         /* Call the I/O driver's pre_exception routine */
703         if (dbg_io_ops->pre_exception)
704                 dbg_io_ops->pre_exception();
705
706         /*
707          * Get the passive CPU lock which will hold all the non-primary
708          * CPUs in a spin state while the debugger is active.
709          */
710         if (!kgdb_single_step)
711                 raw_spin_lock(&dbg_slave_lock);
712
713 #ifdef CONFIG_SMP
714         /* If send_ready set, slaves are already waiting */
715         if (ks->send_ready)
716                 atomic_set(ks->send_ready, 1);
717
718         /* Signal the other CPUs to enter kgdb_wait() */
719         else if ((!kgdb_single_step) && kgdb_do_roundup)
720                 kgdb_roundup_cpus();
721 #endif
722
723         /*
724          * Wait for the other CPUs to be notified and be waiting for us:
725          */
726         time_left = MSEC_PER_SEC;
727         while (kgdb_do_roundup && --time_left &&
728                (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
729                    online_cpus)
730                 udelay(1000);
731         if (!time_left)
732                 pr_crit("Timed out waiting for secondary CPUs.\n");
733
734         /*
735          * At this point the primary processor is completely
736          * in the debugger and all secondary CPUs are quiescent
737          */
738         dbg_deactivate_sw_breakpoints();
739         kgdb_single_step = 0;
740         kgdb_contthread = current;
741         exception_level = 0;
742         trace_on = tracing_is_on();
743         if (trace_on)
744                 tracing_off();
745
746         while (1) {
747 cpu_master_loop:
748                 if (dbg_kdb_mode) {
749                         kgdb_connected = 1;
750                         error = kdb_stub(ks);
751                         if (error == -1)
752                                 continue;
753                         kgdb_connected = 0;
754                 } else {
755                         error = gdb_serial_stub(ks);
756                 }
757
758                 if (error == DBG_PASS_EVENT) {
759                         dbg_kdb_mode = !dbg_kdb_mode;
760                 } else if (error == DBG_SWITCH_CPU_EVENT) {
761                         kgdb_info[dbg_switch_cpu].exception_state |=
762                                 DCPU_NEXT_MASTER;
763                         goto cpu_loop;
764                 } else {
765                         kgdb_info[cpu].ret_state = error;
766                         break;
767                 }
768         }
769
770         dbg_activate_sw_breakpoints();
771
772         /* Call the I/O driver's post_exception routine */
773         if (dbg_io_ops->post_exception)
774                 dbg_io_ops->post_exception();
775
776         atomic_dec(&ignore_console_lock_warning);
777
778         if (!kgdb_single_step) {
779                 raw_spin_unlock(&dbg_slave_lock);
780                 /* Wait till all the CPUs have quit from the debugger. */
781                 while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
782                         cpu_relax();
783         }
784
785 kgdb_restore:
786         if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
787                 int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
788                 if (kgdb_info[sstep_cpu].task)
789                         kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
790                 else
791                         kgdb_sstep_pid = 0;
792         }
793         if (arch_kgdb_ops.correct_hw_break)
794                 arch_kgdb_ops.correct_hw_break();
795         if (trace_on)
796                 tracing_on();
797
798         kgdb_info[cpu].debuggerinfo = NULL;
799         kgdb_info[cpu].task = NULL;
800         kgdb_info[cpu].exception_state &=
801                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
802         kgdb_info[cpu].enter_kgdb--;
803         smp_mb__before_atomic();
804         atomic_dec(&masters_in_kgdb);
805         /* Free kgdb_active */
806         atomic_set(&kgdb_active, -1);
807         raw_spin_unlock(&dbg_master_lock);
808         dbg_touch_watchdogs();
809         local_irq_restore(flags);
810         rcu_read_unlock();
811
812         return kgdb_info[cpu].ret_state;
813 }
814 NOKPROBE_SYMBOL(kgdb_cpu_enter);
815
816 /*
817  * kgdb_handle_exception() - main entry point from a kernel exception
818  *
819  * Locking hierarchy:
820  *      interface locks, if any (begin_session)
821  *      kgdb lock (kgdb_active)
822  */
823 int
824 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
825 {
826         struct kgdb_state kgdb_var;
827         struct kgdb_state *ks = &kgdb_var;
828         int ret = 0;
829
830         if (arch_kgdb_ops.enable_nmi)
831                 arch_kgdb_ops.enable_nmi(0);
832         /*
833          * Avoid entering the debugger if we were triggered due to an oops
834          * but panic_timeout indicates the system should automatically
835          * reboot on panic. We don't want to get stuck waiting for input
836          * on such systems, especially if it's "just" an oops.
837          */
838         if (signo != SIGTRAP && panic_timeout)
839                 return 1;
840
841         memset(ks, 0, sizeof(struct kgdb_state));
842         ks->cpu                 = raw_smp_processor_id();
843         ks->ex_vector           = evector;
844         ks->signo               = signo;
845         ks->err_code            = ecode;
846         ks->linux_regs          = regs;
847
848         if (kgdb_reenter_check(ks))
849                 goto out; /* Ouch, double exception ! */
850         if (kgdb_info[ks->cpu].enter_kgdb != 0)
851                 goto out;
852
853         ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
854 out:
855         if (arch_kgdb_ops.enable_nmi)
856                 arch_kgdb_ops.enable_nmi(1);
857         return ret;
858 }
859 NOKPROBE_SYMBOL(kgdb_handle_exception);
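
/*
 * Hedged sketch (hypothetical arch trap handler, illustration only): an
 * architecture's debug-trap path normally funnels the exception into the
 * core and only falls back to its regular handling when kgdb declines it:
 *
 *	if (kgdb_handle_exception(trapnr, SIGTRAP, error_code, regs))
 *		do_regular_trap(regs);
 *
 * where do_regular_trap() stands in for the arch's normal fallback path.
 */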
860
861 /*
862  * GDB places a breakpoint at this function to learn about dynamically loaded objects.
863  */
864 static int module_event(struct notifier_block *self, unsigned long val,
865         void *data)
866 {
867         return 0;
868 }
869
870 static struct notifier_block dbg_module_load_nb = {
871         .notifier_call  = module_event,
872 };
873
874 int kgdb_nmicallback(int cpu, void *regs)
875 {
876 #ifdef CONFIG_SMP
877         struct kgdb_state kgdb_var;
878         struct kgdb_state *ks = &kgdb_var;
879
880         kgdb_info[cpu].rounding_up = false;
881
882         memset(ks, 0, sizeof(struct kgdb_state));
883         ks->cpu                 = cpu;
884         ks->linux_regs          = regs;
885
886         if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
887                         raw_spin_is_locked(&dbg_master_lock)) {
888                 kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
889                 return 0;
890         }
891 #endif
892         return 1;
893 }
894 NOKPROBE_SYMBOL(kgdb_nmicallback);
895
896 int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
897                                                         atomic_t *send_ready)
898 {
899 #ifdef CONFIG_SMP
900         if (!kgdb_io_ready(0) || !send_ready)
901                 return 1;
902
903         if (kgdb_info[cpu].enter_kgdb == 0) {
904                 struct kgdb_state kgdb_var;
905                 struct kgdb_state *ks = &kgdb_var;
906
907                 memset(ks, 0, sizeof(struct kgdb_state));
908                 ks->cpu                 = cpu;
909                 ks->ex_vector           = trapnr;
910                 ks->signo               = SIGTRAP;
911                 ks->err_code            = err_code;
912                 ks->linux_regs          = regs;
913                 ks->send_ready          = send_ready;
914                 kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
915                 return 0;
916         }
917 #endif
918         return 1;
919 }
920 NOKPROBE_SYMBOL(kgdb_nmicallin);
921
922 static void kgdb_console_write(struct console *co, const char *s,
923    unsigned count)
924 {
925         unsigned long flags;
926
927         /* If we're debugging, or KGDB has not connected, don't try
928          * to print. */
929         if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
930                 return;
931
932         local_irq_save(flags);
933         gdbstub_msg_write(s, count);
934         local_irq_restore(flags);
935 }
936
937 static struct console kgdbcons = {
938         .name           = "kgdb",
939         .write          = kgdb_console_write,
940         .flags          = CON_PRINTBUFFER | CON_ENABLED,
941         .index          = -1,
942 };
943
944 static int __init opt_kgdb_con(char *str)
945 {
946         kgdb_use_con = 1;
947
948         if (kgdb_io_module_registered && !kgdb_con_registered) {
949                 register_console(&kgdbcons);
950                 kgdb_con_registered = 1;
951         }
952
953         return 0;
954 }
955
956 early_param("kgdbcon", opt_kgdb_con);
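
/*
 * Illustrative usage (example serial setup, not from this file): mirror
 * kernel console output over the debug connection by booting with:
 *
 *	kgdboc=ttyS0,115200 kgdbcon
 */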
957
958 #ifdef CONFIG_MAGIC_SYSRQ
959 static void sysrq_handle_dbg(int key)
960 {
961         if (!dbg_io_ops) {
962                 pr_crit("ERROR: No KGDB I/O module available\n");
963                 return;
964         }
965         if (!kgdb_connected) {
966 #ifdef CONFIG_KGDB_KDB
967                 if (!dbg_kdb_mode)
968                         pr_crit("KGDB or $3#33 for KDB\n");
969 #else
970                 pr_crit("Entering KGDB\n");
971 #endif
972         }
973
974         kgdb_breakpoint();
975 }
976
977 static const struct sysrq_key_op sysrq_dbg_op = {
978         .handler        = sysrq_handle_dbg,
979         .help_msg       = "debug(g)",
980         .action_msg     = "DEBUG",
981 };
982 #endif
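
/*
 * Illustrative usage: with CONFIG_MAGIC_SYSRQ enabled, the handler above can
 * be invoked at runtime from user space:
 *
 *	echo g > /proc/sysrq-trigger
 */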
983
984 void kgdb_panic(const char *msg)
985 {
986         if (!kgdb_io_module_registered)
987                 return;
988
989         /*
990          * We don't want to get stuck waiting for input from user if
991          * "panic_timeout" indicates the system should automatically
992          * reboot on panic.
993          */
994         if (panic_timeout)
995                 return;
996
997         if (dbg_kdb_mode)
998                 kdb_printf("PANIC: %s\n", msg);
999
1000         kgdb_breakpoint();
1001 }
1002
1003 static void kgdb_initial_breakpoint(void)
1004 {
1005         kgdb_break_asap = 0;
1006
1007         pr_crit("Waiting for connection from remote gdb...\n");
1008         kgdb_breakpoint();
1009 }
1010
1011 void __weak kgdb_arch_late(void)
1012 {
1013 }
1014
1015 void __init dbg_late_init(void)
1016 {
1017         dbg_is_early = false;
1018         if (kgdb_io_module_registered)
1019                 kgdb_arch_late();
1020         kdb_init(KDB_INIT_FULL);
1021
1022         if (kgdb_io_module_registered && kgdb_break_asap)
1023                 kgdb_initial_breakpoint();
1024 }
1025
1026 static int
1027 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
1028 {
1029         /*
1030          * Take the following action on reboot notify depending on value:
1031          *    1 == Enter debugger
1032          *    0 == [the default] detach debug client
1033          *   -1 == Do nothing... and use this until the board resets
1034          */
1035         switch (kgdbreboot) {
1036         case 1:
1037                 kgdb_breakpoint();
1038                 goto done;
1039         case -1:
1040                 goto done;
1041         }
1042         if (!dbg_kdb_mode)
1043                 gdbstub_exit(code);
1044 done:
1045         return NOTIFY_DONE;
1046 }
1047
1048 static struct notifier_block dbg_reboot_notifier = {
1049         .notifier_call          = dbg_notify_reboot,
1050         .next                   = NULL,
1051         .priority               = INT_MAX,
1052 };
1053
1054 static void kgdb_register_callbacks(void)
1055 {
1056         if (!kgdb_io_module_registered) {
1057                 kgdb_io_module_registered = 1;
1058                 kgdb_arch_init();
1059                 if (!dbg_is_early)
1060                         kgdb_arch_late();
1061                 register_module_notifier(&dbg_module_load_nb);
1062                 register_reboot_notifier(&dbg_reboot_notifier);
1063 #ifdef CONFIG_MAGIC_SYSRQ
1064                 register_sysrq_key('g', &sysrq_dbg_op);
1065 #endif
1066                 if (kgdb_use_con && !kgdb_con_registered) {
1067                         register_console(&kgdbcons);
1068                         kgdb_con_registered = 1;
1069                 }
1070         }
1071 }
1072
1073 static void kgdb_unregister_callbacks(void)
1074 {
1075         /*
1076          * When this routine is called KGDB should unregister from
1077          * handlers and clean up, making sure it is not handling any
1078          * break exceptions at the time.
1079          */
1080         if (kgdb_io_module_registered) {
1081                 kgdb_io_module_registered = 0;
1082                 unregister_reboot_notifier(&dbg_reboot_notifier);
1083                 unregister_module_notifier(&dbg_module_load_nb);
1084                 kgdb_arch_exit();
1085 #ifdef CONFIG_MAGIC_SYSRQ
1086                 unregister_sysrq_key('g', &sysrq_dbg_op);
1087 #endif
1088                 if (kgdb_con_registered) {
1089                         unregister_console(&kgdbcons);
1090                         kgdb_con_registered = 0;
1091                 }
1092         }
1093 }
1094
1095 /**
1096  *      kgdb_register_io_module - register KGDB IO module
1097  *      @new_dbg_io_ops: the io ops vector
1098  *
1099  *      Register it with the KGDB core.
1100  */
1101 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
1102 {
1103         struct kgdb_io *old_dbg_io_ops;
1104         int err;
1105
1106         spin_lock(&kgdb_registration_lock);
1107
1108         old_dbg_io_ops = dbg_io_ops;
1109         if (old_dbg_io_ops) {
1110                 if (!old_dbg_io_ops->deinit) {
1111                         spin_unlock(&kgdb_registration_lock);
1112
1113                         pr_err("KGDB I/O driver %s can't replace %s.\n",
1114                                 new_dbg_io_ops->name, old_dbg_io_ops->name);
1115                         return -EBUSY;
1116                 }
1117                 pr_info("Replacing I/O driver %s with %s\n",
1118                         old_dbg_io_ops->name, new_dbg_io_ops->name);
1119         }
1120
1121         if (new_dbg_io_ops->init) {
1122                 err = new_dbg_io_ops->init();
1123                 if (err) {
1124                         spin_unlock(&kgdb_registration_lock);
1125                         return err;
1126                 }
1127         }
1128
1129         dbg_io_ops = new_dbg_io_ops;
1130
1131         spin_unlock(&kgdb_registration_lock);
1132
1133         if (old_dbg_io_ops) {
1134                 old_dbg_io_ops->deinit();
1135                 return 0;
1136         }
1137
1138         pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
1139
1140         /* Arm KGDB now. */
1141         kgdb_register_callbacks();
1142
1143         if (kgdb_break_asap &&
1144             (!dbg_is_early || IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG)))
1145                 kgdb_initial_breakpoint();
1146
1147         return 0;
1148 }
1149 EXPORT_SYMBOL_GPL(kgdb_register_io_module);
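
/*
 * Hedged sketch (hypothetical driver names, illustration only): a polled I/O
 * driver fills in a struct kgdb_io and hands it to the core, e.g.:
 *
 *	static struct kgdb_io my_dbg_io_ops = {
 *		.name		= "my_dbg_io",
 *		.read_char	= my_dbg_get_char,
 *		.write_char	= my_dbg_put_char,
 *	};
 *
 *	ret = kgdb_register_io_module(&my_dbg_io_ops);
 */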
1150
1151 /**
1152  *      kgdb_unregister_io_module - unregister KGDB IO module
1153  *      @old_dbg_io_ops: the io ops vector
1154  *
1155  *      Unregister it with the KGDB core.
1156  */
1157 void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
1158 {
1159         BUG_ON(kgdb_connected);
1160
1161         /*
1162          * KGDB is no longer able to communicate out, so
1163          * unregister our callbacks and reset state.
1164          */
1165         kgdb_unregister_callbacks();
1166
1167         spin_lock(&kgdb_registration_lock);
1168
1169         WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
1170         dbg_io_ops = NULL;
1171
1172         spin_unlock(&kgdb_registration_lock);
1173
1174         if (old_dbg_io_ops->deinit)
1175                 old_dbg_io_ops->deinit();
1176
1177         pr_info("Unregistered I/O driver %s, debugger disabled\n",
1178                 old_dbg_io_ops->name);
1179 }
1180 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
1181
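/*
 * Poll one character from the registered I/O driver.  NO_POLL_CHAR maps to
 * -1; in kdb mode, DEL (127) is translated to backspace (8).
 */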
1182 int dbg_io_get_char(void)
1183 {
1184         int ret = dbg_io_ops->read_char();
1185         if (ret == NO_POLL_CHAR)
1186                 return -1;
1187         if (!dbg_kdb_mode)
1188                 return ret;
1189         if (ret == 127)
1190                 return 8;
1191         return ret;
1192 }
1193
1194 /**
1195  * kgdb_breakpoint - generate breakpoint exception
1196  *
1197  * This function will generate a breakpoint exception.  It is used at the
1198  * beginning of a program to sync up with a debugger and can be used
1199  * otherwise as a quick means to stop program execution and "break" into
1200  * the debugger.
1201  */
1202 noinline void kgdb_breakpoint(void)
1203 {
1204         atomic_inc(&kgdb_setting_breakpoint);
1205         wmb(); /* Sync point before breakpoint */
1206         arch_kgdb_breakpoint();
1207         wmb(); /* Sync point after breakpoint */
1208         atomic_dec(&kgdb_setting_breakpoint);
1209 }
1210 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
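
/*
 * Illustrative call site (hypothetical condition, not from this file): code
 * that wants to stop in the attached debugger when it detects a rare state
 * can simply do:
 *
 *	if (unlikely(looks_corrupted))
 *		kgdb_breakpoint();
 */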
1211
1212 static int __init opt_kgdb_wait(char *str)
1213 {
1214         kgdb_break_asap = 1;
1215
1216         kdb_init(KDB_INIT_EARLY);
1217         if (kgdb_io_module_registered &&
1218             IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG))
1219                 kgdb_initial_breakpoint();
1220
1221         return 0;
1222 }
1223
1224 early_param("kgdbwait", opt_kgdb_wait);
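
/*
 * Illustrative usage (example serial setup, not from this file): halt the
 * kernel during boot and wait for the debugger to attach by booting with:
 *
 *	kgdboc=ttyS0,115200 kgdbwait
 */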