1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel Debug Core
4  *
5  * Maintainer: Jason Wessel <jason.wessel@windriver.com>
6  *
7  * Copyright (C) 2000-2001 VERITAS Software Corporation.
8  * Copyright (C) 2002-2004 Timesys Corporation
9  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
10  * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
11  * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
12  * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
13  * Copyright (C) 2005-2009 Wind River Systems, Inc.
14  * Copyright (C) 2007 MontaVista Software, Inc.
15  * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
16  *
17  * Contributors at various stages not listed above:
18  *  Jason Wessel ( jason.wessel@windriver.com )
19  *  George Anzinger <george@mvista.com>
20  *  Anurekh Saxena (anurekh.saxena@timesys.com)
21  *  Lake Stevens Instrument Division (Glenn Engel)
22  *  Jim Kingdon, Cygnus Support.
23  *
24  * Original KGDB stub: David Grothe <dave@gcom.com>,
25  * Tigran Aivazian <tigran@sco.com>
26  */
27
28 #define pr_fmt(fmt) "KGDB: " fmt
29
30 #include <linux/pid_namespace.h>
31 #include <linux/clocksource.h>
32 #include <linux/serial_core.h>
33 #include <linux/interrupt.h>
34 #include <linux/spinlock.h>
35 #include <linux/console.h>
36 #include <linux/threads.h>
37 #include <linux/uaccess.h>
38 #include <linux/kernel.h>
39 #include <linux/module.h>
40 #include <linux/ptrace.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/sched.h>
44 #include <linux/sysrq.h>
45 #include <linux/reboot.h>
46 #include <linux/init.h>
47 #include <linux/kgdb.h>
48 #include <linux/kdb.h>
49 #include <linux/nmi.h>
50 #include <linux/pid.h>
51 #include <linux/smp.h>
52 #include <linux/mm.h>
53 #include <linux/vmacache.h>
54 #include <linux/rcupdate.h>
55 #include <linux/irq.h>
56 #include <linux/security.h>
57
58 #include <asm/cacheflush.h>
59 #include <asm/byteorder.h>
60 #include <linux/atomic.h>
61
62 #include "debug_core.h"
63
64 static int kgdb_break_asap;
65
66 struct debuggerinfo_struct kgdb_info[NR_CPUS];
67
68 /* kgdb_connected - Is a host GDB connected to us? */
69 int                             kgdb_connected;
70 EXPORT_SYMBOL_GPL(kgdb_connected);
71
72 /* All the KGDB handlers are installed */
73 int                     kgdb_io_module_registered;
74
75 /* Guard for recursive entry */
76 static int                      exception_level;
77
78 struct kgdb_io          *dbg_io_ops;
79 static DEFINE_SPINLOCK(kgdb_registration_lock);
80
81 /* Action for the reboot notifier, a global so kdb can change it */
82 static int kgdbreboot;
83 /* kgdb console driver is loaded */
84 static int kgdb_con_registered;
85 /* determine if kgdb console output should be used */
86 static int kgdb_use_con;
87 /* Flag for alternate operations for early debugging */
88 bool dbg_is_early = true;
89 /* Next cpu to become the master debug core */
90 int dbg_switch_cpu;
91
92 /* Use kdb or gdbserver mode */
93 int dbg_kdb_mode = 1;
94
95 module_param(kgdb_use_con, int, 0644);
96 module_param(kgdbreboot, int, 0644);
97
98 /*
99  * Holds information about breakpoints in a kernel. These breakpoints are
100  * added and removed by gdb.
101  */
102 static struct kgdb_bkpt         kgdb_break[KGDB_MAX_BREAKPOINTS] = {
103         [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
104 };
105
106 /*
107  * The CPU# of the active CPU, or -1 if none:
108  */
109 atomic_t                        kgdb_active = ATOMIC_INIT(-1);
110 EXPORT_SYMBOL_GPL(kgdb_active);
111 static DEFINE_RAW_SPINLOCK(dbg_master_lock);
112 static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
113
114 /*
115  * We use NR_CPUS, not PERCPU, in case kgdb is used to debug early
116  * bootup code (which might not have percpu set up yet):
117  */
118 static atomic_t                 masters_in_kgdb;
119 static atomic_t                 slaves_in_kgdb;
120 atomic_t                        kgdb_setting_breakpoint;
121
122 struct task_struct              *kgdb_usethread;
123 struct task_struct              *kgdb_contthread;
124
125 int                             kgdb_single_step;
126 static pid_t                    kgdb_sstep_pid;
127
128 /* To keep track of the CPU which is doing the single stepping */
129 atomic_t                        kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
130
131 /*
132  * If you are debugging a case where the roundup (the collection of
133  * all other CPUs) is itself causing trouble [this should be extremely
134  * rare], use the nokgdbroundup option to avoid the roundup. In that
135  * case the other CPUs might interfere with your debugging context, so
136  * use this with care:
137  */
138 static int kgdb_do_roundup = 1;
139
140 static int __init opt_nokgdbroundup(char *str)
141 {
142         kgdb_do_roundup = 0;
143
144         return 0;
145 }
146
147 early_param("nokgdbroundup", opt_nokgdbroundup);
148
149 /*
150  * Finally, some KGDB code :-)
151  */
152
153 /*
154  * Weak aliases for breakpoint management,
155  * can be overridden by architectures when needed:
156  */
157 int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
158 {
159         int err;
160
161         err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
162                                 BREAK_INSTR_SIZE);
163         if (err)
164                 return err;
165         err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
166                                  arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
167         return err;
168 }
169 NOKPROBE_SYMBOL(kgdb_arch_set_breakpoint);
170
171 int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
172 {
173         return copy_to_kernel_nofault((char *)bpt->bpt_addr,
174                                   (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
175 }
176 NOKPROBE_SYMBOL(kgdb_arch_remove_breakpoint);
177
178 int __weak kgdb_validate_break_address(unsigned long addr)
179 {
180         struct kgdb_bkpt tmp;
181         int err;
182
183         if (kgdb_within_blocklist(addr))
184                 return -EINVAL;
185
186         /* Validate setting the breakpoint and then removing it.  If the
187          * remove fails, the kernel needs to emit a bad message because we
188          * are in deep trouble, not being able to put things back the way we
189          * found them.
190          */
191         tmp.bpt_addr = addr;
192         err = kgdb_arch_set_breakpoint(&tmp);
193         if (err)
194                 return err;
195         err = kgdb_arch_remove_breakpoint(&tmp);
196         if (err)
197                 pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
198                        addr);
199         return err;
200 }
201
202 unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
203 {
204         return instruction_pointer(regs);
205 }
206 NOKPROBE_SYMBOL(kgdb_arch_pc);
207
208 int __weak kgdb_arch_init(void)
209 {
210         return 0;
211 }
212
213 int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
214 {
215         return 0;
216 }
217 NOKPROBE_SYMBOL(kgdb_skipexception);
218
219 #ifdef CONFIG_SMP
220
221 /*
222  * Default (weak) implementation for kgdb_roundup_cpus
223  */
224
225 void __weak kgdb_call_nmi_hook(void *ignored)
226 {
227         /*
228          * NOTE: get_irq_regs() is supposed to get the registers from
229          * before the IPI interrupt happened and so is supposed to
230          * show where the processor was.  In some situations it's
231          * possible we might be called without an IPI, so it might be
232          * safer to figure out how to make kgdb_breakpoint() work
233          * properly here.
234          */
235         kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
236 }
237 NOKPROBE_SYMBOL(kgdb_call_nmi_hook);
238
239 static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd) =
240         CSD_INIT(kgdb_call_nmi_hook, NULL);
241
242 void __weak kgdb_roundup_cpus(void)
243 {
244         call_single_data_t *csd;
245         int this_cpu = raw_smp_processor_id();
246         int cpu;
247         int ret;
248
249         for_each_online_cpu(cpu) {
250                 /* No need to roundup ourselves */
251                 if (cpu == this_cpu)
252                         continue;
253
254                 csd = &per_cpu(kgdb_roundup_csd, cpu);
255
256                 /*
257                  * If it didn't round up last time, don't try again
258                  * since smp_call_function_single_async() will block.
259                  *
260                  * If rounding_up is false then we know that the
261                  * previous call must have at least started and that
262                  * means smp_call_function_single_async() won't block.
263                  */
264                 if (kgdb_info[cpu].rounding_up)
265                         continue;
266                 kgdb_info[cpu].rounding_up = true;
267
268                 ret = smp_call_function_single_async(cpu, csd);
269                 if (ret)
270                         kgdb_info[cpu].rounding_up = false;
271         }
272 }
273 NOKPROBE_SYMBOL(kgdb_roundup_cpus);
274
275 #endif
276
277 /*
278  * Some architectures need cache flushes when we set/clear a
279  * breakpoint:
280  */
281 static void kgdb_flush_swbreak_addr(unsigned long addr)
282 {
283         if (!CACHE_FLUSH_IS_SAFE)
284                 return;
285
286         if (current->mm) {
287                 int i;
288
289                 for (i = 0; i < VMACACHE_SIZE; i++) {
290                         if (!current->vmacache.vmas[i])
291                                 continue;
292                         flush_cache_range(current->vmacache.vmas[i],
293                                           addr, addr + BREAK_INSTR_SIZE);
294                 }
295         }
296
297         /* Force flush instruction cache if it was outside the mm */
298         flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
299 }
300 NOKPROBE_SYMBOL(kgdb_flush_swbreak_addr);
301
302 /*
303  * SW breakpoint management:
304  */
305 int dbg_activate_sw_breakpoints(void)
306 {
307         int error;
308         int ret = 0;
309         int i;
310
311         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
312                 if (kgdb_break[i].state != BP_SET)
313                         continue;
314
315                 error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
316                 if (error) {
317                         ret = error;
318                         pr_info("BP install failed: %lx\n",
319                                 kgdb_break[i].bpt_addr);
320                         continue;
321                 }
322
323                 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
324                 kgdb_break[i].state = BP_ACTIVE;
325         }
326         return ret;
327 }
328 NOKPROBE_SYMBOL(dbg_activate_sw_breakpoints);
329
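    /*
     * Record a breakpoint at @addr in the breakpoint table; it is only
     * written into kernel memory later, by dbg_activate_sw_breakpoints().
     */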
330 int dbg_set_sw_break(unsigned long addr)
331 {
332         int err = kgdb_validate_break_address(addr);
333         int breakno = -1;
334         int i;
335
336         if (err)
337                 return err;
338
339         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
340                 if ((kgdb_break[i].state == BP_SET) &&
341                                         (kgdb_break[i].bpt_addr == addr))
342                         return -EEXIST;
343         }
344         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
345                 if (kgdb_break[i].state == BP_REMOVED &&
346                                         kgdb_break[i].bpt_addr == addr) {
347                         breakno = i;
348                         break;
349                 }
350         }
351
352         if (breakno == -1) {
353                 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
354                         if (kgdb_break[i].state == BP_UNDEFINED) {
355                                 breakno = i;
356                                 break;
357                         }
358                 }
359         }
360
361         if (breakno == -1)
362                 return -E2BIG;
363
364         kgdb_break[breakno].state = BP_SET;
365         kgdb_break[breakno].type = BP_BREAKPOINT;
366         kgdb_break[breakno].bpt_addr = addr;
367
368         return 0;
369 }
370
371 int dbg_deactivate_sw_breakpoints(void)
372 {
373         int error;
374         int ret = 0;
375         int i;
376
377         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
378                 if (kgdb_break[i].state != BP_ACTIVE)
379                         continue;
380                 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
381                 if (error) {
382                         pr_info("BP remove failed: %lx\n",
383                                 kgdb_break[i].bpt_addr);
384                         ret = error;
385                 }
386
387                 kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
388                 kgdb_break[i].state = BP_SET;
389         }
390         return ret;
391 }
392 NOKPROBE_SYMBOL(dbg_deactivate_sw_breakpoints);
393
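    /* Mark the breakpoint at @addr as removed so it is not re-armed on resume. */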
394 int dbg_remove_sw_break(unsigned long addr)
395 {
396         int i;
397
398         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
399                 if ((kgdb_break[i].state == BP_SET) &&
400                                 (kgdb_break[i].bpt_addr == addr)) {
401                         kgdb_break[i].state = BP_REMOVED;
402                         return 0;
403                 }
404         }
405         return -ENOENT;
406 }
407
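    /* Return 1 if a breakpoint at @addr exists but is in the removed state. */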
408 int kgdb_isremovedbreak(unsigned long addr)
409 {
410         int i;
411
412         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
413                 if ((kgdb_break[i].state == BP_REMOVED) &&
414                                         (kgdb_break[i].bpt_addr == addr))
415                         return 1;
416         }
417         return 0;
418 }
419
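    /* Return 1 if @addr matches a breakpoint that is currently active in memory. */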
420 int kgdb_has_hit_break(unsigned long addr)
421 {
422         int i;
423
424         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
425                 if (kgdb_break[i].state == BP_ACTIVE &&
426                     kgdb_break[i].bpt_addr == addr)
427                         return 1;
428         }
429         return 0;
430 }
431
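    /* Remove every software breakpoint from memory and clear all hardware breakpoints. */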
432 int dbg_remove_all_break(void)
433 {
434         int error;
435         int i;
436
437         /* Clear memory breakpoints. */
438         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
439                 if (kgdb_break[i].state != BP_ACTIVE)
440                         goto setundefined;
441                 error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
442                 if (error)
443                         pr_err("breakpoint remove failed: %lx\n",
444                                kgdb_break[i].bpt_addr);
445 setundefined:
446                 kgdb_break[i].state = BP_UNDEFINED;
447         }
448
449         /* Clear hardware breakpoints. */
450         if (arch_kgdb_ops.remove_all_hw_break)
451                 arch_kgdb_ops.remove_all_hw_break();
452
453         return 0;
454 }
455
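    /* Forget any breakpoints that point into (now discarded) __init memory. */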
456 void kgdb_free_init_mem(void)
457 {
458         int i;
459
460         /* Clear init memory breakpoints. */
461         for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
462                 if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
463                         kgdb_break[i].state = BP_UNDEFINED;
464         }
465 }
466
467 #ifdef CONFIG_KGDB_KDB
468 void kdb_dump_stack_on_cpu(int cpu)
469 {
470         if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
471                 dump_stack();
472                 return;
473         }
474
475         if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
476                 kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
477                            cpu);
478                 return;
479         }
480
481         /*
482          * In general, architectures don't support dumping the stack of a
483          * "running" process that's not the current one.  From the point of
484          * view of Linux, kernel processes that are looping in the kgdb
485          * slave loop are still "running".  There's also no API (that actually
486          * works across all architectures) that can do a stack crawl based
487          * on registers passed as a parameter.
488          *
489          * Solve this conundrum by asking slave CPUs to do the backtrace
490          * themselves.
491          */
492         kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
493         while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
494                 cpu_relax();
495 }
496 #endif
497
498 /*
499  * Return true if there is a valid kgdb I/O module.  Also, if no
500  * debugger is attached, a message can be printed to the console about
501  * waiting for the debugger to attach.
502  *
503  * The print_wait argument should only be true when called from inside
504  * the core kgdb_handle_exception, because it will wait for the
505  * debugger to attach.
506  */
507 static int kgdb_io_ready(int print_wait)
508 {
509         if (!dbg_io_ops)
510                 return 0;
511         if (kgdb_connected)
512                 return 1;
513         if (atomic_read(&kgdb_setting_breakpoint))
514                 return 1;
515         if (print_wait) {
516 #ifdef CONFIG_KGDB_KDB
517                 if (!dbg_kdb_mode)
518                         pr_crit("waiting... or $3#33 for KDB\n");
519 #else
520                 pr_crit("Waiting for remote debugger\n");
521 #endif
522         }
523         return 1;
524 }
525 NOKPROBE_SYMBOL(kgdb_io_ready);
526
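    /* Detect recursive entry into the debugger on the CPU that already owns it. */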
527 static int kgdb_reenter_check(struct kgdb_state *ks)
528 {
529         unsigned long addr;
530
531         if (atomic_read(&kgdb_active) != raw_smp_processor_id())
532                 return 0;
533
534         /* Panic on recursive debugger calls: */
535         exception_level++;
536         addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
537         dbg_deactivate_sw_breakpoints();
538
539         /*
540          * If the breakpoint was removed successfully at the place the
541          * exception occurred, try to recover and print a warning to the end
542          * user because the user planted a breakpoint in a place that
543          * KGDB needs in order to function.
544          */
545         if (dbg_remove_sw_break(addr) == 0) {
546                 exception_level = 0;
547                 kgdb_skipexception(ks->ex_vector, ks->linux_regs);
548                 dbg_activate_sw_breakpoints();
549                 pr_crit("re-enter error: breakpoint removed %lx\n", addr);
550                 WARN_ON_ONCE(1);
551
552                 return 1;
553         }
554         dbg_remove_all_break();
555         kgdb_skipexception(ks->ex_vector, ks->linux_regs);
556
557         if (exception_level > 1) {
558                 dump_stack();
559                 kgdb_io_module_registered = false;
560                 panic("Recursive entry to debugger");
561         }
562
563         pr_crit("re-enter exception: ALL breakpoints killed\n");
564 #ifdef CONFIG_KGDB_KDB
565         /* Allow kdb to debug itself one level */
566         return 0;
567 #endif
568         dump_stack();
569         panic("Recursive entry to debugger");
570
571         return 1;
572 }
573 NOKPROBE_SYMBOL(kgdb_reenter_check);
574
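    /* Pet the lockup and stall detectors so time spent stopped in the debugger is not reported. */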
575 static void dbg_touch_watchdogs(void)
576 {
577         touch_softlockup_watchdog_sync();
578         clocksource_touch_watchdog();
579         rcu_cpu_stall_reset();
580 }
581 NOKPROBE_SYMBOL(dbg_touch_watchdogs);
582
583 static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
584                 int exception_state)
585 {
586         unsigned long flags;
587         int sstep_tries = 100;
588         int error;
589         int cpu;
590         int trace_on = 0;
591         int online_cpus = num_online_cpus();
592         u64 time_left;
593
594         kgdb_info[ks->cpu].enter_kgdb++;
595         kgdb_info[ks->cpu].exception_state |= exception_state;
596
597         if (exception_state == DCPU_WANT_MASTER)
598                 atomic_inc(&masters_in_kgdb);
599         else
600                 atomic_inc(&slaves_in_kgdb);
601
602         if (arch_kgdb_ops.disable_hw_break)
603                 arch_kgdb_ops.disable_hw_break(regs);
604
605 acquirelock:
606         rcu_read_lock();
607         /*
608          * Interrupts will be restored by the 'trap return' code, except when
609          * single stepping.
610          */
611         local_irq_save(flags);
612
613         cpu = ks->cpu;
614         kgdb_info[cpu].debuggerinfo = regs;
615         kgdb_info[cpu].task = current;
616         kgdb_info[cpu].ret_state = 0;
617         kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
618
619         /* Make sure the above info reaches the primary CPU */
620         smp_mb();
621
622         if (exception_level == 1) {
623                 if (raw_spin_trylock(&dbg_master_lock))
624                         atomic_xchg(&kgdb_active, cpu);
625                 goto cpu_master_loop;
626         }
627
628         /*
629          * The CPU will loop if it is a slave, or while requesting to become
630          * the kgdb master CPU and acquire the kgdb_active lock:
631          */
632         while (1) {
633 cpu_loop:
634                 if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
635                         kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
636                         goto cpu_master_loop;
637                 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
638                         if (raw_spin_trylock(&dbg_master_lock)) {
639                                 atomic_xchg(&kgdb_active, cpu);
640                                 break;
641                         }
642                 } else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
643                         dump_stack();
644                         kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
645                 } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
646                         if (!raw_spin_is_locked(&dbg_slave_lock))
647                                 goto return_normal;
648                 } else {
649 return_normal:
650                         /* Return to normal operation by executing any
651                          * hw breakpoint fixup.
652                          */
653                         if (arch_kgdb_ops.correct_hw_break)
654                                 arch_kgdb_ops.correct_hw_break();
655                         if (trace_on)
656                                 tracing_on();
657                         kgdb_info[cpu].debuggerinfo = NULL;
658                         kgdb_info[cpu].task = NULL;
659                         kgdb_info[cpu].exception_state &=
660                                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
661                         kgdb_info[cpu].enter_kgdb--;
662                         smp_mb__before_atomic();
663                         atomic_dec(&slaves_in_kgdb);
664                         dbg_touch_watchdogs();
665                         local_irq_restore(flags);
666                         rcu_read_unlock();
667                         return 0;
668                 }
669                 cpu_relax();
670         }
671
672         /*
673          * For single stepping, try to only enter on the processor
674          * that was single stepping.  To guard against a deadlock, the
675          * kernel will only try for the value of sstep_tries before
676          * giving up and continuing on.
677          */
678         if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
679             (kgdb_info[cpu].task &&
680              kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
681                 atomic_set(&kgdb_active, -1);
682                 raw_spin_unlock(&dbg_master_lock);
683                 dbg_touch_watchdogs();
684                 local_irq_restore(flags);
685                 rcu_read_unlock();
686
687                 goto acquirelock;
688         }
689
690         if (!kgdb_io_ready(1)) {
691                 kgdb_info[cpu].ret_state = 1;
692                 goto kgdb_restore; /* No I/O connection, resume the system */
693         }
694
695         /*
696          * Don't enter if we have hit a removed breakpoint.
697          */
698         if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
699                 goto kgdb_restore;
700
701         atomic_inc(&ignore_console_lock_warning);
702
703         /* Call the I/O driver's pre_exception routine */
704         if (dbg_io_ops->pre_exception)
705                 dbg_io_ops->pre_exception();
706
707         /*
708          * Get the passive CPU lock which will hold all the non-primary
709          * CPUs in a spin state while the debugger is active
710          */
711         if (!kgdb_single_step)
712                 raw_spin_lock(&dbg_slave_lock);
713
714 #ifdef CONFIG_SMP
715         /* If send_ready is set, slaves are already waiting */
716         if (ks->send_ready)
717                 atomic_set(ks->send_ready, 1);
718
719         /* Signal the other CPUs to enter kgdb_wait() */
720         else if ((!kgdb_single_step) && kgdb_do_roundup)
721                 kgdb_roundup_cpus();
722 #endif
723
724         /*
725          * Wait for the other CPUs to be notified and be waiting for us:
726          */
727         time_left = MSEC_PER_SEC;
728         while (kgdb_do_roundup && --time_left &&
729                (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
730                    online_cpus)
731                 udelay(1000);
732         if (!time_left)
733                 pr_crit("Timed out waiting for secondary CPUs.\n");
734
735         /*
736          * At this point the primary processor is completely
737          * in the debugger and all secondary CPUs are quiescent
738          */
739         dbg_deactivate_sw_breakpoints();
740         kgdb_single_step = 0;
741         kgdb_contthread = current;
742         exception_level = 0;
743         trace_on = tracing_is_on();
744         if (trace_on)
745                 tracing_off();
746
747         while (1) {
748 cpu_master_loop:
749                 if (dbg_kdb_mode) {
750                         kgdb_connected = 1;
751                         error = kdb_stub(ks);
752                         if (error == -1)
753                                 continue;
754                         kgdb_connected = 0;
755                 } else {
756                         /*
757                          * This is a brutal way to interfere with the debugger
758                          * and prevent gdb from being used to poke at kernel memory.
759                          * This could cause trouble if lockdown is applied when
760                          * there is already an active gdb session. For now the
761                          * answer is simply "don't do that". Typically lockdown
762                          * *will* be applied before the debug core gets started
763                          * so only developers using kgdb for fairly advanced
764                          * early kernel debug can be bitten by this. Hopefully
765                          * they are sophisticated enough to take care of
766                          * themselves, especially with help from the lockdown
767                          * message printed on the console!
768                          */
769                         if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
770                                 if (IS_ENABLED(CONFIG_KGDB_KDB)) {
771                                         /* Switch back to kdb if possible... */
772                                         dbg_kdb_mode = 1;
773                                         continue;
774                                 } else {
775                                         /* ... otherwise just bail */
776                                         break;
777                                 }
778                         }
779                         error = gdb_serial_stub(ks);
780                 }
781
782                 if (error == DBG_PASS_EVENT) {
783                         dbg_kdb_mode = !dbg_kdb_mode;
784                 } else if (error == DBG_SWITCH_CPU_EVENT) {
785                         kgdb_info[dbg_switch_cpu].exception_state |=
786                                 DCPU_NEXT_MASTER;
787                         goto cpu_loop;
788                 } else {
789                         kgdb_info[cpu].ret_state = error;
790                         break;
791                 }
792         }
793
794         dbg_activate_sw_breakpoints();
795
796         /* Call the I/O driver's post_exception routine */
797         if (dbg_io_ops->post_exception)
798                 dbg_io_ops->post_exception();
799
800         atomic_dec(&ignore_console_lock_warning);
801
802         if (!kgdb_single_step) {
803                 raw_spin_unlock(&dbg_slave_lock);
804                 /* Wait till all the CPUs have quit from the debugger. */
805                 while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
806                         cpu_relax();
807         }
808
809 kgdb_restore:
810         if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
811                 int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
812                 if (kgdb_info[sstep_cpu].task)
813                         kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
814                 else
815                         kgdb_sstep_pid = 0;
816         }
817         if (arch_kgdb_ops.correct_hw_break)
818                 arch_kgdb_ops.correct_hw_break();
819         if (trace_on)
820                 tracing_on();
821
822         kgdb_info[cpu].debuggerinfo = NULL;
823         kgdb_info[cpu].task = NULL;
824         kgdb_info[cpu].exception_state &=
825                 ~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
826         kgdb_info[cpu].enter_kgdb--;
827         smp_mb__before_atomic();
828         atomic_dec(&masters_in_kgdb);
829         /* Free kgdb_active */
830         atomic_set(&kgdb_active, -1);
831         raw_spin_unlock(&dbg_master_lock);
832         dbg_touch_watchdogs();
833         local_irq_restore(flags);
834         rcu_read_unlock();
835
836         return kgdb_info[cpu].ret_state;
837 }
838 NOKPROBE_SYMBOL(kgdb_cpu_enter);
839
840 /*
841  * kgdb_handle_exception() - main entry point from a kernel exception
842  *
843  * Locking hierarchy:
844  *      interface locks, if any (begin_session)
845  *      kgdb lock (kgdb_active)
846  */
847 int
848 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
849 {
850         struct kgdb_state kgdb_var;
851         struct kgdb_state *ks = &kgdb_var;
852         int ret = 0;
853
854         if (arch_kgdb_ops.enable_nmi)
855                 arch_kgdb_ops.enable_nmi(0);
856         /*
857          * Avoid entering the debugger if we were triggered due to an oops
858          * but panic_timeout indicates the system should automatically
859          * reboot on panic. We don't want to get stuck waiting for input
860          * on such systems, especially if it's "just" an oops.
861          */
862         if (signo != SIGTRAP && panic_timeout)
863                 return 1;
864
865         memset(ks, 0, sizeof(struct kgdb_state));
866         ks->cpu                 = raw_smp_processor_id();
867         ks->ex_vector           = evector;
868         ks->signo               = signo;
869         ks->err_code            = ecode;
870         ks->linux_regs          = regs;
871
872         if (kgdb_reenter_check(ks))
873                 goto out; /* Ouch, double exception ! */
874         if (kgdb_info[ks->cpu].enter_kgdb != 0)
875                 goto out;
876
877         ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
878 out:
879         if (arch_kgdb_ops.enable_nmi)
880                 arch_kgdb_ops.enable_nmi(1);
881         return ret;
882 }
883 NOKPROBE_SYMBOL(kgdb_handle_exception);
884
885 /*
886  * GDB places a breakpoint at this function so that it can track dynamically loaded objects.
887  */
888 static int module_event(struct notifier_block *self, unsigned long val,
889         void *data)
890 {
891         return 0;
892 }
893
894 static struct notifier_block dbg_module_load_nb = {
895         .notifier_call  = module_event,
896 };
897
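    /* Round-up entry point for a secondary CPU: park it as a slave while a master CPU holds the debugger. */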
898 int kgdb_nmicallback(int cpu, void *regs)
899 {
900 #ifdef CONFIG_SMP
901         struct kgdb_state kgdb_var;
902         struct kgdb_state *ks = &kgdb_var;
903
904         kgdb_info[cpu].rounding_up = false;
905
906         memset(ks, 0, sizeof(struct kgdb_state));
907         ks->cpu                 = cpu;
908         ks->linux_regs          = regs;
909
910         if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
911                         raw_spin_is_locked(&dbg_master_lock)) {
912                 kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
913                 return 0;
914         }
915 #endif
916         return 1;
917 }
918 NOKPROBE_SYMBOL(kgdb_nmicallback);
919
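    /* Enter the debugger as the master CPU directly from NMI context, if an I/O driver is ready. */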
920 int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
921                                                         atomic_t *send_ready)
922 {
923 #ifdef CONFIG_SMP
924         if (!kgdb_io_ready(0) || !send_ready)
925                 return 1;
926
927         if (kgdb_info[cpu].enter_kgdb == 0) {
928                 struct kgdb_state kgdb_var;
929                 struct kgdb_state *ks = &kgdb_var;
930
931                 memset(ks, 0, sizeof(struct kgdb_state));
932                 ks->cpu                 = cpu;
933                 ks->ex_vector           = trapnr;
934                 ks->signo               = SIGTRAP;
935                 ks->err_code            = err_code;
936                 ks->linux_regs          = regs;
937                 ks->send_ready          = send_ready;
938                 kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
939                 return 0;
940         }
941 #endif
942         return 1;
943 }
944 NOKPROBE_SYMBOL(kgdb_nmicallin);
945
946 static void kgdb_console_write(struct console *co, const char *s,
947    unsigned count)
948 {
949         unsigned long flags;
950
951         /* If we're debugging, or KGDB has not connected, don't try
952          * to print. */
953         if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
954                 return;
955
956         local_irq_save(flags);
957         gdbstub_msg_write(s, count);
958         local_irq_restore(flags);
959 }
960
961 static struct console kgdbcons = {
962         .name           = "kgdb",
963         .write          = kgdb_console_write,
964         .flags          = CON_PRINTBUFFER | CON_ENABLED,
965         .index          = -1,
966 };
967
968 static int __init opt_kgdb_con(char *str)
969 {
970         kgdb_use_con = 1;
971
972         if (kgdb_io_module_registered && !kgdb_con_registered) {
973                 register_console(&kgdbcons);
974                 kgdb_con_registered = 1;
975         }
976
977         return 0;
978 }
979
980 early_param("kgdbcon", opt_kgdb_con);
981
982 #ifdef CONFIG_MAGIC_SYSRQ
983 static void sysrq_handle_dbg(int key)
984 {
985         if (!dbg_io_ops) {
986                 pr_crit("ERROR: No KGDB I/O module available\n");
987                 return;
988         }
989         if (!kgdb_connected) {
990 #ifdef CONFIG_KGDB_KDB
991                 if (!dbg_kdb_mode)
992                         pr_crit("KGDB or $3#33 for KDB\n");
993 #else
994                 pr_crit("Entering KGDB\n");
995 #endif
996         }
997
998         kgdb_breakpoint();
999 }
1000
1001 static const struct sysrq_key_op sysrq_dbg_op = {
1002         .handler        = sysrq_handle_dbg,
1003         .help_msg       = "debug(g)",
1004         .action_msg     = "DEBUG",
1005 };
1006 #endif
1007
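     /* Called on panic: drop into the debugger unless panic_timeout requests an automatic reboot. */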
1008 void kgdb_panic(const char *msg)
1009 {
1010         if (!kgdb_io_module_registered)
1011                 return;
1012
1013         /*
1014          * We don't want to get stuck waiting for input from the user if
1015          * "panic_timeout" indicates the system should automatically
1016          * reboot on panic.
1017          */
1018         if (panic_timeout)
1019                 return;
1020
1021         if (dbg_kdb_mode)
1022                 kdb_printf("PANIC: %s\n", msg);
1023
1024         kgdb_breakpoint();
1025 }
1026
1027 static void kgdb_initial_breakpoint(void)
1028 {
1029         kgdb_break_asap = 0;
1030
1031         pr_crit("Waiting for connection from remote gdb...\n");
1032         kgdb_breakpoint();
1033 }
1034
1035 void __weak kgdb_arch_late(void)
1036 {
1037 }
1038
1039 void __init dbg_late_init(void)
1040 {
1041         dbg_is_early = false;
1042         if (kgdb_io_module_registered)
1043                 kgdb_arch_late();
1044         kdb_init(KDB_INIT_FULL);
1045
1046         if (kgdb_io_module_registered && kgdb_break_asap)
1047                 kgdb_initial_breakpoint();
1048 }
1049
1050 static int
1051 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
1052 {
1053         /*
1054          * Take the following action on reboot notify depending on value:
1055          *    1 == Enter debugger
1056          *    0 == [the default] detach debug client
1057          *   -1 == Do nothing... and use this until the board resets
1058          */
1059         switch (kgdbreboot) {
1060         case 1:
1061                 kgdb_breakpoint();
1062                 goto done;
1063         case -1:
1064                 goto done;
1065         }
1066         if (!dbg_kdb_mode)
1067                 gdbstub_exit(code);
1068 done:
1069         return NOTIFY_DONE;
1070 }
1071
1072 static struct notifier_block dbg_reboot_notifier = {
1073         .notifier_call          = dbg_notify_reboot,
1074         .next                   = NULL,
1075         .priority               = INT_MAX,
1076 };
1077
1078 static void kgdb_register_callbacks(void)
1079 {
1080         if (!kgdb_io_module_registered) {
1081                 kgdb_io_module_registered = 1;
1082                 kgdb_arch_init();
1083                 if (!dbg_is_early)
1084                         kgdb_arch_late();
1085                 register_module_notifier(&dbg_module_load_nb);
1086                 register_reboot_notifier(&dbg_reboot_notifier);
1087 #ifdef CONFIG_MAGIC_SYSRQ
1088                 register_sysrq_key('g', &sysrq_dbg_op);
1089 #endif
1090                 if (kgdb_use_con && !kgdb_con_registered) {
1091                         register_console(&kgdbcons);
1092                         kgdb_con_registered = 1;
1093                 }
1094         }
1095 }
1096
1097 static void kgdb_unregister_callbacks(void)
1098 {
1099         /*
1100          * When this routine is called, KGDB should unregister from
1101          * handlers and clean up, making sure it is not handling any
1102          * break exceptions at the time.
1103          */
1104         if (kgdb_io_module_registered) {
1105                 kgdb_io_module_registered = 0;
1106                 unregister_reboot_notifier(&dbg_reboot_notifier);
1107                 unregister_module_notifier(&dbg_module_load_nb);
1108                 kgdb_arch_exit();
1109 #ifdef CONFIG_MAGIC_SYSRQ
1110                 unregister_sysrq_key('g', &sysrq_dbg_op);
1111 #endif
1112                 if (kgdb_con_registered) {
1113                         unregister_console(&kgdbcons);
1114                         kgdb_con_registered = 0;
1115                 }
1116         }
1117 }
1118
1119 /**
1120  *      kgdb_register_io_module - register KGDB IO module
1121  *      @new_dbg_io_ops: the io ops vector
1122  *
1123  *      Register it with the KGDB core.
1124  */
1125 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
1126 {
1127         struct kgdb_io *old_dbg_io_ops;
1128         int err;
1129
1130         spin_lock(&kgdb_registration_lock);
1131
1132         old_dbg_io_ops = dbg_io_ops;
1133         if (old_dbg_io_ops) {
1134                 if (!old_dbg_io_ops->deinit) {
1135                         spin_unlock(&kgdb_registration_lock);
1136
1137                         pr_err("KGDB I/O driver %s can't replace %s.\n",
1138                                 new_dbg_io_ops->name, old_dbg_io_ops->name);
1139                         return -EBUSY;
1140                 }
1141                 pr_info("Replacing I/O driver %s with %s\n",
1142                         old_dbg_io_ops->name, new_dbg_io_ops->name);
1143         }
1144
1145         if (new_dbg_io_ops->init) {
1146                 err = new_dbg_io_ops->init();
1147                 if (err) {
1148                         spin_unlock(&kgdb_registration_lock);
1149                         return err;
1150                 }
1151         }
1152
1153         dbg_io_ops = new_dbg_io_ops;
1154
1155         spin_unlock(&kgdb_registration_lock);
1156
1157         if (old_dbg_io_ops) {
1158                 old_dbg_io_ops->deinit();
1159                 return 0;
1160         }
1161
1162         pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
1163
1164         /* Arm KGDB now. */
1165         kgdb_register_callbacks();
1166
1167         if (kgdb_break_asap &&
1168             (!dbg_is_early || IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG)))
1169                 kgdb_initial_breakpoint();
1170
1171         return 0;
1172 }
1173 EXPORT_SYMBOL_GPL(kgdb_register_io_module);
1174
1175 /**
1176  *      kgdb_unregister_io_module - unregister KGDB IO module
1177  *      @old_dbg_io_ops: the io ops vector
1178  *
1179  *      Unregister it with the KGDB core.
1180  */
1181 void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
1182 {
1183         BUG_ON(kgdb_connected);
1184
1185         /*
1186          * KGDB is no longer able to communicate out, so
1187          * unregister our callbacks and reset state.
1188          */
1189         kgdb_unregister_callbacks();
1190
1191         spin_lock(&kgdb_registration_lock);
1192
1193         WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
1194         dbg_io_ops = NULL;
1195
1196         spin_unlock(&kgdb_registration_lock);
1197
1198         if (old_dbg_io_ops->deinit)
1199                 old_dbg_io_ops->deinit();
1200
1201         pr_info("Unregistered I/O driver %s, debugger disabled\n",
1202                 old_dbg_io_ops->name);
1203 }
1204 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
1205
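     /* Fetch one character from the I/O driver; in kdb mode, map DEL (127) to backspace (8). */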
1206 int dbg_io_get_char(void)
1207 {
1208         int ret = dbg_io_ops->read_char();
1209         if (ret == NO_POLL_CHAR)
1210                 return -1;
1211         if (!dbg_kdb_mode)
1212                 return ret;
1213         if (ret == 127)
1214                 return 8;
1215         return ret;
1216 }
1217
1218 /**
1219  * kgdb_breakpoint - generate breakpoint exception
1220  *
1221  * This function will generate a breakpoint exception.  It is used at the
1222  * beginning of a program to sync up with a debugger and can be used
1223  * otherwise as a quick means to stop program execution and "break" into
1224  * the debugger.
1225  */
1226 noinline void kgdb_breakpoint(void)
1227 {
1228         atomic_inc(&kgdb_setting_breakpoint);
1229         wmb(); /* Sync point before breakpoint */
1230         arch_kgdb_breakpoint();
1231         wmb(); /* Sync point after breakpoint */
1232         atomic_dec(&kgdb_setting_breakpoint);
1233 }
1234 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
1235
1236 static int __init opt_kgdb_wait(char *str)
1237 {
1238         kgdb_break_asap = 1;
1239
1240         kdb_init(KDB_INIT_EARLY);
1241         if (kgdb_io_module_registered &&
1242             IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG))
1243                 kgdb_initial_breakpoint();
1244
1245         return 0;
1246 }
1247
1248 early_param("kgdbwait", opt_kgdb_wait);