1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel Debug Core
4  *
5  * Maintainer: Jason Wessel <jason.wessel@windriver.com>
6  *
7  * Copyright (C) 2000-2001 VERITAS Software Corporation.
8  * Copyright (C) 2002-2004 Timesys Corporation
9  * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
10  * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
11  * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
12  * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
13  * Copyright (C) 2005-2009 Wind River Systems, Inc.
14  * Copyright (C) 2007 MontaVista Software, Inc.
15  * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
16  *
17  * Contributors at various stages not listed above:
18  *  Jason Wessel ( jason.wessel@windriver.com )
19  *  George Anzinger <george@mvista.com>
20  *  Anurekh Saxena (anurekh.saxena@timesys.com)
21  *  Lake Stevens Instrument Division (Glenn Engel)
22  *  Jim Kingdon, Cygnus Support.
23  *
24  * Original KGDB stub: David Grothe <dave@gcom.com>,
25  * Tigran Aivazian <tigran@sco.com>
26  */
27 
28 #define pr_fmt(fmt) "KGDB: " fmt
29 
30 #include <linux/pid_namespace.h>
31 #include <linux/clocksource.h>
32 #include <linux/serial_core.h>
33 #include <linux/interrupt.h>
34 #include <linux/spinlock.h>
35 #include <linux/console.h>
36 #include <linux/threads.h>
37 #include <linux/uaccess.h>
38 #include <linux/kernel.h>
39 #include <linux/module.h>
40 #include <linux/ptrace.h>
41 #include <linux/string.h>
42 #include <linux/delay.h>
43 #include <linux/sched.h>
44 #include <linux/sysrq.h>
45 #include <linux/reboot.h>
46 #include <linux/init.h>
47 #include <linux/kgdb.h>
48 #include <linux/kdb.h>
49 #include <linux/nmi.h>
50 #include <linux/pid.h>
51 #include <linux/smp.h>
52 #include <linux/mm.h>
53 #include <linux/rcupdate.h>
54 #include <linux/irq.h>
55 #include <linux/security.h>
56 
57 #include <asm/cacheflush.h>
58 #include <asm/byteorder.h>
59 #include <linux/atomic.h>
60 
61 #include "debug_core.h"
62 
/* Set by the "kgdbwait" boot parameter: break in as early as possible */
static int kgdb_break_asap;

/* Per-CPU debugger bookkeeping, indexed by CPU number */
struct debuggerinfo_struct kgdb_info[NR_CPUS];

/* kgdb_connected - Is a host GDB connected to us? */
int				kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int			kgdb_io_module_registered;

/* Guard for recursive entry */
static int			exception_level;

/* Currently registered debugger I/O driver; NULL when none is available */
struct kgdb_io		*dbg_io_ops;
/* Serializes registration/replacement of I/O drivers */
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier, a global allow kdb to change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt		kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t			kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
/* Held by the single "master" CPU for the duration of a debugger session */
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
/* While held, slave CPUs spin inside the debugger instead of resuming */
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t			masters_in_kgdb;
static atomic_t			slaves_in_kgdb;
/* Non-zero while kgdb_breakpoint() runs; checked by kgdb_io_ready() */
atomic_t			kgdb_setting_breakpoint;

/* Threads selected by the attached debugger to inspect / continue */
struct task_struct		*kgdb_usethread;
struct task_struct		*kgdb_contthread;

int				kgdb_single_step;
/* pid of the task that was last being single stepped */
static pid_t			kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping*/
atomic_t			kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;
138 
/* Handler for the "nokgdbroundup" boot parameter: disable CPU roundup */
static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;

	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);
147 
148 /*
149  * Finally, some KGDB code :-)
150  */
151 
152 /*
153  * Weak aliases for breakpoint management,
154  * can be overridden by architectures when needed:
155  */
156 int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
157 {
158 	int err;
159 
160 	err = copy_from_kernel_nofault(bpt->saved_instr, (char *)bpt->bpt_addr,
161 				BREAK_INSTR_SIZE);
162 	if (err)
163 		return err;
164 	err = copy_to_kernel_nofault((char *)bpt->bpt_addr,
165 				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
166 	return err;
167 }
168 NOKPROBE_SYMBOL(kgdb_arch_set_breakpoint);
169 
/*
 * kgdb_arch_remove_breakpoint - disarm a software breakpoint (weak default)
 *
 * Restore the original instruction bytes that kgdb_arch_set_breakpoint()
 * stashed in @bpt->saved_instr.  Returns 0 on success or a negative error
 * from the fault-tolerant copy.
 */
int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return copy_to_kernel_nofault((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
NOKPROBE_SYMBOL(kgdb_arch_remove_breakpoint);
176 
/*
 * kgdb_validate_break_address - check that a breakpoint can live at @addr
 *
 * Rejects addresses on the kgdb blocklist, then proves the address is
 * patchable by actually planting and removing a breakpoint there.
 * Returns 0 when the address is usable, a negative errno otherwise.
 */
int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;

	if (kgdb_within_blocklist(addr))
		return -EINVAL;

	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble not being able to put things back the way we
	 * found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
		       addr);
	return err;
}
200 
/* Default: report the exception PC straight from the saved registers. */
unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}
NOKPROBE_SYMBOL(kgdb_arch_pc);

/* Default arch hook: nothing extra to initialize. */
int __weak kgdb_arch_init(void)
{
	return 0;
}

/*
 * Default arch hook: never skip an exception.  Architectures override
 * this when they need to step past a breakpoint instruction whose
 * breakpoint was already removed.
 */
int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}
NOKPROBE_SYMBOL(kgdb_skipexception);
217 
218 #ifdef CONFIG_SMP
219 
220 /*
221  * Default (weak) implementation for kgdb_roundup_cpus
222  */
223 
/* Runs on a rounded-up CPU (usually via IPI): enter the slave loop. */
void __weak kgdb_call_nmi_hook(void *ignored)
{
	/*
	 * NOTE: get_irq_regs() is supposed to get the registers from
	 * before the IPI interrupt happened and so is supposed to
	 * show where the processor was.  In some situations it's
	 * possible we might be called without an IPI, so it might be
	 * safer to figure out how to make kgdb_breakpoint() work
	 * properly here.
	 */
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
NOKPROBE_SYMBOL(kgdb_call_nmi_hook);

/* Per-CPU IPI payload used by kgdb_roundup_cpus() to pull CPUs in */
static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd) =
	CSD_INIT(kgdb_call_nmi_hook, NULL);
240 
241 void __weak kgdb_roundup_cpus(void)
242 {
243 	call_single_data_t *csd;
244 	int this_cpu = raw_smp_processor_id();
245 	int cpu;
246 	int ret;
247 
248 	for_each_online_cpu(cpu) {
249 		/* No need to roundup ourselves */
250 		if (cpu == this_cpu)
251 			continue;
252 
253 		csd = &per_cpu(kgdb_roundup_csd, cpu);
254 
255 		/*
256 		 * If it didn't round up last time, don't try again
257 		 * since smp_call_function_single_async() will block.
258 		 *
259 		 * If rounding_up is false then we know that the
260 		 * previous call must have at least started and that
261 		 * means smp_call_function_single_async() won't block.
262 		 */
263 		if (kgdb_info[cpu].rounding_up)
264 			continue;
265 		kgdb_info[cpu].rounding_up = true;
266 
267 		ret = smp_call_function_single_async(cpu, csd);
268 		if (ret)
269 			kgdb_info[cpu].rounding_up = false;
270 	}
271 }
272 NOKPROBE_SYMBOL(kgdb_roundup_cpus);
273 
274 #endif
275 
276 /*
277  * Some architectures need cache flushes when we set/clear a
278  * breakpoint:
279  */
280 static void kgdb_flush_swbreak_addr(unsigned long addr)
281 {
282 	if (!CACHE_FLUSH_IS_SAFE)
283 		return;
284 
285 	/* Force flush instruction cache if it was outside the mm */
286 	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
287 }
288 NOKPROBE_SYMBOL(kgdb_flush_swbreak_addr);
289 
290 /*
291  * SW breakpoint management:
292  */
293 int dbg_activate_sw_breakpoints(void)
294 {
295 	int error;
296 	int ret = 0;
297 	int i;
298 
299 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
300 		if (kgdb_break[i].state != BP_SET)
301 			continue;
302 
303 		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
304 		if (error) {
305 			ret = error;
306 			pr_info("BP install failed: %lx\n",
307 				kgdb_break[i].bpt_addr);
308 			continue;
309 		}
310 
311 		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
312 		kgdb_break[i].state = BP_ACTIVE;
313 	}
314 	return ret;
315 }
316 NOKPROBE_SYMBOL(dbg_activate_sw_breakpoints);
317 
318 int dbg_set_sw_break(unsigned long addr)
319 {
320 	int err = kgdb_validate_break_address(addr);
321 	int breakno = -1;
322 	int i;
323 
324 	if (err)
325 		return err;
326 
327 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
328 		if ((kgdb_break[i].state == BP_SET) &&
329 					(kgdb_break[i].bpt_addr == addr))
330 			return -EEXIST;
331 	}
332 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
333 		if (kgdb_break[i].state == BP_REMOVED &&
334 					kgdb_break[i].bpt_addr == addr) {
335 			breakno = i;
336 			break;
337 		}
338 	}
339 
340 	if (breakno == -1) {
341 		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
342 			if (kgdb_break[i].state == BP_UNDEFINED) {
343 				breakno = i;
344 				break;
345 			}
346 		}
347 	}
348 
349 	if (breakno == -1)
350 		return -E2BIG;
351 
352 	kgdb_break[breakno].state = BP_SET;
353 	kgdb_break[breakno].type = BP_BREAKPOINT;
354 	kgdb_break[breakno].bpt_addr = addr;
355 
356 	return 0;
357 }
358 
359 int dbg_deactivate_sw_breakpoints(void)
360 {
361 	int error;
362 	int ret = 0;
363 	int i;
364 
365 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
366 		if (kgdb_break[i].state != BP_ACTIVE)
367 			continue;
368 		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
369 		if (error) {
370 			pr_info("BP remove failed: %lx\n",
371 				kgdb_break[i].bpt_addr);
372 			ret = error;
373 		}
374 
375 		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
376 		kgdb_break[i].state = BP_SET;
377 	}
378 	return ret;
379 }
380 NOKPROBE_SYMBOL(dbg_deactivate_sw_breakpoints);
381 
382 int dbg_remove_sw_break(unsigned long addr)
383 {
384 	int i;
385 
386 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
387 		if ((kgdb_break[i].state == BP_SET) &&
388 				(kgdb_break[i].bpt_addr == addr)) {
389 			kgdb_break[i].state = BP_REMOVED;
390 			return 0;
391 		}
392 	}
393 	return -ENOENT;
394 }
395 
396 int kgdb_isremovedbreak(unsigned long addr)
397 {
398 	int i;
399 
400 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
401 		if ((kgdb_break[i].state == BP_REMOVED) &&
402 					(kgdb_break[i].bpt_addr == addr))
403 			return 1;
404 	}
405 	return 0;
406 }
407 
408 int kgdb_has_hit_break(unsigned long addr)
409 {
410 	int i;
411 
412 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
413 		if (kgdb_break[i].state == BP_ACTIVE &&
414 		    kgdb_break[i].bpt_addr == addr)
415 			return 1;
416 	}
417 	return 0;
418 }
419 
420 int dbg_remove_all_break(void)
421 {
422 	int error;
423 	int i;
424 
425 	/* Clear memory breakpoints. */
426 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
427 		if (kgdb_break[i].state != BP_ACTIVE)
428 			goto setundefined;
429 		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
430 		if (error)
431 			pr_err("breakpoint remove failed: %lx\n",
432 			       kgdb_break[i].bpt_addr);
433 setundefined:
434 		kgdb_break[i].state = BP_UNDEFINED;
435 	}
436 
437 	/* Clear hardware breakpoints. */
438 	if (arch_kgdb_ops.remove_all_hw_break)
439 		arch_kgdb_ops.remove_all_hw_break();
440 
441 	return 0;
442 }
443 
444 void kgdb_free_init_mem(void)
445 {
446 	int i;
447 
448 	/* Clear init memory breakpoints. */
449 	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
450 		if (init_section_contains((void *)kgdb_break[i].bpt_addr, 0))
451 			kgdb_break[i].state = BP_UNDEFINED;
452 	}
453 }
454 
#ifdef CONFIG_KGDB_KDB
/*
 * kdb_dump_stack_on_cpu - dump the stack of a CPU stopped in the debugger
 * @cpu: target CPU number
 *
 * For the current CPU (or on UP) dump directly.  A remote CPU must be
 * parked in the kgdb slave loop; it is asked to print its own backtrace
 * and we spin until it has done so.
 */
void kdb_dump_stack_on_cpu(int cpu)
{
	if (cpu == raw_smp_processor_id() || !IS_ENABLED(CONFIG_SMP)) {
		dump_stack();
		return;
	}

	if (!(kgdb_info[cpu].exception_state & DCPU_IS_SLAVE)) {
		kdb_printf("ERROR: Task on cpu %d didn't stop in the debugger\n",
			   cpu);
		return;
	}

	/*
	 * In general, architectures don't support dumping the stack of a
	 * "running" process that's not the current one.  From the point of
	 * view of the Linux, kernel processes that are looping in the kgdb
	 * slave loop are still "running".  There's also no API (that actually
	 * works across all architectures) that can do a stack crawl based
	 * on registers passed as a parameter.
	 *
	 * Solve this conundrum by asking slave CPUs to do the backtrace
	 * themselves.
	 */
	kgdb_info[cpu].exception_state |= DCPU_WANT_BT;
	while (kgdb_info[cpu].exception_state & DCPU_WANT_BT)
		cpu_relax();
}
#endif
485 
486 /*
487  * Return true if there is a valid kgdb I/O module.  Also if no
488  * debugger is attached a message can be printed to the console about
489  * waiting for the debugger to attach.
490  *
491  * The print_wait argument is only to be true when called from inside
492  * the core kgdb_handle_exception, because it will wait for the
493  * debugger to attach.
494  */
495 static int kgdb_io_ready(int print_wait)
496 {
497 	if (!dbg_io_ops)
498 		return 0;
499 	if (kgdb_connected)
500 		return 1;
501 	if (atomic_read(&kgdb_setting_breakpoint))
502 		return 1;
503 	if (print_wait) {
504 #ifdef CONFIG_KGDB_KDB
505 		if (!dbg_kdb_mode)
506 			pr_crit("waiting... or $3#33 for KDB\n");
507 #else
508 		pr_crit("Waiting for remote debugger\n");
509 #endif
510 	}
511 	return 1;
512 }
513 NOKPROBE_SYMBOL(kgdb_io_ready);
514 
/*
 * kgdb_reenter_check - cope with recursive entry into the debugger
 *
 * Returns 0 when this is not a recursive trap on the active debugger CPU
 * (or when kdb is allowed to debug itself one level), 1 when a breakpoint
 * planted inside KGDB itself was removed and execution can just resume.
 * Unrecoverable recursion ends in panic().
 */
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the break point removed ok at the place exception
	 * occurred, try to recover and print a warning to the end
	 * user because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		pr_crit("re-enter error: breakpoint removed %lx\n", addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		kgdb_io_module_registered = false;
		panic("Recursive entry to debugger");
	}

	pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}
NOKPROBE_SYMBOL(kgdb_reenter_check);
562 
/*
 * Pet the kernel's software watchdogs before resuming: time effectively
 * stopped while CPUs were parked in the debugger, and we don't want
 * spurious softlockup / clocksource / RCU-stall reports afterwards.
 */
static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}
NOKPROBE_SYMBOL(dbg_touch_watchdogs);
570 
/*
 * kgdb_cpu_enter - park this CPU in the debugger (core state machine)
 * @ks: per-exception state for this entry
 * @regs: register snapshot at the exception
 * @exception_state: DCPU_WANT_MASTER to run the debugger, or
 *	DCPU_IS_SLAVE to spin until the master releases us
 *
 * Master CPUs take dbg_master_lock, round up the remaining CPUs, run the
 * kdb or gdb stub, and then release everything.  Slave CPUs spin in
 * cpu_loop until dbg_slave_lock is dropped (or they are promoted to the
 * next master).  Returns this CPU's ret_state (0 = handled).
 */
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();
	u64 time_left;

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	rcu_read_lock();
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	/* Recursive entry on the active CPU: go straight to the stub loop */
	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * CPU will loop if it is a slave or request to become a kgdb
	 * master cpu and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_BT) {
			/* The master asked us to print our own backtrace */
			dump_stack();
			kgdb_info[cpu].exception_state &= ~DCPU_WANT_BT;
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].debuggerinfo = NULL;
			kgdb_info[cpu].task = NULL;
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			rcu_read_unlock();
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		/* Yield mastership and retry from the top */
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);
		rcu_read_unlock();

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	atomic_inc(&ignore_console_lock_warning);

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPU in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus();
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 * (up to roughly one second: MSEC_PER_SEC iterations of udelay(1000))
	 */
	time_left = MSEC_PER_SEC;
	while (kgdb_do_roundup && --time_left &&
	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
		   online_cpus)
		udelay(1000);
	if (!time_left)
		pr_crit("Timed out waiting for secondary CPUs.\n");

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			/*
			 * This is a brutal way to interfere with the debugger
			 * and prevent gdb being used to poke at kernel memory.
			 * This could cause trouble if lockdown is applied when
			 * there is already an active gdb session. For now the
			 * answer is simply "don't do that". Typically lockdown
			 * *will* be applied before the debug core gets started
			 * so only developers using kgdb for fairly advanced
			 * early kernel debug can be bitten by this. Hopefully
			 * they are sophisticated enough to take care of
			 * themselves, especially with help from the lockdown
			 * message printed on the console!
			 */
			if (security_locked_down(LOCKDOWN_DBG_WRITE_KERNEL)) {
				if (IS_ENABLED(CONFIG_KGDB_KDB)) {
					/* Switch back to kdb if possible... */
					dbg_kdb_mode = 1;
					continue;
				} else {
					/* ... otherwise just bail */
					break;
				}
			}
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	dbg_activate_sw_breakpoints();

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	atomic_dec(&ignore_console_lock_warning);

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	/* Remember which task is being single stepped for the next entry */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].debuggerinfo = NULL;
	kgdb_info[cpu].task = NULL;
	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);
	rcu_read_unlock();

	return kgdb_info[cpu].ret_state;
}
NOKPROBE_SYMBOL(kgdb_cpu_enter);
827 
828 /*
829  * kgdb_handle_exception() - main entry point from a kernel exception
830  *
831  * Locking hierarchy:
832  *	interface locks, if any (begin_session)
833  *	kgdb lock (kgdb_active)
834  */
835 int
836 kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
837 {
838 	struct kgdb_state kgdb_var;
839 	struct kgdb_state *ks = &kgdb_var;
840 	/*
841 	 * Avoid entering the debugger if we were triggered due to an oops
842 	 * but panic_timeout indicates the system should automatically
843 	 * reboot on panic. We don't want to get stuck waiting for input
844 	 * on such systems, especially if its "just" an oops.
845 	 */
846 	if (signo != SIGTRAP && panic_timeout)
847 		return 1;
848 
849 	memset(ks, 0, sizeof(struct kgdb_state));
850 	ks->cpu			= raw_smp_processor_id();
851 	ks->ex_vector		= evector;
852 	ks->signo		= signo;
853 	ks->err_code		= ecode;
854 	ks->linux_regs		= regs;
855 
856 	if (kgdb_reenter_check(ks))
857 		return 0; /* Ouch, double exception ! */
858 	if (kgdb_info[ks->cpu].enter_kgdb != 0)
859 		return 0;
860 
861 	return kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
862 }
863 NOKPROBE_SYMBOL(kgdb_handle_exception);
864 
865 /*
866  * GDB places a breakpoint at this function to know dynamically loaded objects.
867  */
868 static int module_event(struct notifier_block *self, unsigned long val,
869 	void *data)
870 {
871 	return 0;
872 }
873 
874 static struct notifier_block dbg_module_load_nb = {
875 	.notifier_call	= module_event,
876 };
877 
/*
 * kgdb_nmicallback - entry point for CPUs rounded up into the debugger
 * @cpu: this CPU's number
 * @regs: register snapshot from before the roundup IPI/NMI
 *
 * Join the active session as a slave when a master currently holds
 * dbg_master_lock and this CPU is not already inside kgdb.  Returns 0
 * when the CPU entered (and has since left) the slave loop, 1 otherwise.
 */
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	/* This CPU answered the roundup; allow future roundups again */
	kgdb_info[cpu].rounding_up = false;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu			= cpu;
	ks->linux_regs		= regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}
NOKPROBE_SYMBOL(kgdb_nmicallback);
899 
/*
 * kgdb_nmicallin - explicit request for this CPU to become kgdb master
 * @cpu: this CPU's number
 * @trapnr: exception vector to report
 * @regs: saved registers
 * @err_code: error code to report
 * @send_ready: atomic set to 1 by the core instead of doing a CPU
 *	roundup (the caller's other CPUs are already waiting on it)
 *
 * Returns 0 when a debugger session was run, 1 when entry was refused
 * (no usable I/O driver, missing @send_ready, or already inside kgdb).
 */
int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
							atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu			= cpu;
		ks->ex_vector		= trapnr;
		ks->signo		= SIGTRAP;
		ks->err_code		= err_code;
		ks->linux_regs		= regs;
		ks->send_ready		= send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}
NOKPROBE_SYMBOL(kgdb_nmicallin);
925 
/*
 * Console hook: mirror kernel console output to an attached gdb via
 * gdbstub_msg_write().  Only active in gdb mode with a debugger attached
 * and no debugger session currently running.
 */
static void kgdb_console_write(struct console *co, const char *s,
   unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * and print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};
947 
/* Handler for "kgdbcon": also route console output to the debugger. */
static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;

	/* Register right away if an I/O driver is already in place;
	 * otherwise kgdb_register_callbacks() will do it later. */
	if (kgdb_io_module_registered && !kgdb_con_registered) {
		register_console(&kgdbcons);
		kgdb_con_registered = 1;
	}

	return 0;
}

early_param("kgdbcon", opt_kgdb_con);
961 
#ifdef CONFIG_MAGIC_SYSRQ
/* SysRq-g handler: break into the debugger (kgdb or kdb). */
static void sysrq_handle_dbg(u8 key)
{
	if (!dbg_io_ops) {
		pr_crit("ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			pr_crit("KGDB or $3#33 for KDB\n");
#else
		pr_crit("Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static const struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(g)",
	.action_msg	= "DEBUG",
};
#endif
987 
/*
 * kgdb_panic - break into the debugger on a kernel panic
 * @msg: the panic message
 *
 * Does nothing when no I/O module is registered, or when panic_timeout
 * requests an automatic reboot (blocking for debugger input would then
 * be wrong).
 */
void kgdb_panic(const char *msg)
{
	if (!kgdb_io_module_registered)
		return;

	/*
	 * We don't want to get stuck waiting for input from user if
	 * "panic_timeout" indicates the system should automatically
	 * reboot on panic.
	 */
	if (panic_timeout)
		return;

	/* Get pending console output onto the screen before we stop */
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", msg);

	kgdb_breakpoint();
}
1009 
/* Satisfy a pending "kgdbwait": trap and wait for the debugger now. */
static void kgdb_initial_breakpoint(void)
{
	/* One-shot: clear the request before breaking in */
	kgdb_break_asap = 0;

	pr_crit("Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/* Arch hook for setup that must wait until normal (late) boot */
void __weak kgdb_arch_late(void)
{
}
1021 
/* Debug-core init run once core kernel services are available. */
void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);

	/* Honor a "kgdbwait" that was deferred past early boot */
	if (kgdb_io_module_registered && kgdb_break_asap)
		kgdb_initial_breakpoint();
}
1032 
1033 static int
1034 dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
1035 {
1036 	/*
1037 	 * Take the following action on reboot notify depending on value:
1038 	 *    1 == Enter debugger
1039 	 *    0 == [the default] detach debug client
1040 	 *   -1 == Do nothing... and use this until the board resets
1041 	 */
1042 	switch (kgdbreboot) {
1043 	case 1:
1044 		kgdb_breakpoint();
1045 		goto done;
1046 	case -1:
1047 		goto done;
1048 	}
1049 	if (!dbg_kdb_mode)
1050 		gdbstub_exit(code);
1051 done:
1052 	return NOTIFY_DONE;
1053 }
1054 
/* Highest priority (INT_MAX): run before other reboot notifiers */
static struct notifier_block dbg_reboot_notifier = {
	.notifier_call		= dbg_notify_reboot,
	.next			= NULL,
	.priority		= INT_MAX,
};
1060 
/* Arm KGDB: hook up arch, module, reboot, sysrq and console plumbing. */
static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		/* During early boot the late hook runs from dbg_late_init() */
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		/* Honor an earlier "kgdbcon" request (see opt_kgdb_con()) */
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}
1079 
/* Disarm KGDB: exact inverse of kgdb_register_callbacks(). */
static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from
	 * handlers and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}
1101 
1102 /**
1103  *	kgdb_register_io_module - register KGDB IO module
1104  *	@new_dbg_io_ops: the io ops vector
1105  *
1106  *	Register it with the KGDB core.
1107  */
1108 int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
1109 {
1110 	struct kgdb_io *old_dbg_io_ops;
1111 	int err;
1112 
1113 	spin_lock(&kgdb_registration_lock);
1114 
1115 	old_dbg_io_ops = dbg_io_ops;
1116 	if (old_dbg_io_ops) {
1117 		if (!old_dbg_io_ops->deinit) {
1118 			spin_unlock(&kgdb_registration_lock);
1119 
1120 			pr_err("KGDB I/O driver %s can't replace %s.\n",
1121 				new_dbg_io_ops->name, old_dbg_io_ops->name);
1122 			return -EBUSY;
1123 		}
1124 		pr_info("Replacing I/O driver %s with %s\n",
1125 			old_dbg_io_ops->name, new_dbg_io_ops->name);
1126 	}
1127 
1128 	if (new_dbg_io_ops->init) {
1129 		err = new_dbg_io_ops->init();
1130 		if (err) {
1131 			spin_unlock(&kgdb_registration_lock);
1132 			return err;
1133 		}
1134 	}
1135 
1136 	dbg_io_ops = new_dbg_io_ops;
1137 
1138 	spin_unlock(&kgdb_registration_lock);
1139 
1140 	if (old_dbg_io_ops) {
1141 		old_dbg_io_ops->deinit();
1142 		return 0;
1143 	}
1144 
1145 	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
1146 
1147 	/* Arm KGDB now. */
1148 	kgdb_register_callbacks();
1149 
1150 	if (kgdb_break_asap &&
1151 	    (!dbg_is_early || IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG)))
1152 		kgdb_initial_breakpoint();
1153 
1154 	return 0;
1155 }
1156 EXPORT_SYMBOL_GPL(kgdb_register_io_module);
1157 
1158 /**
1159  *	kgdb_unregister_io_module - unregister KGDB IO module
1160  *	@old_dbg_io_ops: the io ops vector
1161  *
1162  *	Unregister it with the KGDB core.
1163  */
1164 void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
1165 {
1166 	BUG_ON(kgdb_connected);
1167 
1168 	/*
1169 	 * KGDB is no longer able to communicate out, so
1170 	 * unregister our callbacks and reset state.
1171 	 */
1172 	kgdb_unregister_callbacks();
1173 
1174 	spin_lock(&kgdb_registration_lock);
1175 
1176 	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
1177 	dbg_io_ops = NULL;
1178 
1179 	spin_unlock(&kgdb_registration_lock);
1180 
1181 	if (old_dbg_io_ops->deinit)
1182 		old_dbg_io_ops->deinit();
1183 
1184 	pr_info("Unregistered I/O driver %s, debugger disabled\n",
1185 		old_dbg_io_ops->name);
1186 }
1187 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
1188 
/*
 * dbg_io_get_char - read one character from the debugger I/O driver
 *
 * Returns -1 when no character is pending (NO_POLL_CHAR).  In kdb mode
 * DEL (127) is translated to backspace (8) for terminal convenience;
 * gdb mode always gets the raw character.
 */
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}
1200 
1201 /**
1202  * kgdb_breakpoint - generate breakpoint exception
1203  *
1204  * This function will generate a breakpoint exception.  It is used at the
1205  * beginning of a program to sync up with a debugger and can be used
1206  * otherwise as a quick means to stop program execution and "break" into
1207  * the debugger.
1208  */
1209 noinline void kgdb_breakpoint(void)
1210 {
1211 	atomic_inc(&kgdb_setting_breakpoint);
1212 	wmb(); /* Sync point before breakpoint */
1213 	arch_kgdb_breakpoint();
1214 	wmb(); /* Sync point after breakpoint */
1215 	atomic_dec(&kgdb_setting_breakpoint);
1216 }
1217 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
1218 
/* Handler for "kgdbwait": break in and wait for gdb as early as possible */
static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	/* Break immediately only when the arch supports early debug;
	 * otherwise the break is deferred (see dbg_late_init()). */
	if (kgdb_io_module_registered &&
	    IS_ENABLED(CONFIG_ARCH_HAS_EARLY_DEBUG))
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);
1232