1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2005-2017 Andes Technology Corporation
4 #include <linux/module.h>
5 #include <linux/personality.h>
6 #include <linux/kallsyms.h>
7 #include <linux/hardirq.h>
8 #include <linux/kdebug.h>
9 #include <linux/sched/task_stack.h>
10 #include <linux/uaccess.h>
11 #include <linux/ftrace.h>
13 #include <asm/proc-fns.h>
14 #include <asm/unistd.h>
17 #include <linux/ptrace.h>
18 #include <nds32_intrinsic.h>
20 extern void show_pte(struct mm_struct *mm, unsigned long addr);
23 * Dump out the contents of some memory nicely...
/*
 * dump_mem - hex-dump the memory range [bottom, top) to the console.
 * @lvl:    printk prefix string prepended to every output line
 * @bottom: inclusive start address of the range to dump
 * @top:    exclusive end address of the range to dump
 *
 * Prints 32 bytes (8 words) per line.  Words are fetched with
 * __get_user() so an unreadable address prints as " ????????"
 * instead of faulting the dump itself.
 */
void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space. Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);

	/* One output line per naturally aligned 32-byte chunk. */
	for (first = bottom & ~31; first < top; first += 32) {
		/* 8 slots of " 12345678" plus the trailing NUL. */
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			/* Words before @bottom in the first chunk stay blank. */
			if (p >= bottom && p < top) {
				/* Each word prints into its own 9-char slot. */
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 9, " %08lx", val);
					sprintf(str + i * 9, " ????????");
		/* Only the low 16 bits of the line address are printed. */
		pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
EXPORT_SYMBOL(dump_mem);
/*
 * dump_instr - print the code words around the faulting PC.
 * @regs: register state at the time of the fault
 *
 * Emits the four 32-bit words preceding the PC and the word at the PC
 * (the latter wrapped in parentheses) on a single "Code:" line.  Words
 * are read with __get_user(); on a read fault the remainder of the
 * line is replaced with "bad PC value".
 */
static void dump_instr(struct pt_regs *regs)
	unsigned long addr = instruction_pointer(regs);
	/* 5 words of "00000000 " plus "()" around the PC word and a NUL. */
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space. Note that we now dump the
	 * code first, just in case the backtrace kills us.
	 */
	/* i == 0 is the word at the PC itself. */
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *) addr)[i]);
			/* Parenthesize the word at the PC. */
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
			p += sprintf(p, "bad PC value");
	pr_emerg("Code: %s\n", str);
/* Upper bound on frames walked — guards against corrupt stacks looping. */
#define LOOP_TIMES (100)

/*
 * __dump - print a kernel call trace starting from @base_reg.
 * @tsk:      task whose stack is being walked (used for the ftrace
 *            graph return-address translation)
 * @base_reg: starting stack pointer (no frame pointers) or starting
 *            frame pointer (CONFIG_FRAME_POINTER)
 *
 * Without frame pointers this scans every stack word and prints those
 * that look like kernel text addresses; with frame pointers it follows
 * the FP chain via LP_OFFSET/FP_OFFSET.  Return addresses rewritten by
 * the function-graph tracer are mapped back to the real caller with
 * ftrace_graph_ret_addr().
 */
static void __dump(struct task_struct *tsk, unsigned long *base_reg)
	unsigned long ret_addr;
	int cnt = LOOP_TIMES, graph = 0;
	pr_emerg("Call Trace:\n");
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
		/* No frame pointers: heuristically scan the whole stack. */
		while (!kstack_end(base_reg)) {
			ret_addr = *base_reg++;
			if (__kernel_text_address(ret_addr)) {
				/* Undo any function-graph-tracer rewrite. */
				ret_addr = ftrace_graph_ret_addr(
						tsk, &graph, ret_addr, NULL);
				print_ip_sym(ret_addr);
		/*
		 * Frame-pointer walk: stop on stack end, a misaligned FP,
		 * or an FP below TASK_SIZE (i.e. not a kernel address).
		 */
		while (!kstack_end((void *)base_reg) &&
		       !((unsigned long)base_reg & 0x3) &&
		       ((unsigned long)base_reg >= TASK_SIZE)) {
			unsigned long next_fp;
			ret_addr = base_reg[LP_OFFSET];
			next_fp = base_reg[FP_OFFSET];
			if (__kernel_text_address(ret_addr)) {
				ret_addr = ftrace_graph_ret_addr(
						tsk, &graph, ret_addr, NULL);
				print_ip_sym(ret_addr);
			base_reg = (unsigned long *)next_fp;
/*
 * show_stack - print the call trace of @tsk starting at @sp.
 * @tsk: task to dump; a sleeping task uses its saved cpu_context,
 *       the current task reads the live $sp/$fp register instead
 * @sp:  optional explicit starting stack pointer
 *
 * Picks the walk starting point (stack pointer without frame pointers,
 * frame pointer with CONFIG_FRAME_POINTER) and delegates to __dump().
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
	unsigned long *base_reg;
	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
			/* Sleeping task: use its saved stack pointer. */
			base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
			/* Current task: read the live $sp ("ori rd, $sp, #0" = move). */
			__asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
			/* Sleeping task: use its saved frame pointer. */
			base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
			/* Current task: read the live $fp. */
			__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
	__dump(tsk, base_reg);
158 DEFINE_SPINLOCK(die_lock);
161 * This function is protected against re-entrancy.
/*
 * die - terminal kernel oops handler.
 * @str:  short description of the failure
 * @regs: register state at the point of failure
 * @err:  architecture error code printed with the message
 *
 * Serialized by die_lock so concurrent oopses do not interleave their
 * output.  Prints the oops banner, CPU, process info and — for faults
 * in kernel context — a dump of the stack around regs->sp.
 */
void die(const char *str, struct pt_regs *regs, int err)
	struct task_struct *tsk = current;
	/* Monotonic oops counter shown as [#N] in the banner. */
	static int die_counter;

	spin_lock_irq(&die_lock);

	pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);

	pr_emerg("CPU: %i\n", smp_processor_id());

	pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
		 tsk->comm, tsk->pid, end_of_stack(tsk));

	if (!user_mode(regs) || in_interrupt()) {
		/* Dump from sp up to the end of the current page. */
		dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);

	spin_unlock_irq(&die_lock);
/*
 * die_if_kernel - oops via die() only when @regs is a kernel-mode
 * context; a fault raised from user mode is not fatal to the kernel.
 */
void die_if_kernel(const char *str, struct pt_regs *regs, int err)
/*
 * bad_syscall - handle an out-of-range/unknown syscall number.
 * @n:    the offending syscall number
 * @regs: register state at the syscall entry
 *
 * Non-Linux personalities get SIGSEGV; otherwise the task receives
 * SIGILL (ILL_ILLTRP) pointing at the trapping instruction, and a
 * kernel-mode caller oopses via die_if_kernel().  Returns the value
 * already in uregs[0] so the syscall return path is unchanged.
 */
int bad_syscall(int n, struct pt_regs *regs)
	if (current->personality != PER_LINUX) {
		send_sig(SIGSEGV, current, 1);
		return regs->uregs[0];

	/* PC has advanced past the trap; report the instruction itself. */
	force_sig_fault(SIGILL, ILL_ILLTRP,
			(void __user *)instruction_pointer(regs) - 4, current);
	die_if_kernel("Oops - bad syscall", regs, n);
	return regs->uregs[0];
/* Report a corrupt PTE value, with the reporting source location. */
void __pte_error(const char *file, int line, unsigned long val)
	pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
/* Report a corrupt PMD value, with the reporting source location. */
void __pmd_error(const char *file, int line, unsigned long val)
	pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
/* Report a corrupt PGD value, with the reporting source location. */
void __pgd_error(const char *file, int line, unsigned long val)
	pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
228 extern char *exception_vector, *exception_vector_end;
/*
 * trap_init - generic trap-table setup hook called by init/main.c.
 * The real vector installation happens in early_trap_init() below.
 */
void __init trap_init(void)
/*
 * early_trap_init - install the exception vector table.
 *
 * Copies the exception vector code to PAGE_OFFSET, programs the IVB
 * (interrupt vector base) system register for 16-byte vector entries
 * at IVB_BASE, initializes the interrupt mask, and write-back
 * invalidates the cache over the freshly copied vectors.
 */
void __init early_trap_init(void)
	unsigned long ivb = 0;
	unsigned long base = PAGE_OFFSET;

	/* Copy the in-kernel vector image to its runtime location. */
	memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
	       ((unsigned long)&exception_vector_end -
		(unsigned long)&exception_vector));
	ivb = __nds32__mfsr(NDS32_SR_IVB);
	/* Check platform support. */
	if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
		("IVIC mode is not allowed on the platform with interrupt controller\n");
	/* Select 16-byte vector entry size and set the vector base. */
	__nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
		      IVB_BASE, NDS32_SR_IVB);
	__nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);

	/*
	 * 0x800 = 128 vectors * 16byte.
	 * It should be enough to flush a page.
	 */
	cpu_cache_wbinval_page(base, true);
/*
 * send_sigtrap - deliver SIGTRAP to @tsk for a debug-related trap.
 * @tsk:        task receiving the signal
 * @regs:       trapping register state; the fault address reported to
 *              userspace is the instruction pointer
 * @error_code: saved into thread.error_code for later inspection
 * @si_code:    siginfo code (e.g. TRAP_BRKPT)
 */
void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
		  int error_code, int si_code)
	/* Record trap details in the thread struct before signalling. */
	tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
	tsk->thread.error_code = error_code;

	force_sig_fault(SIGTRAP, si_code,
			(void __user *)instruction_pointer(regs), tsk);
/*
 * do_debug_trap - handle a debug exception.
 * @entry: exception entry number
 * @addr:  faulting address
 * @type:  exception itype word
 * @regs:  trapping register state
 *
 * Gives kernel debuggers first chance via notify_die(); user-mode
 * traps become SIGTRAP/TRAP_BRKPT, kernel-mode traps try the
 * exception fixup table and die() if no fixup applies.
 */
void do_debug_trap(unsigned long entry, unsigned long addr,
		   unsigned long type, struct pt_regs *regs)
	/* Let registered debuggers claim the exception first. */
	if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)

	if (user_mode(regs)) {
		send_sigtrap(current, regs, 0, TRAP_BRKPT);
		/* Kernel mode: only an exception fixup can save us. */
		if (!fixup_exception(regs))
			die("unexpected kernel_trap", regs, 0);
/*
 * unhandled_interruption - catch-all for interrupts with no handler.
 * Kernel-mode occurrences are fatal; user-mode tasks get SIGKILL.
 */
void unhandled_interruption(struct pt_regs *regs)
	pr_emerg("unhandled_interruption\n");
	if (!user_mode(regs))
	force_sig(SIGKILL, current);
/*
 * unhandled_exceptions - catch-all for exceptions with no handler.
 * Logs the entry number, faulting address and itype; kernel-mode
 * occurrences are fatal, user-mode tasks get SIGKILL.
 */
void unhandled_exceptions(unsigned long entry, unsigned long addr,
			  unsigned long type, struct pt_regs *regs)
	pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
	if (!user_mode(regs))
	force_sig(SIGKILL, current);
305 extern int do_page_fault(unsigned long entry, unsigned long addr,
306 unsigned int error_code, struct pt_regs *regs);
309 * 2:DEF dispatch for TLB MISC exception handler
/*
 * do_dispatch_tlb_misc - second-level dispatch for TLB MISC exceptions.
 *
 * Masks the itype down to instruction/etype bits; etype values below 5
 * are permission-style faults routed to do_page_fault(), anything else
 * falls through to unhandled_exceptions().
 */
void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
			  unsigned long type, struct pt_regs *regs)
	type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
	if ((type & ITYPE_mskETYPE) < 5) {
		/* Permission exceptions */
		do_page_fault(entry, addr, type, regs);
		unhandled_exceptions(entry, addr, type, regs);
/*
 * do_revinsn - handle execution of a reserved instruction.
 * Kernel-mode occurrences are fatal; user-mode tasks get SIGILL.
 */
void do_revinsn(struct pt_regs *regs)
	pr_emerg("Reserved Instruction\n");
	if (!user_mode(regs))
	force_sig(SIGILL, current);
332 #ifdef CONFIG_ALIGNMENT_TRAP
333 extern int unalign_access_mode;
334 extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
/*
 * do_dispatch_general - second-level dispatch for general exceptions.
 * @entry: exception entry number
 * @addr:  faulting address
 * @itype: raw interruption type register (SWID + CPID + etype fields)
 * @regs:  trapping register state
 *
 * Routes by etype: alignment checks (optionally fixed up in software
 * when CONFIG_ALIGNMENT_TRAP and unalign_access_mode allow), reserved
 * instructions, coprocessor/FPU exceptions, and the v3 EDM debugging
 * trap workaround.  Anything unrecognized ends in
 * unhandled_exceptions().
 */
void do_dispatch_general(unsigned long entry, unsigned long addr,
			 unsigned long itype, struct pt_regs *regs,
	/* Software ID and masked exception type extracted from itype. */
	unsigned int swid = itype >> ITYPE_offSWID;
	unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);
	if (type == ETYPE_ALIGNMENT_CHECK) {
#ifdef CONFIG_ALIGNMENT_TRAP
		/* Alignment check */
		/* Try software fixup for user-mode unaligned accesses. */
		if (user_mode(regs) && unalign_access_mode) {
			ret = do_unaligned_access(addr, regs);
			("Unhandled unaligned access exception\n");
		do_page_fault(entry, addr, type, regs);
	} else if (type == ETYPE_RESERVED_INSTRUCTION) {
		/* Reserved instruction */
	} else if (type == ETYPE_COPROCESSOR) {
#if IS_ENABLED(CONFIG_FPU)
		unsigned int fucop_exist = __nds32__mfsr(NDS32_SR_FUCOP_EXIST);
		unsigned int cpid = ((itype & ITYPE_mskCPID) >> ITYPE_offCPID);

		/* Only CP0-as-FPU exceptions are serviced here. */
		if ((cpid == FPU_CPID) &&
		    (fucop_exist & FUCOP_EXIST_mskCP0ISFPU)) {
			unsigned int subtype = (itype & ITYPE_mskSTYPE);

			if (true == do_fpu_exception(subtype, regs))
		unhandled_exceptions(entry, addr, type, regs);
	} else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
		/* trap, used on v3 EDM target debugging workaround */
		/*
		 * DIPC(OIPC) is passed as parameter before
		 * interrupt is enabled, so the DIPC will not be corrupted
		 * even though interrupts are coming in
		 */
		/*
		 * 2. update pt_regs ipc with oipc
		 * 3. update pt_regs ipsw (clear DEX)
		 */
		/* Restore the original IPC saved before interrupts came in. */
		__asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));

		/* DEX already set means we may be nesting debug exceptions. */
		if (regs->pipsw & PSW_mskDEX) {
			("Nested Debug exception is possibly happened\n");
			pr_emerg("ipc:%08x pipc:%08x\n",
				 (unsigned int)regs->ipc,
				 (unsigned int)regs->pipc);

		do_debug_trap(entry, addr, itype, regs);
		regs->ipsw &= ~PSW_mskDEX;
		unhandled_exceptions(entry, addr, type, regs);