1 // SPDX-License-Identifier: GPL-2.0
3 * arch/alpha/kernel/traps.c
5 * (C) Copyright 1994 Linus Torvalds
9 * This file initializes the trap entry points
12 #include <linux/cpu.h>
13 #include <linux/jiffies.h>
15 #include <linux/sched/signal.h>
16 #include <linux/sched/debug.h>
17 #include <linux/tty.h>
18 #include <linux/delay.h>
19 #include <linux/extable.h>
20 #include <linux/kallsyms.h>
21 #include <linux/ratelimit.h>
23 #include <asm/gentrap.h>
24 #include <linux/uaccess.h>
25 #include <asm/unaligned.h>
26 #include <asm/sysinfo.h>
27 #include <asm/hwrpb.h>
28 #include <asm/mmu_context.h>
29 #include <asm/special_insns.h>
/*
 * NOTE(review): this extract elides interior lines of the original file
 * (each content line carries its original file line number as a leading
 * token).  The enclosing function header/braces and parts of the asm
 * template are not visible here; only what is shown is documented.
 *
 * Probe for an SRM PALcode bug: install a stub instruction-fault
 * handler via CALL_PAL wrent, then deliberately raise an opDEC fault
 * with a cvttq on $f31.  Depending on whether the PALcode reports the
 * PC of the faulting insn or the insn after it (the architected
 * behavior), this leaves the per-fault PC correction in opDEC_fix
 * (applied later in do_entIF's opDEC path).
 */
33 /* Work-around for some SRMs which mishandle opDEC faults. */
40 __asm__ __volatile__ (
41 /* Load the address of... */
43 /* A stub instruction fault handler. Just add 4 to the
49 /* Install the instruction fault handler. */
51 " call_pal %[wrent]\n"
52 /* With that in place, the fault from the round-to-minf fp
53 insn will arrive either at the "lda 4" insn (bad) or one
54 past that (good). This places the correct fixup in %0. */
56 " cvttq/svm $f31,$f31\n"
58 : [fix] "=r" (opDEC_fix)
/* PALcode entry constants are passed as immediate ("n") operands. */
59 : [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
/* The PALcode call clobbers these integer registers. */
60 : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");
63 printk("opDEC fixup enabled.\n");
/*
 * dik_show_regs - dump a pt_regs trap frame in register-name form
 * (v0/t0../a0../gp/sp), plus the callee-saved s0-s6 block when the
 * caller supplies it.
 *
 * @regs:  trap frame saved on kernel entry.
 * @r9_15: separately-saved r9-r15 area, indexed by register number
 *         (slots 9..15 are read below); may describe s0-s6.
 *
 * NOTE(review): the storage-class/return-type line, braces, and some
 * interior lines are elided in this extract.
 */
67 dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
69 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
70 regs->pc, regs->r26, regs->ps, print_tainted());
/* %pSR resolves a kernel text address to symbol+offset. */
71 printk("pc is at %pSR\n", (void *)regs->pc);
72 printk("ra is at %pSR\n", (void *)regs->r26);
73 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
74 regs->r0, regs->r1, regs->r2);
75 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
76 regs->r3, regs->r4, regs->r5);
77 printk("t5 = %016lx t6 = %016lx t7 = %016lx\n",
78 regs->r6, regs->r7, regs->r8);
/* s0-s6 come from the separate r9-r15 save area, not the trap frame. */
81 printk("s0 = %016lx s1 = %016lx s2 = %016lx\n",
82 r9_15[9], r9_15[10], r9_15[11]);
83 printk("s3 = %016lx s4 = %016lx s5 = %016lx\n",
84 r9_15[12], r9_15[13], r9_15[14]);
85 printk("s6 = %016lx\n", r9_15[15]);
88 printk("a0 = %016lx a1 = %016lx a2 = %016lx\n",
89 regs->r16, regs->r17, regs->r18);
90 printk("a3 = %016lx a4 = %016lx a5 = %016lx\n",
91 regs->r19, regs->r20, regs->r21);
92 printk("t8 = %016lx t9 = %016lx t10= %016lx\n",
93 regs->r22, regs->r23, regs->r24);
94 printk("t11= %016lx pv = %016lx at = %016lx\n",
95 regs->r25, regs->r27, regs->r28);
/* regs+1 = first address past the trap frame, i.e. the pre-trap SP. */
96 printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
/*
 * Human-readable software calling-convention names for the 32 Alpha
 * integer registers, indexed by hardware register number ($0..$31).
 * (Fix: removed the extraction line-number junk that had been embedded
 * at the start of each source line, which made the declaration
 * non-compiling.)
 */
static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
			     "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
			     "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
			     "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
/*
 * dik_show_code - hex-dump the instruction stream around a faulting PC:
 * six 32-bit words before through one after, with the faulting word
 * (i == 0) bracketed as <xxxxxxxx>.  Uses __get_user so an unmapped
 * word terminates the dump instead of faulting.
 *
 * NOTE(review): the return-type line, braces, and loop-interior lines
 * are elided in this extract.
 */
110 dik_show_code(unsigned int *pc)
115 for (i = -6; i < 2; i++) {
117 if (__get_user(insn, (unsigned int __user *)pc + i))
119 printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
/*
 * dik_show_trace - scan raw stack words upward from @sp and print every
 * value that lies in kernel text as a probable return address.
 *
 * @sp:     stack position to start scanning from.
 * @loglvl: printk log-level prefix applied to every line.
 *
 * NOTE(review): the return-type line, braces, and several interior
 * lines (loop increment, iteration cap) are elided in this extract.
 */
125 dik_show_trace(unsigned long *sp, const char *loglvl)
128 printk("%sTrace:\n", loglvl);
/* Stop when sp crosses a 0x2000 boundary — presumably the top of the
   current kernel stack region; TODO confirm against THREAD_SIZE. */
129 while (0x1ff8 & (unsigned long) sp) {
130 extern char _stext[], _etext[];
131 unsigned long tmp = *sp;
/* Only words pointing into kernel text are treated as return PCs. */
133 if (!is_kernel_text(tmp))
135 printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
137 printk("%s ...", loglvl);
141 printk("%s\n", loglvl);
/* Number of 64-bit stack words dumped by show_stack() before giving up.
   (Fix: removed the embedded extraction line-number junk "144" that
   made the declaration non-compiling.) */
static int kstack_depth_to_print = 24;
/*
 * show_stack - dump raw stack contents followed by a backtrace.
 *
 * @task:   task whose stack to show (behavior for non-NULL task is in
 *          elided lines — TODO confirm).
 * @sp:     stack pointer to start from; NULL means "this CPU, here".
 * @loglvl: printk log level for all output.
 *
 * NOTE(review): braces and several interior lines (stack init, line
 * wrapping, loop increment) are elided in this extract.
 */
146 void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
148 unsigned long *stack;
152 * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
153 * back trace for this cpu.
/* NULL sp: take the address of a local to dump the current stack. */
156 sp=(unsigned long*)&sp;
159 for(i=0; i < kstack_depth_to_print; i++) {
/* Stop at a THREAD_SIZE boundary — the end of this kernel stack. */
160 if (((long) stack & (THREAD_SIZE-1)) == 0)
165 printk("%s ", loglvl);
169 pr_cont("%016lx", *stack++);
172 dik_show_trace(sp, loglvl);
/*
 * die_if_kernel - fatal-oops path: print the reason string, dump
 * registers, code, and a backtrace, taint the kernel, and kill the
 * task.  A thread flag guards against recursing if the dump itself
 * faults.
 *
 * NOTE(review): the return-type line, braces, and interior lines
 * (user-mode early-out, console locking, etc.) are elided here.
 */
176 die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
181 printk("CPU %d ", hard_smp_processor_id());
183 printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
184 dik_show_regs(regs, r9_15);
185 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
/* regs+1 is the first word past the trap frame: the pre-trap SP. */
186 dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
187 dik_show_code((unsigned int *)regs->pc);
/* Recursion guard: if this flag was already set we are dying inside
   the dump itself — don't start another dump. */
189 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
190 printk("die_if_kernel recursion detected.\n");
194 make_task_dead(SIGSEGV);
197 #ifndef CONFIG_MATHEMU
/* No in-kernel FP emulator configured: install do-nothing stubs behind
   function pointers so a modular math-emu can hook itself in later
   (hence the GPL exports). */
198 static long dummy_emul(void) { return 0; }
199 long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
200 = (void *)dummy_emul;
201 EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
202 long (*alpha_fp_emul) (unsigned long pc)
203 = (void *)dummy_emul;
204 EXPORT_SYMBOL_GPL(alpha_fp_emul);
/* Built-in emulator: plain prototypes for the real implementations.
   NOTE(review): the #else/#endif lines pairing the #ifndef above are
   not visible in this extract. */
206 long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
207 long alpha_fp_emul (unsigned long pc);
/*
 * do_entArith - arithmetic (FP) trap entry.
 *
 * If the software-completion summary bit is set, hand the insn to the
 * FP emulator: on CPUs with precise exceptions the faulting insn is
 * simply at pc-4, otherwise the imprecise scanner is used.  A fault in
 * kernel mode is fatal; in user mode it becomes SIGFPE.
 *
 * NOTE(review): the asmlinkage/return-type line, braces, and some
 * interior lines (summary-bit test, si_code check) are elided here.
 */
211 do_entArith(unsigned long summary, unsigned long write_mask,
212 struct pt_regs *regs)
214 long si_code = FPE_FLTINV;
217 /* Software-completion summary bit is set, so try to
218 emulate the instruction. If the processor supports
219 precise exceptions, we don't have to search. */
220 if (!amask(AMASK_PRECISE_TRAP))
221 si_code = alpha_fp_emul(regs->pc - 4);
223 si_code = alpha_fp_emul_imprecise(regs, write_mask);
227 die_if_kernel("Arithmetic fault", regs, 0, NULL);
229 send_sig_fault_trapno(SIGFPE, si_code, (void __user *) regs->pc, 0, current);
/*
 * do_entIF - instruction-fault trap entry: dispatches on @type
 * (0 breakpoint, 1 bugcheck, 2 gentrap, 3 FEN fault, plus the opDEC
 * path for illegal/emulated FP instructions).
 *
 * NOTE(review): the asmlinkage/return-type line, braces, the switch
 * statement header, and many interior lines are elided in this
 * extract; only visible lines are documented.
 */
233 do_entIF(unsigned long type, struct pt_regs *regs)
237 if (type == 3) { /* FEN fault */
238 /* Irritating users can call PAL_clrfen to disable the
239 FPU for the process. The kernel will then trap in
240 do_switch_stack and undo_switch_stack when we try
241 to save and restore the FP registers.
243 Given that GCC by default generates code that uses the
244 FP registers, PAL_clrfen is not useful except for DoS
245 attacks. So turn the bleeding FPU back on and be done
/* Re-enable the FPU in the PCB and reload it. */
247 current_thread_info()->pcb.flags |= 1;
/* NOTE(review): "¤t" below is mojibake — the original source reads
   "&current_thread_info()->pcb" ("&curren" was mangled into the HTML
   entity for the currency sign).  Fix at the source of this extract. */
248 __reload_thread(¤t_thread_info()->pcb);
251 if (!user_mode(regs)) {
/* BUG()-style kernel bugcheck: the words after the trapping insn
   encode a file-name pointer (split across two words) and line. */
253 const unsigned int *data
254 = (const unsigned int *) regs->pc;
255 printk("Kernel bug at %s:%d\n",
256 (const char *)(data[1] | (long)data[2] << 32),
259 #ifdef CONFIG_ALPHA_WTINT
261 /* If CALL_PAL WTINT is totally unsupported by the
262 PALcode, e.g. MILO, "emulate" it by overwriting
265 = (unsigned int *) regs->pc - 1;
266 if (*pinsn == PAL_wtint) {
267 *pinsn = 0x47e01400; /* mov 0,$0 */
273 #endif /* ALPHA_WTINT */
274 die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
279 case 0: /* breakpoint */
/* If ptrace had planted this bpt, rewind the PC over it. */
280 if (ptrace_cancel_bpt(current)) {
281 regs->pc -= 4; /* make pc point to former bpt */
284 send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc,
288 case 1: /* bugcheck */
289 send_sig_fault_trapno(SIGTRAP, TRAP_UNK,
290 (void __user *) regs->pc, 0, current);
293 case 2: /* gentrap */
/* r16 carries the gentrap code; the mapping of codes to signo/code
   pairs is in elided lines. */
294 switch ((long) regs->r16) {
351 send_sig_fault_trapno(signo, code, (void __user *) regs->pc,
/* opDEC path (illegal FP insn), EV4 only from here down. */
356 if (implver() == IMPLVER_EV4) {
359 /* The some versions of SRM do not handle
360 the opDEC properly - they return the PC of the
361 opDEC fault, not the instruction after as the
362 Alpha architecture requires. Here we fix it up.
363 We do this by intentionally causing an opDEC
364 fault during the boot sequence and testing if
365 we get the correct PC. If not, we set a flag
366 to correct it every time through. */
367 regs->pc += opDEC_fix;
369 /* EV4 does not implement anything except normal
370 rounding. Everything else will come here as
371 an illegal instruction. Emulate them. */
372 si_code = alpha_fp_emul(regs->pc - 4);
376 send_sig_fault_trapno(SIGFPE, si_code,
377 (void __user *) regs->pc,
385 default: /* unexpected instruction-fault type */
/* Fallthrough for all unhandled cases: SIGILL at the faulting PC. */
389 send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, current);
392 /* There is an ifdef in the PALcode in MILO that enables a
393 "kernel debugging entry point" as an unprivileged call_pal.
395 We don't want to have anything to do with it, but unfortunately
396 several versions of MILO included in distributions have it enabled,
397 and if we don't put something on the entry point we'll oops. */
/*
 * do_entDbg - stub handler for MILO's debug entry point: fatal in
 * kernel mode, SIGILL in user mode.  NOTE(review): the asmlinkage/
 * return-type line and braces are elided in this extract.
 */
400 do_entDbg(struct pt_regs *regs)
402 die_if_kernel("Instruction fault", regs, 0, NULL);
404 force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc);
409 * entUna has a different register layout to be reasonably simple. It
410 * needs access to all the integer registers (the kernel doesn't use
411 * fp-regs), and it needs to have them in order for simpler access.
413 * Due to the non-standard register layout (and because we don't want
414 * to handle floating-point regs), user-mode unaligned accesses are
415 * handled separately by do_entUnaUser below.
417 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
418 * on a gp-register unaligned load/store, something is _very_ wrong
419 * in the kernel anyway..
/* struct allregs (name/braces elided in this extract): all 32 integer
   registers in order, followed by ps, pc, gp and the separately-saved
   argument registers a0-a2. */
422 unsigned long regs[32];
423 unsigned long ps, pc, gp, a0, a1, a2;
/* Per-mode (kernel/user) unaligned-access statistics. */
426 struct unaligned_stat {
427 unsigned long count, va, pc;
431 /* Macro for exception fixup code to access integer registers. */
/* r16-r18 (a0-a2) are read from the trailing a0/a1/a2 fields instead
   of regs[16..18]: (r)+19 indexes past regs[32]+ps+pc+gp when the
   struct is viewed as an array of unsigned long. */
432 #define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])
/*
 * do_entUna - kernel-mode unaligned access fixup.
 *
 * @va:     the misaligned virtual address.
 * @opcode: major opcode of the faulting load/store.
 * @reg:    target/source register number.
 * @regs:   entUna's special all-registers frame (see struct above).
 *
 * Emulates the access with ldq_u/stq_u pairs; on an unexpected fault
 * inside the emulation, forwards to the exception table if possible,
 * otherwise dumps state and kills the task.
 *
 * NOTE(review): the asmlinkage/return-type line, braces, the switch
 * header, the extract/insert/mask asm lines, and the exception-table
 * entries of each asm sequence are elided in this extract.
 */
436 do_entUna(void * va, unsigned long opcode, unsigned long reg,
437 struct allregs *regs)
439 long error, tmp1, tmp2, tmp3, tmp4;
/* pc-4 is the faulting insn (pc already advanced past it). */
440 unsigned long pc = regs->pc - 4;
441 unsigned long *_regs = regs->regs;
442 const struct exception_table_entry *fixup;
/* Slot 0 of the stats array tracks kernel-mode unaligned accesses. */
444 unaligned[0].count++;
445 unaligned[0].va = (unsigned long) va;
446 unaligned[0].pc = pc;
448 /* We don't want to use the generic get/put unaligned macros as
449 we want to trap exceptions. Only if we actually get an
450 exception will we decide whether we should have caught it. */
453 case 0x0c: /* ldwu */
/* Two overlapping ldq_u loads cover the unaligned word; the byte
   extract/merge insns between them are elided from this extract. */
454 __asm__ __volatile__(
455 "1: ldq_u %1,0(%3)\n"
456 "2: ldq_u %2,1(%3)\n"
462 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
466 una_reg(reg) = tmp1|tmp2;
/* (elided case label) ldl: same scheme over 4 bytes, sign-extended. */
470 __asm__ __volatile__(
471 "1: ldq_u %1,0(%3)\n"
472 "2: ldq_u %2,3(%3)\n"
478 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
482 una_reg(reg) = (int)(tmp1|tmp2);
/* (elided case label) ldq: same scheme over 8 bytes. */
486 __asm__ __volatile__(
487 "1: ldq_u %1,0(%3)\n"
488 "2: ldq_u %2,7(%3)\n"
494 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
498 una_reg(reg) = tmp1|tmp2;
501 /* Note that the store sequences do not indicate that they change
502 memory because it _should_ be affecting nothing in this context.
503 (Otherwise we have other, much larger, problems.) */
/* (elided case label) stw: read-modify-write — load both covering
   quadwords, merge the new bytes (insns elided), store both back. */
505 __asm__ __volatile__(
506 "1: ldq_u %2,1(%5)\n"
507 "2: ldq_u %1,0(%5)\n"
514 "3: stq_u %2,1(%5)\n"
515 "4: stq_u %1,0(%5)\n"
521 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
522 "=&r"(tmp3), "=&r"(tmp4)
523 : "r"(va), "r"(una_reg(reg)), "0"(0));
/* (elided case label) stl: same read-modify-write over 4 bytes. */
529 __asm__ __volatile__(
530 "1: ldq_u %2,3(%5)\n"
531 "2: ldq_u %1,0(%5)\n"
538 "3: stq_u %2,3(%5)\n"
539 "4: stq_u %1,0(%5)\n"
545 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
546 "=&r"(tmp3), "=&r"(tmp4)
547 : "r"(va), "r"(una_reg(reg)), "0"(0));
/* (elided case label) stq: same read-modify-write over 8 bytes. */
553 __asm__ __volatile__(
554 "1: ldq_u %2,7(%5)\n"
555 "2: ldq_u %1,0(%5)\n"
562 "3: stq_u %2,7(%5)\n"
563 "4: stq_u %1,0(%5)\n"
569 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
570 "=&r"(tmp3), "=&r"(tmp4)
571 : "r"(va), "r"(una_reg(reg)), "0"(0));
/* Unhandled opcode in kernel mode: fatal. */
577 printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
578 pc, va, opcode, reg);
579 make_task_dead(SIGSEGV);
582 /* Ok, we caught the exception, but we don't want it. Is there
583 someone to pass it along to? */
584 if ((fixup = search_exception_tables(pc)) != 0) {
586 newpc = fixup_exception(una_reg, fixup, pc);
588 printk("Forwarding unaligned exception at %lx (%lx)\n",
596 * Yikes! No one to forward the exception to.
597 * Since the registers are in a weird format, dump them ourselves.
600 printk("%s(%d): unhandled unaligned exception\n",
601 current->comm, task_pid_nr(current));
603 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
604 pc, una_reg(26), regs->ps);
605 printk("r0 = %016lx r1 = %016lx r2 = %016lx\n",
606 una_reg(0), una_reg(1), una_reg(2));
607 printk("r3 = %016lx r4 = %016lx r5 = %016lx\n",
608 una_reg(3), una_reg(4), una_reg(5));
609 printk("r6 = %016lx r7 = %016lx r8 = %016lx\n",
610 una_reg(6), una_reg(7), una_reg(8));
611 printk("r9 = %016lx r10= %016lx r11= %016lx\n",
612 una_reg(9), una_reg(10), una_reg(11));
613 printk("r12= %016lx r13= %016lx r14= %016lx\n",
614 una_reg(12), una_reg(13), una_reg(14));
615 printk("r15= %016lx\n", una_reg(15));
616 printk("r16= %016lx r17= %016lx r18= %016lx\n",
617 una_reg(16), una_reg(17), una_reg(18));
618 printk("r19= %016lx r20= %016lx r21= %016lx\n",
619 una_reg(19), una_reg(20), una_reg(21));
620 printk("r22= %016lx r23= %016lx r24= %016lx\n",
621 una_reg(22), una_reg(23), una_reg(24));
622 printk("r25= %016lx r27= %016lx r28= %016lx\n",
623 una_reg(25), una_reg(27), una_reg(28));
624 printk("gp = %016lx sp = %p\n", regs->gp, regs+1);
626 dik_show_code((unsigned int *)pc);
627 dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
/* Same recursion guard as die_if_kernel. */
629 if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
630 printk("die_if_kernel recursion detected.\n");
634 make_task_dead(SIGSEGV);
638 * Convert an s-floating point value in memory format to the
639 * corresponding value in register format. The exponent
640 * needs to be remapped to preserve non-finite values
641 * (infinities, not-a-numbers, denormals).
643 static inline unsigned long
644 s_mem_to_reg (unsigned long s_mem)
/* Decompose the 32-bit S-format word: 23-bit fraction, sign, and the
   8-bit exponent split into its MSB and low 7 bits. */
646 unsigned long frac = (s_mem >> 0) & 0x7fffff;
647 unsigned long sign = (s_mem >> 31) & 0x1;
648 unsigned long exp_msb = (s_mem >> 30) & 0x1;
649 unsigned long exp_low = (s_mem >> 23) & 0x7f;
/* Widen the exponent to T-format's 11 bits for ordinary values. */
652 exp = (exp_msb << 10) | exp_low; /* common case */
/* Special exponents: all-ones (Inf/NaN) and all-zeros (zero/denormal)
   need different widening — the branch bodies are elided in this
   extract, as are the opening brace and several other lines. */
654 if (exp_low == 0x7f) {
658 if (exp_low == 0x00) {
/* Reassemble in T-register layout: sign:63, exp:62-52, frac:51-29. */
664 return (sign << 63) | (exp << 52) | (frac << 29);
/*
 * Convert an s-floating point value in register format to the
 * corresponding value in memory format.
 *
 * In the register (T-format) image of an S-float, the sign and
 * exponent MSB sit in bits 63:62 and the remaining exponent/fraction
 * bits in 58:29; the 32-bit memory (S-format) word packs those same
 * fields into bits 31:30 and 29:0 respectively.  The first term moves
 * bits 63:62 down to 31:30; the shift-left-5/shift-right-34 pair
 * extracts bits 58:29 into 29:0.
 *
 * (Fix: restored the brace lines dropped by the extraction and removed
 * the embedded line-number junk so the function compiles again.)
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
}
678 * Handle user-level unaligned fault. Handling user-level unaligned
679 * faults is *extremely* slow and produces nasty messages. A user
680 * program *should* fix unaligned faults ASAP.
682 * Notice that we have (almost) the regular kernel stack layout here,
683 * so finding the appropriate registers is a little more difficult
684 * than in the kernel case.
686 * Finally, we handle regular integer load/stores only. In
687 * particular, load-linked/store-conditionally and floating point
688 * load/stores are not supported. The former make no sense with
689 * unaligned faults (they are guaranteed to fail) and I don't think
690 * the latter will occur in any decent program.
692 * Sigh. We *do* have to handle some FP operations, because GCC will
693 * uses them as temporary storage for integer memory to memory copies.
694 * However, we need to deal with stt/ldt and sts/lds only.
/* Bit masks over major opcodes: which opcodes are integer accesses
   and which are stores (of any kind). */
697 #define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \
698 | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \
699 | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \
700 | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */
702 #define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \
703 | 1L << 0x2c | 1L << 0x2d /* stl stq */ \
704 | 1L << 0x0d | 1L << 0x0e ) /* stw stb */
/* Hand-rolled offsetof(struct pt_regs, x). */
706 #define R(x) ((size_t) &((struct pt_regs *)0)->x)
/* Byte offset (relative to the pt_regs base) of each integer register
   in the user trap frame; r9-r15 live *before* pt_regs, hence the
   negative offsets.  NOTE(review): the final entries and closing
   brace of this initializer are elided in this extract. */
708 static int unauser_reg_offsets[32] = {
709 R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
710 /* r9 ... r15 are stored in front of regs. */
711 -56, -48, -40, -32, -24, -16, -8,
712 R(r16), R(r17), R(r18),
713 R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
714 R(r27), R(r28), R(gp),
/*
 * do_entUnaUser - user-mode unaligned access fixup.
 *
 * @va:     misaligned user virtual address.
 * @opcode: major opcode of the faulting insn.
 * @reg:    target/source register number.
 * @regs:   regular pt_regs user trap frame.
 *
 * Honors the per-thread UAC (unaligned access control) flags, emulates
 * the access with ldq_u/stq_u pairs (FP values round-trip through
 * fake_reg), and raises SIGSEGV/SIGBUS as appropriate.
 *
 * NOTE(review): the asmlinkage/return-type line, braces, switch
 * headers, goto labels and several interior lines (including the
 * extract/insert/mask asm lines and exception-table entries) are
 * elided in this extract.
 */
721 do_entUnaUser(void __user * va, unsigned long opcode,
722 unsigned long reg, struct pt_regs *regs)
/* Rate-limit the nag message: at most 5 prints per 5*HZ. */
724 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
726 unsigned long tmp1, tmp2, tmp3, tmp4;
/* Writes land in *reg_addr; it defaults to a scratch slot (fake_reg)
   and is repointed at the real saved register for integer accesses. */
727 unsigned long fake_reg, *reg_addr = &fake_reg;
731 /* Check the UAC bits to decide what the user wants us to do
732 with the unaligned access. */
734 if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
735 if (__ratelimit(&ratelimit)) {
736 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
737 current->comm, task_pid_nr(current),
738 regs->pc - 4, va, opcode, reg);
/* UAC_SIGBUS: deliver SIGBUS instead of fixing up. */
741 if ((current_thread_info()->status & TS_UAC_SIGBUS))
743 /* Not sure why you'd want to use this, but... */
744 if ((current_thread_info()->status & TS_UAC_NOFIX))
747 /* Don't bother reading ds in the access check since we already
748 know that this came from the user. Also rely on the fact that
749 the page at TASK_SIZE is unmapped and so can't be touched anyway. */
750 if ((unsigned long)va >= TASK_SIZE)
/* Slot 1 of the stats array tracks user-mode unaligned accesses. */
753 ++unaligned[1].count;
754 unaligned[1].va = (unsigned long)va;
755 unaligned[1].pc = regs->pc - 4;
757 if ((1L << opcode) & OP_INT_MASK) {
758 /* it's an integer load/store */
/* Point reg_addr at the register's saved slot in/around pt_regs. */
760 reg_addr = (unsigned long *)
761 ((char *)regs + unauser_reg_offsets[reg]);
762 } else if (reg == 30) {
763 /* usp in PAL regs */
766 /* zero "register" */
771 /* We don't want to use the generic get/put unaligned macros as
772 we want to trap exceptions. Only if we actually get an
773 exception will we decide whether we should have caught it. */
776 case 0x0c: /* ldwu */
/* Overlapping ldq_u pair covers the unaligned word; the byte
   extract/merge insns between the loads are elided here. */
777 __asm__ __volatile__(
778 "1: ldq_u %1,0(%3)\n"
779 "2: ldq_u %2,1(%3)\n"
785 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
789 *reg_addr = tmp1|tmp2;
/* (elided case label) lds: load 4 bytes, widen S- to T-format. */
793 __asm__ __volatile__(
794 "1: ldq_u %1,0(%3)\n"
795 "2: ldq_u %2,3(%3)\n"
801 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
805 alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
/* (elided case label) ldt: load 8 bytes straight into an FP reg. */
809 __asm__ __volatile__(
810 "1: ldq_u %1,0(%3)\n"
811 "2: ldq_u %2,7(%3)\n"
817 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
821 alpha_write_fp_reg(reg, tmp1|tmp2);
/* (elided case label) ldl: 4 bytes, sign-extended. */
825 __asm__ __volatile__(
826 "1: ldq_u %1,0(%3)\n"
827 "2: ldq_u %2,3(%3)\n"
833 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
837 *reg_addr = (int)(tmp1|tmp2);
/* (elided case label) ldq: full 8 bytes. */
841 __asm__ __volatile__(
842 "1: ldq_u %1,0(%3)\n"
843 "2: ldq_u %2,7(%3)\n"
849 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
853 *reg_addr = tmp1|tmp2;
856 /* Note that the store sequences do not indicate that they change
857 memory because it _should_ be affecting nothing in this context.
858 (Otherwise we have other, much larger, problems.) */
/* (elided case label) stw: read-modify-write of both covering
   quadwords; merge insns elided. */
860 __asm__ __volatile__(
861 "1: ldq_u %2,1(%5)\n"
862 "2: ldq_u %1,0(%5)\n"
869 "3: stq_u %2,1(%5)\n"
870 "4: stq_u %1,0(%5)\n"
876 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
877 "=&r"(tmp3), "=&r"(tmp4)
878 : "r"(va), "r"(*reg_addr), "0"(0));
/* (elided case label) sts: narrow the FP reg to S memory format,
   then fall into the 4-byte store sequence. */
884 fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
888 __asm__ __volatile__(
889 "1: ldq_u %2,3(%5)\n"
890 "2: ldq_u %1,0(%5)\n"
897 "3: stq_u %2,3(%5)\n"
898 "4: stq_u %1,0(%5)\n"
904 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
905 "=&r"(tmp3), "=&r"(tmp4)
906 : "r"(va), "r"(*reg_addr), "0"(0));
/* (elided case label) stt: raw 8-byte FP store via fake_reg. */
912 fake_reg = alpha_read_fp_reg(reg);
916 __asm__ __volatile__(
917 "1: ldq_u %2,7(%5)\n"
918 "2: ldq_u %1,0(%5)\n"
925 "3: stq_u %2,7(%5)\n"
926 "4: stq_u %1,0(%5)\n"
932 : "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
933 "=&r"(tmp3), "=&r"(tmp4)
934 : "r"(va), "r"(*reg_addr), "0"(0));
940 /* What instruction were you trying to use, exactly? */
944 /* Only integer loads should get here; everyone else returns early. */
/* SEGV path: back the PC up to the faulting insn and classify. */
950 regs->pc -= 4; /* make pc point to faulting insn */
952 /* We need to replicate some of the logic in mm/fault.c,
953 since we don't have access to the fault code in the
954 exception handling return path. */
955 if ((unsigned long)va >= TASK_SIZE)
956 si_code = SEGV_ACCERR;
958 struct mm_struct *mm = current->mm;
/* A VMA exists but the access failed: permission, not mapping. */
960 if (find_vma(mm, (unsigned long)va))
961 si_code = SEGV_ACCERR;
963 si_code = SEGV_MAPERR;
964 mmap_read_unlock(mm);
966 send_sig_fault(SIGSEGV, si_code, va, current);
/* SIGBUS path: UAC_SIGBUS set, or an access we refuse to fix. */
971 send_sig_fault(SIGBUS, BUS_ADRALN, va, current);
/* NOTE(review): fragment of trap_init(); the function header, the
   wrkgp PAL call, the entry-point registrations, and the if-body are
   elided in this extract. */
978 /* Tell PAL-code what global pointer we want in the kernel. */
/* Bind the variable to $29 (gp) so its current value can be handed
   to the PALcode. */
979 register unsigned long gptr __asm__("$29");
982 /* Hack for Multia (UDB) and JENSEN: some of their SRMs have
983 a bug in the handling of the opDEC fault. Fix it up if so. */
/* Runs the opDEC probe (see the fragment near the top of the file)
   on EV4-class CPUs only. */
984 if (implver() == IMPLVER_EV4)