1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 * Copyright (C) 2001 IBM
8 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
11 * Derived from "arch/i386/kernel/signal.c"
12 * Copyright (C) 1991, 1992 Linus Torvalds
13 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
16 #include <linux/sched.h>
18 #include <linux/smp.h>
19 #include <linux/kernel.h>
20 #include <linux/signal.h>
21 #include <linux/errno.h>
22 #include <linux/elf.h>
23 #include <linux/ptrace.h>
24 #include <linux/pagemap.h>
25 #include <linux/ratelimit.h>
26 #include <linux/syscalls.h>
28 #include <linux/compat.h>
30 #include <linux/wait.h>
31 #include <linux/unistd.h>
32 #include <linux/stddef.h>
33 #include <linux/tty.h>
34 #include <linux/binfmts.h>
37 #include <linux/uaccess.h>
38 #include <asm/cacheflush.h>
39 #include <asm/syscalls.h>
40 #include <asm/sigcontext.h>
42 #include <asm/switch_to.h>
44 #include <asm/asm-prototypes.h>
47 #include <asm/unistd.h>
49 #include <asm/ucontext.h>
50 #include <asm/pgtable.h>
57 #define old_sigaction old_sigaction32
58 #define sigcontext sigcontext32
59 #define mcontext mcontext32
60 #define ucontext ucontext32
62 #define __save_altstack __compat_save_altstack
65 * Userspace code may pass a ucontext which doesn't include VSX added
66 * at the end. We need to check for this case.
68 #define UCONTEXTSIZEWITHOUTVSX \
69 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
72 * Returning 0 means we return to userspace via
73 * ret_from_except and thus restore all user
74 * registers from *regs. This is what we need
75 * to do when a signal has been delivered.
78 #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
79 #undef __SIGNAL_FRAMESIZE
80 #define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
82 #define ELF_NVRREG ELF_NVRREG32
85 * Functions for flipping sigsets (thanks to brain dead generic
86 * implementation that makes things simple for little endian only)
88 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
90 return put_compat_sigset(uset, set, sizeof(*uset));
93 static inline int get_sigset_t(sigset_t *set,
94 const compat_sigset_t __user *uset)
96 return get_compat_sigset(set, uset);
99 #define to_user_ptr(p) ptr_to_compat(p)
100 #define from_user_ptr(p) compat_ptr(p)
102 static inline int save_general_regs(struct pt_regs *regs,
103 struct mcontext __user *frame)
105 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
107 /* Force usr to alway see softe as 1 (interrupts enabled) */
108 elf_greg_t64 softe = 0x1;
110 WARN_ON(!FULL_REGS(regs));
112 for (i = 0; i <= PT_RESULT; i ++) {
113 if (i == 14 && !FULL_REGS(regs))
115 if ( i == PT_SOFTE) {
116 if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
121 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
127 static inline int restore_general_regs(struct pt_regs *regs,
128 struct mcontext __user *sr)
130 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
133 for (i = 0; i <= PT_RESULT; i++) {
134 if ((i == PT_MSR) || (i == PT_SOFTE))
136 if (__get_user(gregs[i], &sr->mc_gregs[i]))
142 #else /* CONFIG_PPC64 */
144 #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
146 static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
148 return copy_to_user(uset, set, sizeof(*uset));
151 static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
153 return copy_from_user(set, uset, sizeof(*uset));
156 #define to_user_ptr(p) ((unsigned long)(p))
157 #define from_user_ptr(p) ((void __user *)(p))
159 static inline int save_general_regs(struct pt_regs *regs,
160 struct mcontext __user *frame)
162 WARN_ON(!FULL_REGS(regs));
163 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
166 static inline int restore_general_regs(struct pt_regs *regs,
167 struct mcontext __user *sr)
169 /* copy up to but not including MSR */
170 if (__copy_from_user(regs, &sr->mc_gregs,
171 PT_MSR * sizeof(elf_greg_t)))
173 /* copy from orig_r3 (the word after the MSR) up to the end */
174 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
175 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
194 struct sigcontext sctx; /* the sigcontext */
195 struct mcontext mctx; /* all the register values */
196 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
197 struct sigcontext sctx_transact;
198 struct mcontext mctx_transact;
201 * Programs using the rs6000/xcoff abi can save up to 19 gp
202 * regs and 18 fp regs below sp before decrementing it.
207 /* We use the mc_pad field for the signal return trampoline. */
/*
 * When we have rt signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *   positions as in older kernels).
 *
 * Each of these things must be a multiple of 16 bytes in size.
 *
 */
223 compat_siginfo_t info;
228 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
229 struct ucontext uc_transact;
232 * Programs using the rs6000/xcoff abi can save up to 19 gp
233 * regs and 18 fp regs below sp before decrementing it.
239 * Save the current user registers on the user stack.
240 * We only save the altivec/spe registers if the process has used
241 * altivec/spe instructions at some point.
243 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
244 struct mcontext __user *tm_frame, int sigret,
245 int ctx_has_vsx_region)
247 unsigned long msr = regs->msr;
249 /* Make sure floating point registers are stored in regs */
250 flush_fp_to_thread(current);
252 /* save general registers */
253 if (save_general_regs(regs, frame))
256 #ifdef CONFIG_ALTIVEC
257 /* save altivec registers */
258 if (current->thread.used_vr) {
259 flush_altivec_to_thread(current);
260 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
261 ELF_NVRREG * sizeof(vector128)))
263 /* set MSR_VEC in the saved MSR value to indicate that
264 frame->mc_vregs contains valid data */
267 /* else assert((regs->msr & MSR_VEC) == 0) */
269 /* We always copy to/from vrsave, it's 0 if we don't have or don't
270 * use altivec. Since VSCR only contains 32 bits saved in the least
271 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
272 * most significant bits of that same vector. --BenH
273 * Note that the current VRSAVE value is in the SPR at this point.
275 if (cpu_has_feature(CPU_FTR_ALTIVEC))
276 current->thread.vrsave = mfspr(SPRN_VRSAVE);
277 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
279 #endif /* CONFIG_ALTIVEC */
280 if (copy_fpr_to_user(&frame->mc_fregs, current))
284 * Clear the MSR VSX bit to indicate there is no valid state attached
285 * to this context, except in the specific case below where we set it.
290 * Copy VSR 0-31 upper half from thread_struct to local
291 * buffer, then write that to userspace. Also set MSR_VSX in
292 * the saved MSR value to indicate that frame->mc_vregs
293 * contains valid data
295 if (current->thread.used_vsr && ctx_has_vsx_region) {
296 flush_vsx_to_thread(current);
297 if (copy_vsx_to_user(&frame->mc_vsregs, current))
301 #endif /* CONFIG_VSX */
303 /* save spe registers */
304 if (current->thread.used_spe) {
305 flush_spe_to_thread(current);
306 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
307 ELF_NEVRREG * sizeof(u32)))
309 /* set MSR_SPE in the saved MSR value to indicate that
310 frame->mc_vregs contains valid data */
313 /* else assert((regs->msr & MSR_SPE) == 0) */
315 /* We always copy to/from spefscr */
316 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
318 #endif /* CONFIG_SPE */
320 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
322 /* We need to write 0 the MSR top 32 bits in the tm frame so that we
323 * can check it on the restore to see if TM is active
325 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
329 /* Set up the sigreturn trampoline: li 0,sigret; sc */
330 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
331 || __put_user(PPC_INST_SC, &frame->tramp[1]))
333 flush_icache_range((unsigned long) &frame->tramp[0],
334 (unsigned long) &frame->tramp[2]);
340 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
342 * Save the current user registers on the user stack.
343 * We only save the altivec/spe registers if the process has used
344 * altivec/spe instructions at some point.
345 * We also save the transactional registers to a second ucontext in the
348 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
350 static int save_tm_user_regs(struct pt_regs *regs,
351 struct mcontext __user *frame,
352 struct mcontext __user *tm_frame, int sigret,
355 WARN_ON(tm_suspend_disabled);
357 /* Save both sets of general registers */
358 if (save_general_regs(¤t->thread.ckpt_regs, frame)
359 || save_general_regs(regs, tm_frame))
362 /* Stash the top half of the 64bit MSR into the 32bit MSR word
363 * of the transactional mcontext. This way we have a backward-compatible
364 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
365 * also look at what type of transaction (T or S) was active at the
366 * time of the signal.
368 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
371 #ifdef CONFIG_ALTIVEC
372 /* save altivec registers */
373 if (current->thread.used_vr) {
374 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state,
375 ELF_NVRREG * sizeof(vector128)))
378 if (__copy_to_user(&tm_frame->mc_vregs,
379 ¤t->thread.vr_state,
380 ELF_NVRREG * sizeof(vector128)))
383 if (__copy_to_user(&tm_frame->mc_vregs,
384 ¤t->thread.ckvr_state,
385 ELF_NVRREG * sizeof(vector128)))
389 /* set MSR_VEC in the saved MSR value to indicate that
390 * frame->mc_vregs contains valid data
395 /* We always copy to/from vrsave, it's 0 if we don't have or don't
396 * use altivec. Since VSCR only contains 32 bits saved in the least
397 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
398 * most significant bits of that same vector. --BenH
400 if (cpu_has_feature(CPU_FTR_ALTIVEC))
401 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
402 if (__put_user(current->thread.ckvrsave,
403 (u32 __user *)&frame->mc_vregs[32]))
406 if (__put_user(current->thread.vrsave,
407 (u32 __user *)&tm_frame->mc_vregs[32]))
410 if (__put_user(current->thread.ckvrsave,
411 (u32 __user *)&tm_frame->mc_vregs[32]))
414 #endif /* CONFIG_ALTIVEC */
416 if (copy_ckfpr_to_user(&frame->mc_fregs, current))
419 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
422 if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
428 * Copy VSR 0-31 upper half from thread_struct to local
429 * buffer, then write that to userspace. Also set MSR_VSX in
430 * the saved MSR value to indicate that frame->mc_vregs
431 * contains valid data
433 if (current->thread.used_vsr) {
434 if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
437 if (copy_vsx_to_user(&tm_frame->mc_vsregs,
441 if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
447 #endif /* CONFIG_VSX */
449 /* SPE regs are not checkpointed with TM, so this section is
450 * simply the same as in save_user_regs().
452 if (current->thread.used_spe) {
453 flush_spe_to_thread(current);
454 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
455 ELF_NEVRREG * sizeof(u32)))
457 /* set MSR_SPE in the saved MSR value to indicate that
458 * frame->mc_vregs contains valid data */
462 /* We always copy to/from spefscr */
463 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
465 #endif /* CONFIG_SPE */
467 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
470 /* Set up the sigreturn trampoline: li 0,sigret; sc */
471 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
472 || __put_user(PPC_INST_SC, &frame->tramp[1]))
474 flush_icache_range((unsigned long) &frame->tramp[0],
475 (unsigned long) &frame->tramp[2]);
483 * Restore the current user register values from the user stack,
486 static long restore_user_regs(struct pt_regs *regs,
487 struct mcontext __user *sr, int sig)
490 unsigned int save_r2 = 0;
497 * restore general registers but not including MSR or SOFTE. Also
498 * take care of keeping r2 (TLS) intact if not a signal
501 save_r2 = (unsigned int)regs->gpr[2];
502 err = restore_general_regs(regs, sr);
503 set_trap_norestart(regs);
504 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
506 regs->gpr[2] = (unsigned long) save_r2;
510 /* if doing signal return, restore the previous little-endian mode */
512 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
514 #ifdef CONFIG_ALTIVEC
516 * Force the process to reload the altivec registers from
517 * current->thread when it next does altivec instructions
519 regs->msr &= ~MSR_VEC;
521 /* restore altivec registers from the stack */
522 if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
523 sizeof(sr->mc_vregs)))
525 current->thread.used_vr = true;
526 } else if (current->thread.used_vr)
527 memset(¤t->thread.vr_state, 0,
528 ELF_NVRREG * sizeof(vector128));
530 /* Always get VRSAVE back */
531 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
533 if (cpu_has_feature(CPU_FTR_ALTIVEC))
534 mtspr(SPRN_VRSAVE, current->thread.vrsave);
535 #endif /* CONFIG_ALTIVEC */
536 if (copy_fpr_from_user(current, &sr->mc_fregs))
541 * Force the process to reload the VSX registers from
542 * current->thread when it next does VSX instruction.
544 regs->msr &= ~MSR_VSX;
547 * Restore altivec registers from the stack to a local
548 * buffer, then write this out to the thread_struct
550 if (copy_vsx_from_user(current, &sr->mc_vsregs))
552 current->thread.used_vsr = true;
553 } else if (current->thread.used_vsr)
554 for (i = 0; i < 32 ; i++)
555 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
556 #endif /* CONFIG_VSX */
558 * force the process to reload the FP registers from
559 * current->thread when it next does FP instructions
561 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
564 /* force the process to reload the spe registers from
565 current->thread when it next does spe instructions */
566 regs->msr &= ~MSR_SPE;
568 /* restore spe registers from the stack */
569 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
570 ELF_NEVRREG * sizeof(u32)))
572 current->thread.used_spe = true;
573 } else if (current->thread.used_spe)
574 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
576 /* Always get SPEFSCR back */
577 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
579 #endif /* CONFIG_SPE */
584 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
586 * Restore the current user register values from the user stack, except for
587 * MSR, and recheckpoint the original checkpointed register state for processes
590 static long restore_tm_user_regs(struct pt_regs *regs,
591 struct mcontext __user *sr,
592 struct mcontext __user *tm_sr)
595 unsigned long msr, msr_hi;
600 if (tm_suspend_disabled)
603 * restore general registers but not including MSR or SOFTE. Also
604 * take care of keeping r2 (TLS) intact if not a signal.
605 * See comment in signal_64.c:restore_tm_sigcontexts();
606 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
607 * were set by the signal delivery.
609 err = restore_general_regs(regs, tm_sr);
610 err |= restore_general_regs(¤t->thread.ckpt_regs, sr);
612 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
614 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
618 /* Restore the previous little-endian mode */
619 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
621 #ifdef CONFIG_ALTIVEC
622 regs->msr &= ~MSR_VEC;
624 /* restore altivec registers from the stack */
625 if (__copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
626 sizeof(sr->mc_vregs)) ||
627 __copy_from_user(¤t->thread.vr_state,
629 sizeof(sr->mc_vregs)))
631 current->thread.used_vr = true;
632 } else if (current->thread.used_vr) {
633 memset(¤t->thread.vr_state, 0,
634 ELF_NVRREG * sizeof(vector128));
635 memset(¤t->thread.ckvr_state, 0,
636 ELF_NVRREG * sizeof(vector128));
639 /* Always get VRSAVE back */
640 if (__get_user(current->thread.ckvrsave,
641 (u32 __user *)&sr->mc_vregs[32]) ||
642 __get_user(current->thread.vrsave,
643 (u32 __user *)&tm_sr->mc_vregs[32]))
645 if (cpu_has_feature(CPU_FTR_ALTIVEC))
646 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
647 #endif /* CONFIG_ALTIVEC */
649 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
651 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
652 copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
656 regs->msr &= ~MSR_VSX;
659 * Restore altivec registers from the stack to a local
660 * buffer, then write this out to the thread_struct
662 if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
663 copy_ckvsx_from_user(current, &sr->mc_vsregs))
665 current->thread.used_vsr = true;
666 } else if (current->thread.used_vsr)
667 for (i = 0; i < 32 ; i++) {
668 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
669 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
671 #endif /* CONFIG_VSX */
674 /* SPE regs are not checkpointed with TM, so this section is
675 * simply the same as in restore_user_regs().
677 regs->msr &= ~MSR_SPE;
679 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
680 ELF_NEVRREG * sizeof(u32)))
682 current->thread.used_spe = true;
683 } else if (current->thread.used_spe)
684 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
686 /* Always get SPEFSCR back */
687 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
690 #endif /* CONFIG_SPE */
692 /* Get the top half of the MSR from the user context */
693 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
696 /* If TM bits are set to the reserved value, it's an invalid context */
697 if (MSR_TM_RESV(msr_hi))
701 * Disabling preemption, since it is unsafe to be preempted
702 * with MSR[TS] set without recheckpointing.
708 * After regs->MSR[TS] being updated, make sure that get_user(),
709 * put_user() or similar functions are *not* called. These
710 * functions can generate page faults which will cause the process
711 * to be de-scheduled with MSR[TS] set but without calling
712 * tm_recheckpoint(). This can cause a bug.
714 * Pull in the MSR TM bits from the user context
716 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
717 /* Now, recheckpoint. This loads up all of the checkpointed (older)
718 * registers, including FP and V[S]Rs. After recheckpointing, the
719 * transactional versions should be loaded.
722 /* Make sure the transaction is marked as failed */
723 current->thread.tm_texasr |= TEXASR_FS;
724 /* This loads the checkpointed FP/VEC state, if used */
725 tm_recheckpoint(¤t->thread);
727 /* This loads the speculative FP/VEC state, if used */
728 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
730 load_fp_state(¤t->thread.fp_state);
731 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
733 #ifdef CONFIG_ALTIVEC
735 load_vr_state(¤t->thread.vr_state);
736 regs->msr |= MSR_VEC;
748 #define copy_siginfo_to_user copy_siginfo_to_user32
750 #endif /* CONFIG_PPC64 */
753 * Set up a signal frame for a "real-time" signal handler
754 * (one which gets siginfo).
756 int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
757 struct task_struct *tsk)
759 struct rt_sigframe __user *rt_sf;
760 struct mcontext __user *frame;
761 struct mcontext __user *tm_frame = NULL;
763 unsigned long newsp = 0;
766 struct pt_regs *regs = tsk->thread.regs;
767 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
768 /* Save the thread's msr before get_tm_stackpointer() changes it */
769 unsigned long msr = regs->msr;
772 BUG_ON(tsk != current);
774 /* Set up Signal Frame */
775 /* Put a Real Time Context onto stack */
776 rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
778 if (unlikely(rt_sf == NULL))
781 /* Put the siginfo & fill in most of the ucontext */
782 if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
783 || __put_user(0, &rt_sf->uc.uc_flags)
784 || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
785 || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
787 || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
790 /* Save user registers on the stack */
791 frame = &rt_sf->uc.uc_mcontext;
793 if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
795 tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
797 sigret = __NR_rt_sigreturn;
798 tramp = (unsigned long) frame->tramp;
801 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
802 tm_frame = &rt_sf->uc_transact.uc_mcontext;
803 if (MSR_TM_ACTIVE(msr)) {
804 if (__put_user((unsigned long)&rt_sf->uc_transact,
805 &rt_sf->uc.uc_link) ||
806 __put_user((unsigned long)tm_frame,
807 &rt_sf->uc_transact.uc_regs))
809 if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
815 if (__put_user(0, &rt_sf->uc.uc_link))
817 if (save_user_regs(regs, frame, tm_frame, sigret, 1))
822 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
824 /* create a stack frame for the caller of the handler */
825 newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
826 addr = (void __user *)regs->gpr[1];
827 if (put_user(regs->gpr[1], (u32 __user *)newsp))
830 /* Fill registers for signal handler */
831 regs->gpr[1] = newsp;
832 regs->gpr[3] = ksig->sig;
833 regs->gpr[4] = (unsigned long) &rt_sf->info;
834 regs->gpr[5] = (unsigned long) &rt_sf->uc;
835 regs->gpr[6] = (unsigned long) rt_sf;
836 regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
837 /* enter the signal handler in native-endian mode */
838 regs->msr &= ~MSR_LE;
839 regs->msr |= (MSR_KERNEL & MSR_LE);
843 if (show_unhandled_signals)
844 printk_ratelimited(KERN_INFO
845 "%s[%d]: bad frame in handle_rt_signal32: "
846 "%p nip %08lx lr %08lx\n",
848 addr, regs->nip, regs->link);
853 static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
856 struct mcontext __user *mcp;
858 if (get_sigset_t(&set, &ucp->uc_sigmask))
864 if (__get_user(cmcp, &ucp->uc_regs))
866 mcp = (struct mcontext __user *)(u64)cmcp;
867 /* no need to check access_ok(mcp), since mcp < 4GB */
870 if (__get_user(mcp, &ucp->uc_regs))
872 if (!access_ok(mcp, sizeof(*mcp)))
875 set_current_blocked(&set);
876 if (restore_user_regs(regs, mcp, sig))
882 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
883 static int do_setcontext_tm(struct ucontext __user *ucp,
884 struct ucontext __user *tm_ucp,
885 struct pt_regs *regs)
888 struct mcontext __user *mcp;
889 struct mcontext __user *tm_mcp;
893 if (get_sigset_t(&set, &ucp->uc_sigmask))
896 if (__get_user(cmcp, &ucp->uc_regs) ||
897 __get_user(tm_cmcp, &tm_ucp->uc_regs))
899 mcp = (struct mcontext __user *)(u64)cmcp;
900 tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
901 /* no need to check access_ok(mcp), since mcp < 4GB */
903 set_current_blocked(&set);
904 if (restore_tm_user_regs(regs, mcp, tm_mcp))
912 COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
913 struct ucontext __user *, new_ctx, int, ctx_size)
915 SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
916 struct ucontext __user *, new_ctx, long, ctx_size)
919 struct pt_regs *regs = current_pt_regs();
920 int ctx_has_vsx_region = 0;
923 unsigned long new_msr = 0;
926 struct mcontext __user *mcp;
930 * Get pointer to the real mcontext. No need for
931 * access_ok since we are dealing with compat
934 if (__get_user(cmcp, &new_ctx->uc_regs))
936 mcp = (struct mcontext __user *)(u64)cmcp;
937 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
941 * Check that the context is not smaller than the original
942 * size (with VMX but without VSX)
944 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
947 * If the new context state sets the MSR VSX bits but
948 * it doesn't provide VSX state.
950 if ((ctx_size < sizeof(struct ucontext)) &&
953 /* Does the context have enough room to store VSX data? */
954 if (ctx_size >= sizeof(struct ucontext))
955 ctx_has_vsx_region = 1;
957 /* Context size is for future use. Right now, we only make sure
958 * we are passed something we understand
960 if (ctx_size < sizeof(struct ucontext))
963 if (old_ctx != NULL) {
964 struct mcontext __user *mctx;
967 * old_ctx might not be 16-byte aligned, in which
968 * case old_ctx->uc_mcontext won't be either.
969 * Because we have the old_ctx->uc_pad2 field
970 * before old_ctx->uc_mcontext, we need to round down
971 * from &old_ctx->uc_mcontext to a 16-byte boundary.
973 mctx = (struct mcontext __user *)
974 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
975 if (!access_ok(old_ctx, ctx_size)
976 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
977 || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked)
978 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
983 if (!access_ok(new_ctx, ctx_size) ||
984 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
988 * If we get a fault copying the context into the kernel's
989 * image of the user's registers, we can't just return -EFAULT
990 * because the user's registers will be corrupted. For instance
991 * the NIP value may have been updated but not some of the
992 * other registers. Given that we have done the access_ok
993 * and successfully read the first and last bytes of the region
994 * above, this should only happen in an out-of-memory situation
995 * or if another thread unmaps the region containing the context.
996 * We kill the task with a SIGSEGV in this situation.
998 if (do_setcontext(new_ctx, regs, 0))
1001 set_thread_flag(TIF_RESTOREALL);
1006 COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
1008 SYSCALL_DEFINE0(rt_sigreturn)
1011 struct rt_sigframe __user *rt_sf;
1012 struct pt_regs *regs = current_pt_regs();
1014 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1015 struct ucontext __user *uc_transact;
1016 unsigned long msr_hi;
1019 /* Always make any pending restarted system calls return -EINTR */
1020 current->restart_block.fn = do_no_restart_syscall;
1022 rt_sf = (struct rt_sigframe __user *)
1023 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1024 if (!access_ok(rt_sf, sizeof(*rt_sf)))
1027 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1029 * If there is a transactional state then throw it away.
1030 * The purpose of a sigreturn is to destroy all traces of the
1031 * signal frame, this includes any transactional state created
1032 * within in. We only check for suspended as we can never be
1033 * active in the kernel, we are active, there is nothing better to
1034 * do than go ahead and Bad Thing later.
1035 * The cause is not important as there will never be a
1036 * recheckpoint so it's not user visible.
1038 if (MSR_TM_SUSPENDED(mfmsr()))
1039 tm_reclaim_current(0);
1041 if (__get_user(tmp, &rt_sf->uc.uc_link))
1043 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1046 struct mcontext __user *mcp;
1048 if (__get_user(cmcp, &uc_transact->uc_regs))
1050 mcp = (struct mcontext __user *)(u64)cmcp;
1051 /* The top 32 bits of the MSR are stashed in the transactional
1053 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1056 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1057 /* Trying to start TM on non TM system */
1058 if (!cpu_has_feature(CPU_FTR_TM))
1060 /* We only recheckpoint on return if we're
1064 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1070 * Unset regs->msr because ucontext MSR TS is not
1071 * set, and recheckpoint was not called. This avoid
1072 * hitting a TM Bad thing at RFID
1074 regs->msr &= ~MSR_TS_MASK;
1076 /* Fall through, for non-TM restore */
1079 if (do_setcontext(&rt_sf->uc, regs, 1))
1083 * It's not clear whether or why it is desirable to save the
1084 * sigaltstack setting on signal delivery and restore it on
1085 * signal return. But other architectures do this and we have
1086 * always done it up until now so it is probably better not to
1087 * change it. -- paulus
1090 if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1093 if (restore_altstack(&rt_sf->uc.uc_stack))
1096 set_thread_flag(TIF_RESTOREALL);
1100 if (show_unhandled_signals)
1101 printk_ratelimited(KERN_INFO
1102 "%s[%d]: bad frame in sys_rt_sigreturn: "
1103 "%p nip %08lx lr %08lx\n",
1104 current->comm, current->pid,
1105 rt_sf, regs->nip, regs->link);
1112 SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
1113 int, ndbg, struct sig_dbg_op __user *, dbg)
1115 struct pt_regs *regs = current_pt_regs();
1116 struct sig_dbg_op op;
1118 unsigned long new_msr = regs->msr;
1119 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1120 unsigned long new_dbcr0 = current->thread.debug.dbcr0;
1123 for (i=0; i<ndbg; i++) {
1124 if (copy_from_user(&op, dbg + i, sizeof(op)))
1126 switch (op.dbg_type) {
1127 case SIG_DBG_SINGLE_STEPPING:
1128 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1131 new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1133 new_dbcr0 &= ~DBCR0_IC;
1134 if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1135 current->thread.debug.dbcr1)) {
1137 new_dbcr0 &= ~DBCR0_IDM;
1147 case SIG_DBG_BRANCH_TRACING:
1148 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1163 /* We wait until here to actually install the values in the
1164 registers so if we fail in the above loop, it will not
1165 affect the contents of these registers. After this point,
1166 failure is a problem, anyway, and it's very unlikely unless
1167 the user is really doing something wrong. */
1168 regs->msr = new_msr;
1169 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1170 current->thread.debug.dbcr0 = new_dbcr0;
1173 if (!access_ok(ctx, sizeof(*ctx)) ||
1174 fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
1178 * If we get a fault copying the context into the kernel's
1179 * image of the user's registers, we can't just return -EFAULT
1180 * because the user's registers will be corrupted. For instance
1181 * the NIP value may have been updated but not some of the
1182 * other registers. Given that we have done the access_ok
1183 * and successfully read the first and last bytes of the region
1184 * above, this should only happen in an out-of-memory situation
1185 * or if another thread unmaps the region containing the context.
1186 * We kill the task with a SIGSEGV in this situation.
1188 if (do_setcontext(ctx, regs, 1)) {
1189 if (show_unhandled_signals)
1190 printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1191 "sys_debug_setcontext: %p nip %08lx "
1193 current->comm, current->pid,
1194 ctx, regs->nip, regs->link);
1201 * It's not clear whether or why it is desirable to save the
1202 * sigaltstack setting on signal delivery and restore it on
1203 * signal return. But other architectures do this and we have
1204 * always done it up until now so it is probably better not to
1205 * change it. -- paulus
1207 restore_altstack(&ctx->uc_stack);
1209 set_thread_flag(TIF_RESTOREALL);
1216 * OK, we're invoking a handler
1218 int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1219 struct task_struct *tsk)
1221 struct sigcontext __user *sc;
1222 struct sigframe __user *frame;
1223 struct mcontext __user *tm_mctx = NULL;
1224 unsigned long newsp = 0;
1226 unsigned long tramp;
1227 struct pt_regs *regs = tsk->thread.regs;
1228 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1229 /* Save the thread's msr before get_tm_stackpointer() changes it */
1230 unsigned long msr = regs->msr;
1233 BUG_ON(tsk != current);
1235 /* Set up Signal Frame */
1236 frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
1237 if (unlikely(frame == NULL))
1239 sc = (struct sigcontext __user *) &frame->sctx;
1242 #error "Please adjust handle_signal()"
1244 if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
1245 || __put_user(oldset->sig[0], &sc->oldmask)
1247 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1249 || __put_user(oldset->sig[1], &sc->_unused[3])
1251 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1252 || __put_user(ksig->sig, &sc->signal))
1255 if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
1257 tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
1259 sigret = __NR_sigreturn;
1260 tramp = (unsigned long) frame->mctx.tramp;
1263 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1264 tm_mctx = &frame->mctx_transact;
1265 if (MSR_TM_ACTIVE(msr)) {
1266 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1273 if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
1279 tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */
1281 /* create a stack frame for the caller of the handler */
1282 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1283 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1286 regs->gpr[1] = newsp;
1287 regs->gpr[3] = ksig->sig;
1288 regs->gpr[4] = (unsigned long) sc;
1289 regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
1290 /* enter the signal handler in big-endian mode */
1291 regs->msr &= ~MSR_LE;
1295 if (show_unhandled_signals)
1296 printk_ratelimited(KERN_INFO
1297 "%s[%d]: bad frame in handle_signal32: "
1298 "%p nip %08lx lr %08lx\n",
1299 tsk->comm, tsk->pid,
1300 frame, regs->nip, regs->link);
1306 * Do a signal return; undo the signal stack.
1309 COMPAT_SYSCALL_DEFINE0(sigreturn)
1311 SYSCALL_DEFINE0(sigreturn)
1314 struct pt_regs *regs = current_pt_regs();
1315 struct sigframe __user *sf;
1316 struct sigcontext __user *sc;
1317 struct sigcontext sigctx;
1318 struct mcontext __user *sr;
1321 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1322 struct mcontext __user *mcp, *tm_mcp;
1323 unsigned long msr_hi;
1326 /* Always make any pending restarted system calls return -EINTR */
1327 current->restart_block.fn = do_no_restart_syscall;
1329 sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1332 if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1337 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1338 * unused part of the signal stackframe
1340 set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1342 set.sig[0] = sigctx.oldmask;
1343 set.sig[1] = sigctx._unused[3];
1345 set_current_blocked(&set);
1347 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1348 mcp = (struct mcontext __user *)&sf->mctx;
1349 tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
1350 if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
1352 if (MSR_TM_ACTIVE(msr_hi<<32)) {
1353 if (!cpu_has_feature(CPU_FTR_TM))
1355 if (restore_tm_user_regs(regs, mcp, tm_mcp))
1360 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1362 if (!access_ok(sr, sizeof(*sr))
1363 || restore_user_regs(regs, sr, 1))
1367 set_thread_flag(TIF_RESTOREALL);
1371 if (show_unhandled_signals)
1372 printk_ratelimited(KERN_INFO
1373 "%s[%d]: bad frame in sys_sigreturn: "
1374 "%p nip %08lx lr %08lx\n",
1375 current->comm, current->pid,
1376 addr, regs->nip, regs->link);