// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>

#include "signal.h"
#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE	sizeof(elf_fpregset_t)

#define TRAMP_TRACEBACK	4
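/*
 * Note on the trampoline layout: setup_trampoline() below writes the actual
 * instructions into tramp[0..TRAMP_TRACEBACK-1] and zeroes the remaining
 * entries up to TRAMP_SIZE as minimal traceback info.
 */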
/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer:
 *	1) a rt_sigframe struct which contains the ucontext
 *	2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
 *	   frame for the signal handler.
 */
struct rt_sigframe {
	/* sys_rt_sigreturn requires the ucontext be the first field */
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	unsigned long _unused[2];
	unsigned int tramp[TRAMP_SIZE];
	struct siginfo __user *pinfo;
	void __user *puc;
	struct siginfo info;
	/* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
	char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
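/*
 * Sketch of the resulting user stack (higher addresses at the top), as laid
 * out by handle_rt_signal64() below:
 *
 *	|  existing user stack         | <- original r1
 *	|  struct rt_sigframe          | <- frame
 *	|  __SIGNAL_FRAMESIZE gap      | <- r1 on entry to the handler
 *
 * Each pointer marks the lowest address of the region above it.
 */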
/*
 * This computes a quad word aligned pointer inside the vmx_reserve array
 * element. For historical reasons sigcontext might not be quad word aligned,
 * but the location we write the VMX regs to must be. See the comment in
 * sigcontext for more detail.
 */
#ifdef CONFIG_ALTIVEC
static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
{
	return (elf_vrreg_t __user *)(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
}
#endif
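/*
 * For illustration: the expression above is a plain round-up to the next
 * 16-byte boundary, e.g.
 *
 *	(0x...1008 + 15) & ~0xf  ->  0x...1010
 *	(0x...1010 + 15) & ~0xf  ->  0x...1010	(already aligned, unchanged)
 */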
static void prepare_setup_sigcontext(struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (tsk->thread.used_vr)
		flush_altivec_to_thread(tsk);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		tsk->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif /* CONFIG_ALTIVEC */

	flush_fp_to_thread(tsk);

#ifdef CONFIG_VSX
	if (tsk->thread.used_vsr)
		flush_vsx_to_thread(tsk);
#endif /* CONFIG_VSX */
}
/*
 * Set up the sigcontext for the signal frame.
 */
#define unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region, label)\
do {									\
	if (__unsafe_setup_sigcontext(sc, tsk, signr, set, handler, ctx_has_vsx_region))\
		goto label;						\
} while (0)
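/*
 * Typical use of the wrapper above (a sketch of the pattern used by
 * sys_swapcontext() and handle_rt_signal64() further down): the caller opens
 * a user write access window, passes a local error label, and closes the
 * window on both the success and the error path:
 *
 *	if (!user_write_access_begin(&frame->uc.uc_mcontext, size))
 *		return -EFAULT;
 *	unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk, signr, set,
 *				handler, 1, efault_out);
 *	user_write_access_end();
 *	...
 * efault_out:
 *	user_write_access_end();
 *	return -EFAULT;
 */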
static long notrace __unsafe_setup_sigcontext(struct sigcontext __user *sc,
					struct task_struct *tsk, int signr, sigset_t *set,
					unsigned long handler, int ctx_has_vsx_region)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
#endif
	struct pt_regs *regs = tsk->thread.regs;
	unsigned long msr = regs->msr;
	/* Force user to always see softe as 1 (interrupts enabled) */
	unsigned long softe = 0x1;

	BUG_ON(tsk != current);

#ifdef CONFIG_ALTIVEC
	unsafe_put_user(v_regs, &sc->v_regs, efault_out);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		unsafe_copy_to_user(v_regs, &tsk->thread.vr_state,
				    33 * sizeof(vector128), efault_out);
		/* set MSR_VEC in the MSR value in the frame to indicate that
		 * sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	unsafe_put_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33], efault_out);
#else /* CONFIG_ALTIVEC */
	unsafe_put_user(0, &sc->v_regs, efault_out);
#endif /* CONFIG_ALTIVEC */
	/* copy fpr regs and fpscr */
	unsafe_copy_fpr_to_user(&sc->fp_regs, tsk, efault_out);

#ifdef CONFIG_VSX
	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace. Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr && ctx_has_vsx_region) {
		v_regs += ELF_NVRREG;
		unsafe_copy_vsx_to_user(v_regs, tsk, efault_out);
		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
	unsafe_put_user(&sc->gp_regs, &sc->regs, efault_out);
	WARN_ON(!FULL_REGS(regs));
	unsafe_copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE, efault_out);
	unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
	unsafe_put_user(softe, &sc->gp_regs[PT_SOFTE], efault_out);
	unsafe_put_user(signr, &sc->signal, efault_out);
	unsafe_put_user(handler, &sc->handler, efault_out);
	if (set != NULL)
		unsafe_put_user(set->sig[0], &sc->oldmask, efault_out);

	return 0;

efault_out:
	return 1;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state. If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct task_struct *tsk,
				 int signr, sigset_t *set, unsigned long handler,
				 unsigned long msr)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation.
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
	elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
	struct pt_regs *regs = tsk->thread.regs;
	long err = 0;

	BUG_ON(tsk != current);

	BUG_ON(!MSR_TM_ACTIVE(msr));

	WARN_ON(tm_suspend_disabled);

	/* Restore checkpointed FP, VEC, and VSX bits from ckpt_regs as
	 * it contains the correct FP, VEC, VSX state after we treclaimed
	 * the transaction and giveup_all() was called on reclaiming.
	 */
	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (tsk->thread.used_vr) {
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &tsk->thread.ckvr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &tsk->thread.vr_state,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &tsk->thread.ckvr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_regs contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		tsk->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(tsk->thread.ckvrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(tsk->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(tsk->thread.ckvrsave,
				  (u32 __user *)&tm_v_regs[33]);
#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_ckfpr_to_user(&sc->fp_regs, tsk);
	if (msr & MSR_FP)
		err |= copy_fpr_to_user(&tm_sc->fp_regs, tsk);
	else
		err |= copy_ckfpr_to_user(&tm_sc->fp_regs, tsk);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace. Update v_regs to point after the
	 * VMX data.
	 */
	if (tsk->thread.used_vsr) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_ckvsx_to_user(v_regs, tsk);

		if (msr & MSR_VSX)
			err |= copy_vsx_to_user(tm_v_regs, tsk);
		else
			err |= copy_ckvsx_to_user(tm_v_regs, tsk);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_regs contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	WARN_ON(!FULL_REGS(regs));
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &tsk->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
/*
 * Restore the sigcontext from the signal frame.
 */
static long restore_sigcontext(struct task_struct *tsk, sigset_t *set, int sig,
			       struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs;
#endif
	unsigned long err = 0;
	unsigned long save_r13 = 0;
	unsigned long msr;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
	int i;
#endif

	BUG_ON(tsk != current);

	/* If this is not a signal return, we preserve the TLS in r13 */
	if (!sig)
		save_r13 = regs->gpr[13];

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
	err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
	err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
	err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
	/* Don't allow userspace to set SOFTE */
	set_trap_norestart(regs);
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	if (!sig)
		regs->gpr[13] = save_r13;
	if (set != NULL)
		err |= __get_user(set->sig[0], &sc->oldmask);

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into tsk->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&tsk->thread.vr_state, v_regs,
					33 * sizeof(vector128));
		tsk->thread.used_vr = true;
	} else if (tsk->thread.used_vr) {
		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL)
		err |= __get_user(tsk->thread.vrsave, (u32 __user *)&v_regs[33]);
	else
		tsk->thread.vrsave = 0;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data. Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	v_regs += ELF_NVRREG;
	if ((msr & MSR_VSX) != 0) {
		err |= copy_vsx_from_user(tsk, v_regs);
		tsk->thread.used_vsr = true;
	} else {
		for (i = 0; i < 32; i++)
			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
	}
#endif
	return err;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */
static long restore_tm_sigcontexts(struct task_struct *tsk,
				   struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
	elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
	unsigned long err = 0;
	unsigned long msr;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_VSX
	int i;
#endif

	BUG_ON(tsk != current);

	if (tm_suspend_disabled)
		return -EINVAL;

	/* copy the GPRs */
	err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
	err |= __copy_from_user(&tsk->thread.ckpt_regs, sc->gp_regs,
				sizeof(regs->gpr));

	/*
	 * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
	 * TEXASR was set by the signal delivery reclaim, as was TFIAR.
	 * Users doing anything abhorrent like thread-switching w/ signals for
	 * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
	 * For the case of getting a signal and simply returning from it,
	 * we don't need to re-copy them here.
	 */
	err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
	err |= __get_user(tsk->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

	/* get MSR separately, transfer the LE bit if doing signal return */
	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
	/* Don't allow reserved mode. */
	if (MSR_TM_RESV(msr))
		return -EINVAL;

	/* pull in MSR LE from user context */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

	/* The following non-GPR non-FPR non-VR state is also checkpointed: */
	err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
	err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
	err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
	err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
	err |= __get_user(tsk->thread.ckpt_regs.ctr,
			  &sc->gp_regs[PT_CTR]);
	err |= __get_user(tsk->thread.ckpt_regs.link,
			  &sc->gp_regs[PT_LNK]);
	err |= __get_user(tsk->thread.ckpt_regs.xer,
			  &sc->gp_regs[PT_XER]);
	err |= __get_user(tsk->thread.ckpt_regs.ccr,
			  &sc->gp_regs[PT_CCR]);
	/* Don't allow userspace to set SOFTE */
	set_trap_norestart(regs);
	/* These regs are not checkpointed; they can go in 'regs'. */
	err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
	err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
	err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

	/*
	 * Force reload of FP/VEC.
	 * This has to be done before copying stuff into tsk->thread.fpr/vr
	 * for the reasons explained in the previous comment.
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
	err |= __get_user(v_regs, &sc->v_regs);
	err |= __get_user(tm_v_regs, &tm_sc->v_regs);
	if (err)
		return err;
	if (v_regs && !access_ok(v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	if (tm_v_regs && !access_ok(tm_v_regs, 34 * sizeof(vector128)))
		return -EFAULT;
	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
	if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
		err |= __copy_from_user(&tsk->thread.ckvr_state, v_regs,
					33 * sizeof(vector128));
		err |= __copy_from_user(&tsk->thread.vr_state, tm_v_regs,
					33 * sizeof(vector128));
		current->thread.used_vr = true;
	} else if (tsk->thread.used_vr) {
		memset(&tsk->thread.vr_state, 0, 33 * sizeof(vector128));
		memset(&tsk->thread.ckvr_state, 0, 33 * sizeof(vector128));
	}
	/* Always get VRSAVE back */
	if (v_regs != NULL && tm_v_regs != NULL) {
		err |= __get_user(tsk->thread.ckvrsave,
				  (u32 __user *)&v_regs[33]);
		err |= __get_user(tsk->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	} else {
		tsk->thread.vrsave = 0;
		tsk->thread.ckvrsave = 0;
	}
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, tsk->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	/* restore floating point */
	err |= copy_fpr_from_user(tsk, &tm_sc->fp_regs);
	err |= copy_ckfpr_from_user(tsk, &sc->fp_regs);
#ifdef CONFIG_VSX
	/*
	 * Get additional VSX data. Update v_regs to point after the
	 * VMX data. Copy VSX low doubleword from userspace to local
	 * buffer for formatting, then into the taskstruct.
	 */
	if (v_regs && ((msr & MSR_VSX) != 0)) {
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;
		err |= copy_vsx_from_user(tsk, tm_v_regs);
		err |= copy_ckvsx_from_user(tsk, v_regs);
		tsk->thread.used_vsr = true;
	} else {
		for (i = 0; i < 32; i++) {
			tsk->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			tsk->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
	}
#endif

	/* Make sure the transaction is marked as failed */
	tsk->thread.tm_texasr |= TEXASR_FS;
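	/*
	 * TEXASR_FS is the transaction "failure summary" bit; with it set,
	 * the recheckpointed transaction is seen by userspace as failed
	 * (doomed) rather than as one that could still succeed.
	 */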
	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/* pull in MSR TS bits from user context */
	regs->msr |= msr & MSR_TS_MASK;

	/*
	 * Ensure that TM is enabled in regs->msr before we leave the signal
	 * handler. It could be the case that (a) user disabled the TM bit
	 * through the manipulation of the MSR bits in uc_mcontext or (b) the
	 * TM bit was disabled because a sufficient number of context switches
	 * happened whilst in the signal handler and load_tm overflowed,
	 * disabling the TM bit. In either case we can end up with an illegal
	 * TM state leading to a TM Bad Thing when we return to userspace.
	 *
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 */
	regs->msr |= MSR_TM;

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&tsk->thread);

	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&tsk->thread.fp_state);
		regs->msr |= (MSR_FP | tsk->thread.fpexc_mode);
	}
	if (msr & MSR_VEC) {
		load_vr_state(&tsk->thread.vr_state);
		regs->msr |= MSR_VEC;
	}

	preempt_enable();

	return err;
}
#else /* !CONFIG_PPC_TRANSACTIONAL_MEM */
static long restore_tm_sigcontexts(struct task_struct *tsk, struct sigcontext __user *sc,
				   struct sigcontext __user *tm_sc)
{
	return -EINVAL;
}
#endif
/*
 * Setup the trampoline code on the stack
 */
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
	int i;
	long err = 0;

	/* bctrl # call the handler */
	err |= __put_user(PPC_INST_BCTRL, &tramp[0]);
	/* addi r1, r1, __SIGNAL_FRAMESIZE  # Pop the dummy stackframe */
	err |= __put_user(PPC_INST_ADDI | __PPC_RT(R1) | __PPC_RA(R1) |
			  (__SIGNAL_FRAMESIZE & 0xffff), &tramp[1]);
	/* li r0, __NR_[rt_]sigreturn */
	err |= __put_user(PPC_INST_ADDI | (syscall & 0xffff), &tramp[2]);
	/* sc */
	err |= __put_user(PPC_INST_SC, &tramp[3]);

	/* Minimal traceback info */
	for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
		err |= __put_user(0, &tramp[i]);

	if (!err)
		flush_icache_range((unsigned long)&tramp[0],
				   (unsigned long)&tramp[TRAMP_SIZE]);

	return err;
}
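/*
 * For reference, the sequence written above corresponds to:
 *
 *	bctrl					# call the handler
 *	addi	r1, r1, __SIGNAL_FRAMESIZE	# pop the dummy caller frame
 *	li	r0, __NR_rt_sigreturn		# (or whatever syscall was passed in)
 *	sc					# return to the kernel
 */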
/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end. We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - 32*sizeof(long))
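/*
 * The 32 longs subtracted above are the VSX area of the mcontext (the low
 * doublewords of vs0-vs31) appended after the VMX data; a ucontext built by
 * VSX-unaware userspace simply ends before that region.
 */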
/*
 * Handle {get,set,swap}_context operations
 */
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		struct ucontext __user *, new_ctx, long, ctx_size)
{
	sigset_t set;
	unsigned long new_msr = 0;
	int ctx_has_vsx_region = 0;

	if (new_ctx &&
	    get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
		return -EFAULT;
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;

	if (old_ctx != NULL) {
		prepare_setup_sigcontext(current);
		if (!user_write_access_begin(old_ctx, ctx_size))
			return -EFAULT;

		unsafe_setup_sigcontext(&old_ctx->uc_mcontext, current, 0, NULL,
					0, ctx_has_vsx_region, efault_out);
		unsafe_copy_to_user(&old_ctx->uc_sigmask, &current->blocked,
				    sizeof(sigset_t), efault_out);

		user_write_access_end();
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted. For instance
	 * the NIP value may have been updated but not some of the
	 * other registers. Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
		do_exit(SIGSEGV);
	set_current_blocked(&set);

	if (restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext))
		do_exit(SIGSEGV);

	/* This returns like rt_sigreturn */
	set_thread_flag(TIF_RESTOREALL);

	return 0;

efault_out:
	user_write_access_end();
	return -EFAULT;
}
/*
 * Do a signal return; undo the signal stack.
 */

SYSCALL_DEFINE0(rt_sigreturn)
{
	struct pt_regs *regs = current_pt_regs();
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
	unsigned long msr;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM)) {
		/*
		 * If there is a transactional state then throw it away.
		 * The purpose of a sigreturn is to destroy all traces of the
		 * signal frame, and this includes any transactional state
		 * created within it. We only check for suspended, as we can
		 * never be active in the kernel; if we somehow were, there
		 * would be nothing better to do than go ahead and Bad Thing
		 * later.
		 * The cause is not important as there will never be a
		 * recheckpoint so it's not user visible.
		 */
		if (MSR_TM_SUSPENDED(mfmsr()))
			tm_reclaim_current(0);

		/*
		 * Disable the MSR[TS] bit also, so, if there is an exception
		 * in the code below (as a page fault in copy_ckvsx_to_user()),
		 * it does not recheckpoint this task if there was a context
		 * switch inside the exception.
		 *
		 * A major page fault can indirectly call schedule(). A
		 * reschedule in the middle of an exception can have a side
		 * effect (changing the CPU MSR[TS] state), since schedule()
		 * is called with the CPU MSR[TS] disabled and returns with
		 * MSR[TS]=Suspended (switch_to() calls tm_recheckpoint() for
		 * the 'new' process). In this case, the process continues to
		 * be the same on the CPU, but the CPU state just changed.
		 *
		 * This can cause a TM Bad Thing, since the MSR in the stack
		 * will have MSR[TS]=0, and this is what will be used to RFID.
		 *
		 * Clearing MSR[TS] state here will avoid a recheckpoint if
		 * there is any process reschedule in kernel space. The MSR[TS]
		 * state does not need to be saved either, since it will be
		 * replaced with the MSR[TS] that came from user context later,
		 * at restore_tm_sigcontexts.
		 */
		regs->msr &= ~MSR_TS_MASK;

		if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
			goto badframe;
	}

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) && MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;

		/* Trying to start TM on non TM system */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;

		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	} else {
		/*
		 * Fall through, for non-TM restore.
		 *
		 * Unset MSR[TS] on the thread regs since the MSR from user
		 * context does not have MSR active, and recheckpoint was
		 * not called since restore_tm_sigcontexts() was not called
		 * either.
		 *
		 * If not unsetting it, the code can RFID to userspace with
		 * MSR[TS] set, but without the CPU in the proper state,
		 * causing a TM Bad Thing.
		 */
		current->thread.regs->msr &= ~MSR_TS_MASK;
		if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
			goto badframe;
	}

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "rt_sigreturn", uc);

	force_sig(SIGSEGV);
	return 0;
}
int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;

	frame = get_sigframe(ksig, tsk, sizeof(*frame), 0);
	if (!access_ok(frame, sizeof(*frame)))
		goto badframe;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	if (err)
		goto badframe;

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);

		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    tsk, ksig->sig, NULL,
					    (unsigned long)ksig->ka.sa.sa_handler,
					    msr);
#endif
	} else {
		err |= __put_user(0, &frame->uc.uc_link);
		prepare_setup_sigcontext(tsk);
		if (!user_write_access_begin(&frame->uc.uc_mcontext,
					     sizeof(frame->uc.uc_mcontext)))
			return -EFAULT;
		err |= __unsafe_setup_sigcontext(&frame->uc.uc_mcontext, tsk,
						 ksig->sig, NULL,
						 (unsigned long)ksig->ka.sa.sa_handler, 1);
		user_write_access_end();
	}
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	tsk->thread.fp_state.fpscr = 0;

	/* Set up to return from userspace. */
	if (tsk->mm->context.vdso) {
		regs->nip = VDSO64_SYMBOL(tsk->mm->context.vdso, sigtramp_rt64);
	} else {
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs->nip = (unsigned long)&frame->tramp[0];
	}

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
	if (is_elf2_task()) {
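		/*
		 * ELFv2 has no function descriptors: the handler address is
		 * the entry point itself, and the ABI expects r12 to hold
		 * that entry address so the handler can establish its own
		 * TOC pointer from it.
		 */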
		regs->ctr = (unsigned long)ksig->ka.sa.sa_handler;
		regs->gpr[12] = regs->ctr;
	} else {
		/* Handler is *really* a pointer to the function descriptor for
		 * the signal routine. The first entry in the function
		 * descriptor is the entry address of signal and the second
		 * entry is the TOC value we need to use.
		 */
		func_descr_t __user *funct_desc_ptr =
			(func_descr_t __user *)ksig->ka.sa.sa_handler;

		err |= get_user(regs->ctr, &funct_desc_ptr->entry);
		err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
	}

	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->result = 0;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
		err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
		regs->gpr[6] = (unsigned long)frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 0;

badframe:
	signal_fault(current, regs, "handle_rt_signal64", frame);

	return 1;
}