/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#include <asm/firmware.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
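/*
 * Illustrative sketch (not code from this file): a typical caller is the
 * ptrace path, which must see up-to-date register values in the
 * thread_struct before copying them out, roughly:
 *
 *	flush_fp_to_thread(child);
 *	(then read child->thread.fp_state)
 *
 * The caller shown here is an assumption for illustration only.
 */
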
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
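/*
 * Usage sketch (hypothetical caller): the FPU must be claimed with
 * preemption disabled, matching the WARN_ON(preemptible()) above:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	(use floating-point registers)
 *	preempt_enable();
 */
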
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif /* CONFIG_SPE */
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break (struct pt_regs *regs, unsigned long address,
		    unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, thread->debug.iac1);
	mtspr(SPRN_IAC2, thread->debug.iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->debug.iac3);
	mtspr(SPRN_IAC4, thread->debug.iac4);
#endif
	mtspr(SPRN_DAC1, thread->debug.dac1);
	mtspr(SPRN_DAC2, thread->debug.dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->debug.dvc1);
	mtspr(SPRN_DVC2, thread->debug.dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
#endif
}

/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
		                   << (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
		                   << (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
		                   >> 3; /* privilege bits */
	/*
	 * DAWR length is stored in field MRD, bits 48:53.  It matches the
	 * range in doublewords (64 bits), biased by -1, e.g. 0b000000 = 1DW
	 * and 0b111111 = 64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
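/*
 * Worked example of the MRD encoding above (follows directly from the
 * arithmetic, not from any external spec text): for brk->len = 1..8
 * bytes, ((len + 7) >> 3) - 1 = 0, i.e. a one-doubleword range; for
 * brk->len = 9, ((9 + 7) >> 3) - 1 = 1, i.e. two doublewords.
 */
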
int set_breakpoint(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;

	if (cpu_has_feature(CPU_FTR_DAWR))
		return set_dawr(brk);

	return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
			      struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.
	 */
	thr->tm_orig_msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable exception later, we are unable to determine which set
	 * of FP regs needs to be restored.
	 */
	if (!new->thread.regs)
		return;

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(&new->thread);

	if (!MSR_TM_ACTIVE(new->thread.regs->msr))
		return;
	msr = new->thread.tm_orig_msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	WARN_ON(!irqs_disabled());

	/* Back up the TAR across context switches.
	 * Note that the TAR is not available for use in the kernel.  (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
	 * pt_regs anyway (for debug).)
	 * Save the TAR here before we do treclaim/trecheckpoint as these
	 * will change the TAR.
	 */
	save_tar(&prev->thread);

	__switch_to_tm(prev);

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces, which manage
 * the DABR/DAWR for us; only set the breakpoint directly when that
 * framework is not available.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
		set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}
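/*
 * For context (summarising the lazy-FP comments above, not new behaviour
 * defined here): after giveup_fpu() the task runs with MSR_FP clear, so
 * its next FP instruction takes an FP-unavailable exception, whose
 * handler reloads the registers from the thread_struct and sets MSR_FP
 * again.  Switch-out therefore only ever needs a save, never a restore.
 */
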
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");
#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif
		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}
		pc += sizeof(int);
	}
	printk("\n");
}
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REGS_PER_LINE 4
#define LAST_VOLATILE 13
#else
#define REGS_PER_LINE 8
#define LAST_VOLATILE 12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);

	*dst = *src;

	return 0;
}

extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = usp;	/* function */
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
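	/*
	 * Resulting child stack layout, top of the stack page first (a
	 * sketch based on the arithmetic above, using this function's
	 * names):
	 *
	 *	task_stack_page(p) + THREAD_SIZE
	 *	  struct pt_regs        (childregs: user/kthread state)
	 *	  STACK_FRAME_OVERHEAD  (dummy frame, back chain = 0)
	 *	  struct pt_regs        (kregs: frame _switch() returns via)
	 *	  STACK_FRAME_OVERHEAD  (minimal frame)
	 *	p->thread.ksp
	 */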
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_except) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * The latest iteration of the ABI requires that when
			 * calling a function (at its global entry point),
			 * the caller must ensure r12 holds the entry point
			 * address (so that the function can quickly
			 * establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc   += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
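/*
 * For reference (standard ELFv1 ABI layout, assumed here rather than
 * defined by this file): the function descriptor read above is an array
 * of doublewords at e_entry:
 *
 *	start + 0:  entry address of _start
 *	start + 8:  TOC pointer value for _start
 *
 * which is why entry and toc are loaded from consecutive unsigned longs.
 */
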
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
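/*
 * For reference, the classic-FP prctl modes packed by __pack_fe01()
 * select the standard Power MSR[FE0,FE1] behaviours:
 *
 *	PR_FP_EXC_DISABLED	FP exceptions ignored
 *	PR_FP_EXC_NONRECOV	imprecise non-recoverable mode
 *	PR_FP_EXC_ASYNC		imprecise recoverable mode
 *	PR_FP_EXC_PRECISE	precise mode
 */
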
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
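/*
 * Usage sketch (mirrors the stack walkers below): a caller advancing a
 * frame pointer checks it before each dereference, e.g.
 *
 *	if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
 *		return;
 *	sp = *(unsigned long *)sp;	(follow the back chain)
 */
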
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			lr = regs->link;
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
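/*
 * Example of the effect (arithmetic only): with 4K pages,
 * get_random_int() & ~PAGE_MASK yields 0..4095, so the initial stack
 * pointer is pushed down by up to one page and then rounded to a
 * 16-byte boundary by the mask above.
 */
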
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
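/*
 * Worked example (follows from the shifts above, assuming 4K pages,
 * PAGE_SHIFT = 12): 32-bit tasks draw a page count in [0, 2^11), giving
 * up to 8MB of brk randomisation once shifted; 64-bit tasks draw from
 * [0, 2^18), giving up to 1GB, matching the comment above.
 */
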
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}