#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* void do_load_up_transact_altivec(struct thread_struct *thread)
 *
 * This is similar to load_up_altivec but for the transactional version of the
 * vector regs.  It doesn't mess with the task MSR or valid flags.
 * Furthermore, VEC laziness is not supported with TM currently.
 */
_GLOBAL(do_load_up_transact_altivec)
	mfmsr	r6
	oris	r5,r6,MSR_VEC@h
	MTMSRD(r5)
	isync

	li	r4,1
	stw	r4,THREAD_USED_VR(r3)

	li	r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
	lvx	v0,r10,r3
	mtvscr	v0
	addi	r10,r3,THREAD_TRANSACT_VRSTATE
	REST_32VRS(0,r4,r10)

	blr
#endif
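/*
 * Note on the VSCR handling above: there is no direct load or store
 * for the VSCR; mtvscr/mfvscr only move it to and from a vector
 * register, so the saved image is staged through v0 with lvx before
 * mtvscr.  REST_32VRS then pulls v0-v31 from the transactional save
 * area.
 */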
/*
 * Enable use of VMX/Altivec for the caller.
 */
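/*
 * A minimal enable stub of this shape is all that is needed
 * (illustrative sketch, assuming the MTMSRD wrapper from ppc_asm.h):
 *
 *	_GLOBAL(vec_enable)
 *		mfmsr	r3
 *		oris	r3,r3,MSR_VEC@h
 *		MTMSRD(r3)
 *		isync
 *		blr
 */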
/*
 * Load state from memory into VMX registers including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(load_vr_state)
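/*
 * Body sketch (r3 points at the vrstate buffer; REST_32VRS and
 * VRSTATE_VSCR come from ppc_asm.h and asm-offsets):
 *
 *	li	r4,VRSTATE_VSCR
 *	lvx	v0,r4,r3
 *	mtvscr	v0
 *	REST_32VRS(0,r4,r3)
 *	blr
 */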
/*
 * Store VMX state into memory, including VSCR.
 * Assumes the caller has enabled VMX in the MSR.
 */
_GLOBAL(store_vr_state)
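/*
 * Body sketch, the mirror image of load_vr_state: SAVE_32VRS dumps
 * v0-v31, then the VSCR is staged out through v0 with mfvscr/stvx:
 *
 *	SAVE_32VRS(0, r4, r3)
 *	mfvscr	v0
 *	li	r4, VRSTATE_VSCR
 *	stvx	v0, r4, r3
 *	blr
 */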
/*
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 *
 * Note that on 32-bit this can only use registers that will be
 * restored by fast_exception_return, i.e. r3 - r6, r10 and r11.
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync
	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpwi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
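	/*
	 * VRSAVE is purely a software convention: a bitmask of which
	 * vector registers the application considers live.  Some
	 * save/restore paths use it to limit how much vector state
	 * must be preserved, so an all-zeroes mask from a broken
	 * application could cause state to be silently lost; hence
	 * the all-ones fallback above.
	 */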
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG_THREAD	/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	addi	r6,r5,THREAD_VRSTATE
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
	/* restore registers and return */
	blr
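	/*
	 * "After return" works because r12 (64-bit) / r9 (32-bit)
	 * hold the MSR image that the exception exit path will
	 * restore: setting MSR_VEC there, rather than only in the
	 * live MSR, is what hands VMX back to the interrupted task.
	 */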
_GLOBAL(giveup_altivec_notask)
	mfmsr	r3
	andis.	r4,r3,MSR_VEC@h
	bnelr				/* Already enabled? */
	oris	r3,r3,MSR_VEC@h
	SYNC
	MTMSRD(r3)			/* enable use of VMX now */
	isync
	blr
/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r7,THREAD_VRSAVEAREA(r3)
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r7,0
	bne	2f
	addi	r7,r3,THREAD_VRSTATE
2:	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r7)
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	v0,r4,r7
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable VMX (and VSX) for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
	blr
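/*
 * Clearing MSR_VEC (and MSR_VSX, when the CPU has VSX) in the task's
 * saved MSR is the lazy-disable half of the scheme: the next vector
 * instruction the task issues will trap with "altivec unavailable"
 * and come back in through load_up_altivec.
 */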
#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif
/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
	b	fast_exception_return
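/*
 * VSX defines no register file of its own: VSR0-31 overlay the FPRs
 * and VSR32-63 are the VRs.  Restoring the FP and VMX state through
 * load_up_fpu/load_up_altivec above therefore brings in the VSX
 * state too; all that is left here is the used_vsr bookkeeping and
 * the MSR_VSX bit.
 */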
/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
	blr
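/*
 * Only MSR_VSX is cleared here, so the task keeps FP and VMX.  The
 * VSX register contents need no saving (see above): they live in the
 * FP and VMX save areas, and the C-level giveup_vsx() path is
 * expected to have called giveup_fpu() and giveup_altivec() first.
 */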
#endif /* CONFIG_VSX */
/*
 * The routines below are in assembler so we can closely control the
 * usage of floating-point registers.  These routines must be called
 * with preempt disabled.
 */
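/*
 * Preemption must be off because these routines flip MSR_FP and use
 * a handful of FPRs behind the back of the normal lazy-FP machinery;
 * a context switch in the middle would save or restore the wrong
 * floating-point state for the current task.
 */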
#ifdef CONFIG_PPC32
	.data
fpzero:
	.long	0
fpone:
	.long	0x3f800000	/* 1.0 in single-precision FP */
fphalf:
	.long	0x3f000000	/* 0.5 in single-precision FP */

#define LDCONST(fr, name)	\
	lis	r11,name@ha;	\
	lfs	fr,name@l(r11)
#else

	.section ".toc","aw"
fpzero:
	.tc	FD_0_0[TC],0
fpone:
	.tc	FD_3ff00000_0[TC],0x3ff0000000000000	/* 1.0 */
fphalf:
	.tc	FD_3fe00000_0[TC],0x3fe0000000000000	/* 0.5 */

#define LDCONST(fr, name)	\
	lfd	fr,name@toc(r2)
#endif

	.text
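/*
 * PowerPC has no floating-point immediates, so the 1.0 and 0.5
 * needed by the estimate routines are loaded from memory.  LDCONST
 * hides the addressing difference: a lis/lfs pair against .data on
 * 32-bit, an lfd from the TOC (anchored at r2) on 64-bit.
 */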
/*
 * Internal routine to enable floating point and set FPSCR to 0.
 * Don't call it from C; it doesn't use the normal calling convention.
 */
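/*
 * Sketch of what fpenable and its fpdisable counterpart must do
 * (illustrative; the exact stack offsets are internal to this file):
 * fpenable sets MSR_FP, saves the caller's FPSCR plus the few FPRs
 * used as scratch, then clears the FPSCR so the interrupted task's
 * rounding mode and enabled exceptions cannot perturb the emulated
 * operation; fpdisable restores all of that and returns through the
 * link-register value the caller stashed in r12.
 */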
/*
 * Vector add, floating point.
 */
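/*
 * Each of these emulation routines follows the same pattern (sketch,
 * using vaddfp as the example; r3 = destination vector in memory,
 * r4/r5 = sources): loop over the four 32-bit lanes with scalar FP.
 *
 *	_GLOBAL(vaddfp)
 *		mflr	r12
 *		bl	fpenable
 *		li	r0,4
 *		mtctr	r0
 *		li	r6,0
 *	1:	lfsx	fr0,r4,r6
 *		lfsx	fr1,r5,r6
 *		fadds	fr0,fr0,fr1
 *		stfsx	fr0,r3,r6
 *		addi	r6,r6,4
 *		bdnz	1b
 *		b	fpdisable
 */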
/*
 * Vector subtract, floating point.
 */
/*
 * Vector multiply and add, floating point.
 */
	fmadds	fr0,fr0,fr2,fr1
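	/*
	 * Operand order matters here: Altivec's vmaddfp computes
	 * (vA * vC) + vB, and fmadds frD,frA,frC,frB likewise
	 * computes (frA * frC) + frB, so fr0*fr2+fr1 lines up
	 * lane-for-lane with the emulated instruction's operands.
	 */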
/*
 * Vector negative multiply and subtract, floating point.
 */
	fnmsubs	fr0,fr0,fr2,fr1
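	/*
	 * fnmsubs frD,frA,frC,frB computes -((frA * frC) - frB),
	 * which is exactly vnmsubfp's -((vA * vC) - vB) on one lane.
	 */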
/*
 * Vector reciprocal estimate.  We just compute 1.0/x.
 * r3 -> destination, r4 -> source.
 */
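/*
 * A full divide is used instead of fres plus refinement, so this
 * "estimate" is exact to single precision.  Per-lane body (sketch,
 * with fr1 preloaded via LDCONST(fr1, fpone)):
 *
 *	lfsx	fr0,r4,r6
 *	fdivs	fr0,fr1,fr0
 *	stfsx	fr0,r3,r6
 */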
/*
 * Vector reciprocal square-root estimate, floating point.
 * We use the frsqrte instruction for the initial estimate followed
 * by 2 iterations of Newton-Raphson to get sufficient accuracy.
 * r3 -> destination, r4 -> source.
 */
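/*
 * The iteration below comes from Newton-Raphson on f(r) = 1/r^2 - s,
 * whose positive root is r = 1/sqrt(s):
 *
 *	r' = r - f(r)/f'(r) = r + 0.5 * r * (1 - s * r^2)
 *
 * Each step roughly doubles the number of correct bits, so two steps
 * on frsqrte's initial estimate comfortably exceed the accuracy the
 * vrsqrtefp estimate instruction is architected to deliver.
 */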
	frsqrte	fr1,fr0			/* r = frsqrte(s) */
	fmuls	fr3,fr1,fr0		/* r * s */
	fmuls	fr2,fr1,fr5		/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4		/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1		/* r = r + 0.5 * r * (1 - s * r * r) */
	fmuls	fr3,fr1,fr0		/* r * s */
	fmuls	fr2,fr1,fr5		/* r * 0.5 */
	fnmsubs	fr3,fr1,fr3,fr4		/* 1 - s * r * r */
	fmadds	fr1,fr2,fr3,fr1		/* r = r + 0.5 * r * (1 - s * r * r) */