// SPDX-License-Identifier: GPL-2.0
/*
 * In-kernel vector facility support functions
 *
 * Copyright IBM Corp. 2015
 * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <asm/fpu.h>
13 void __kernel_fpu_begin(struct kernel_fpu *state, int flags)
15 __vector128 *vxrs = state->vxrs;
19 * Limit the save to the FPU/vector registers already
20 * in use by the previous context.
22 flags &= state->hdr.mask;
23 if (flags & KERNEL_FPC)
24 fpu_stfpc(&state->hdr.fpc);
26 if (flags & KERNEL_VXR_LOW)
27 save_fp_regs_vx(vxrs);
30 mask = flags & KERNEL_VXR;
31 if (mask == KERNEL_VXR) {
32 vxrs += fpu_vstm(0, 15, vxrs);
33 vxrs += fpu_vstm(16, 31, vxrs);
36 if (mask == KERNEL_VXR_MID) {
37 vxrs += fpu_vstm(8, 23, vxrs);
40 mask = flags & KERNEL_VXR_LOW;
42 if (mask == KERNEL_VXR_LOW)
43 vxrs += fpu_vstm(0, 15, vxrs);
44 else if (mask == KERNEL_VXR_V0V7)
45 vxrs += fpu_vstm(0, 7, vxrs);
47 vxrs += fpu_vstm(8, 15, vxrs);
49 mask = flags & KERNEL_VXR_HIGH;
51 if (mask == KERNEL_VXR_HIGH)
52 vxrs += fpu_vstm(16, 31, vxrs);
53 else if (mask == KERNEL_VXR_V16V23)
54 vxrs += fpu_vstm(16, 23, vxrs);
56 vxrs += fpu_vstm(24, 31, vxrs);
59 EXPORT_SYMBOL(__kernel_fpu_begin);
61 void __kernel_fpu_end(struct kernel_fpu *state, int flags)
63 __vector128 *vxrs = state->vxrs;
67 * Limit the restore to the FPU/vector registers of the
68 * previous context that have been overwritten by the
71 flags &= state->hdr.mask;
72 if (flags & KERNEL_FPC)
73 fpu_lfpc(&state->hdr.fpc);
75 if (flags & KERNEL_VXR_LOW)
76 load_fp_regs_vx(vxrs);
79 mask = flags & KERNEL_VXR;
80 if (mask == KERNEL_VXR) {
81 vxrs += fpu_vlm(0, 15, vxrs);
82 vxrs += fpu_vlm(16, 31, vxrs);
85 if (mask == KERNEL_VXR_MID) {
86 vxrs += fpu_vlm(8, 23, vxrs);
89 mask = flags & KERNEL_VXR_LOW;
91 if (mask == KERNEL_VXR_LOW)
92 vxrs += fpu_vlm(0, 15, vxrs);
93 else if (mask == KERNEL_VXR_V0V7)
94 vxrs += fpu_vlm(0, 7, vxrs);
96 vxrs += fpu_vlm(8, 15, vxrs);
98 mask = flags & KERNEL_VXR_HIGH;
100 if (mask == KERNEL_VXR_HIGH)
101 vxrs += fpu_vlm(16, 31, vxrs);
102 else if (mask == KERNEL_VXR_V16V23)
103 vxrs += fpu_vlm(16, 23, vxrs);
105 vxrs += fpu_vlm(24, 31, vxrs);
108 EXPORT_SYMBOL(__kernel_fpu_end);
110 void load_fpu_state(struct fpu *state, int flags)
112 __vector128 *vxrs = &state->vxrs[0];
115 if (flags & KERNEL_FPC)
116 fpu_lfpc_safe(&state->fpc);
118 if (flags & KERNEL_VXR_V0V7)
119 load_fp_regs_vx(state->vxrs);
122 mask = flags & KERNEL_VXR;
123 if (mask == KERNEL_VXR) {
124 fpu_vlm(0, 15, &vxrs[0]);
125 fpu_vlm(16, 31, &vxrs[16]);
128 if (mask == KERNEL_VXR_MID) {
129 fpu_vlm(8, 23, &vxrs[8]);
132 mask = flags & KERNEL_VXR_LOW;
134 if (mask == KERNEL_VXR_LOW)
135 fpu_vlm(0, 15, &vxrs[0]);
136 else if (mask == KERNEL_VXR_V0V7)
137 fpu_vlm(0, 7, &vxrs[0]);
139 fpu_vlm(8, 15, &vxrs[8]);
141 mask = flags & KERNEL_VXR_HIGH;
143 if (mask == KERNEL_VXR_HIGH)
144 fpu_vlm(16, 31, &vxrs[16]);
145 else if (mask == KERNEL_VXR_V16V23)
146 fpu_vlm(16, 23, &vxrs[16]);
148 fpu_vlm(24, 31, &vxrs[24]);
152 void save_fpu_state(struct fpu *state, int flags)
154 __vector128 *vxrs = &state->vxrs[0];
157 if (flags & KERNEL_FPC)
158 fpu_stfpc(&state->fpc);
160 if (flags & KERNEL_VXR_LOW)
161 save_fp_regs_vx(state->vxrs);
164 mask = flags & KERNEL_VXR;
165 if (mask == KERNEL_VXR) {
166 fpu_vstm(0, 15, &vxrs[0]);
167 fpu_vstm(16, 31, &vxrs[16]);
170 if (mask == KERNEL_VXR_MID) {
171 fpu_vstm(8, 23, &vxrs[8]);
174 mask = flags & KERNEL_VXR_LOW;
176 if (mask == KERNEL_VXR_LOW)
177 fpu_vstm(0, 15, &vxrs[0]);
178 else if (mask == KERNEL_VXR_V0V7)
179 fpu_vstm(0, 7, &vxrs[0]);
181 fpu_vstm(8, 15, &vxrs[8]);
183 mask = flags & KERNEL_VXR_HIGH;
185 if (mask == KERNEL_VXR_HIGH)
186 fpu_vstm(16, 31, &vxrs[16]);
187 else if (mask == KERNEL_VXR_V16V23)
188 fpu_vstm(16, 23, &vxrs[16]);
190 fpu_vstm(24, 31, &vxrs[24]);
193 EXPORT_SYMBOL(save_fpu_state);