Merge branch 'address-masking'
[linux-2.6-microblaze.git] / arch / x86 / kernel / fpu / init.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * x86 FPU boot time init code:
4  */
5 #include <asm/fpu/api.h>
6 #include <asm/tlbflush.h>
7 #include <asm/setup.h>
8
9 #include <linux/sched.h>
10 #include <linux/sched/task.h>
11 #include <linux/init.h>
12
13 #include "internal.h"
14 #include "legacy.h"
15 #include "xstate.h"
16
17 /*
18  * Initialize the registers found in all CPUs, CR0 and CR4:
19  */
20 static void fpu__init_cpu_generic(void)
21 {
22         unsigned long cr0;
23         unsigned long cr4_mask = 0;
24
25         if (boot_cpu_has(X86_FEATURE_FXSR))
26                 cr4_mask |= X86_CR4_OSFXSR;
27         if (boot_cpu_has(X86_FEATURE_XMM))
28                 cr4_mask |= X86_CR4_OSXMMEXCPT;
29         if (cr4_mask)
30                 cr4_set_bits(cr4_mask);
31
32         cr0 = read_cr0();
33         cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
34         if (!boot_cpu_has(X86_FEATURE_FPU))
35                 cr0 |= X86_CR0_EM;
36         write_cr0(cr0);
37
38         /* Flush out any pending x87 state: */
39 #ifdef CONFIG_MATH_EMULATION
40         if (!boot_cpu_has(X86_FEATURE_FPU))
41                 fpstate_init_soft(&current->thread.fpu.fpstate->regs.soft);
42         else
43 #endif
44                 asm volatile ("fninit");
45 }
46
47 /*
48  * Enable all supported FPU features. Called when a CPU is brought online:
49  */
void fpu__init_cpu(void)
{
	/* Generic CR0/CR4 + x87 setup first, then xstate (XSAVE) enabling: */
	fpu__init_cpu_generic();
	fpu__init_cpu_xstate();
}
55
56 static bool __init fpu__probe_without_cpuid(void)
57 {
58         unsigned long cr0;
59         u16 fsw, fcw;
60
61         fsw = fcw = 0xffff;
62
63         cr0 = read_cr0();
64         cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
65         write_cr0(cr0);
66
67         asm volatile("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));
68
69         pr_info("x86/fpu: Probing for FPU: FSW=0x%04hx FCW=0x%04hx\n", fsw, fcw);
70
71         return fsw == 0 && (fcw & 0x103f) == 0x003f;
72 }
73
74 static void __init fpu__init_system_early_generic(void)
75 {
76         if (!boot_cpu_has(X86_FEATURE_CPUID) &&
77             !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
78                 if (fpu__probe_without_cpuid())
79                         setup_force_cpu_cap(X86_FEATURE_FPU);
80                 else
81                         setup_clear_cpu_cap(X86_FEATURE_FPU);
82         }
83
84 #ifndef CONFIG_MATH_EMULATION
85         if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
86                 pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
87                 for (;;)
88                         asm volatile("hlt");
89         }
90 #endif
91 }
92
93 /*
94  * Boot time FPU feature detection code:
95  */
/*
 * Mask of supported MXCSR feature bits: starts out all-set and is
 * narrowed by fpu__init_system_mxcsr() from the FXSAVE-reported mask.
 */
unsigned int mxcsr_feature_mask __ro_after_init = 0xffffffffu;
EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
98
99 static void __init fpu__init_system_mxcsr(void)
100 {
101         unsigned int mask = 0;
102
103         if (boot_cpu_has(X86_FEATURE_FXSR)) {
104                 /* Static because GCC does not get 16-byte stack alignment right: */
105                 static struct fxregs_state fxregs __initdata;
106
107                 asm volatile("fxsave %0" : "+m" (fxregs));
108
109                 mask = fxregs.mxcsr_mask;
110
111                 /*
112                  * If zero then use the default features mask,
113                  * which has all features set, except the
114                  * denormals-are-zero feature bit:
115                  */
116                 if (mask == 0)
117                         mask = 0x0000ffbf;
118         }
119         mxcsr_feature_mask &= mask;
120 }
121
122 /*
123  * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
124  */
static void __init fpu__init_system_generic(void)
{
	/*
	 * Set up the legacy init FPU context. Will be updated when the
	 * CPU supports XSAVE[S].
	 */
	fpstate_init_user(&init_fpstate);

	/* Narrow the supported MXCSR bits (becomes 0 without FXSR): */
	fpu__init_system_mxcsr();
}
135
136 /*
137  * Enforce that 'MEMBER' is the last field of 'TYPE'.
138  *
139  * Align the computed size with alignment of the TYPE,
140  * because that's how C aligns structs.
141  */
/* Expands to a BUILD_BUG_ON, so it is only usable inside a function body. */
#define CHECK_MEMBER_AT_END_OF(TYPE, MEMBER) \
	BUILD_BUG_ON(sizeof(TYPE) !=         \
		     ALIGN(offsetofend(TYPE, MEMBER), _Alignof(TYPE)))
145
146 /*
147  * We append the 'struct fpu' to the task_struct:
148  */
149 static void __init fpu__init_task_struct_size(void)
150 {
151         int task_size = sizeof(struct task_struct);
152
153         /*
154          * Subtract off the static size of the register state.
155          * It potentially has a bunch of padding.
156          */
157         task_size -= sizeof(current->thread.fpu.__fpstate.regs);
158
159         /*
160          * Add back the dynamically-calculated register state
161          * size.
162          */
163         task_size += fpu_kernel_cfg.default_size;
164
165         /*
166          * We dynamically size 'struct fpu', so we require that
167          * it be at the end of 'thread_struct' and that
168          * 'thread_struct' be at the end of 'task_struct'.  If
169          * you hit a compile error here, check the structure to
170          * see if something got added to the end.
171          */
172         CHECK_MEMBER_AT_END_OF(struct fpu, __fpstate);
173         CHECK_MEMBER_AT_END_OF(struct thread_struct, fpu);
174         CHECK_MEMBER_AT_END_OF(struct task_struct, thread);
175
176         arch_task_struct_size = task_size;
177 }
178
179 /*
180  * Set up the user and kernel xstate sizes based on the legacy FPU context size.
181  *
182  * We set this up first, and later it will be overwritten by
183  * fpu__init_system_xstate() if the CPU knows about xstates.
184  */
185 static void __init fpu__init_system_xstate_size_legacy(void)
186 {
187         unsigned int size;
188
189         /*
190          * Note that the size configuration might be overwritten later
191          * during fpu__init_system_xstate().
192          */
193         if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
194                 size = sizeof(struct swregs_state);
195         } else if (cpu_feature_enabled(X86_FEATURE_FXSR)) {
196                 size = sizeof(struct fxregs_state);
197                 fpu_user_cfg.legacy_features = XFEATURE_MASK_FPSSE;
198         } else {
199                 size = sizeof(struct fregs_state);
200                 fpu_user_cfg.legacy_features = XFEATURE_MASK_FP;
201         }
202
203         fpu_kernel_cfg.max_size = size;
204         fpu_kernel_cfg.default_size = size;
205         fpu_user_cfg.max_size = size;
206         fpu_user_cfg.default_size = size;
207         fpstate_reset(&current->thread.fpu);
208 }
209
210 /*
211  * Called on the boot CPU once per system bootup, to set up the initial
212  * FPU state that is later cloned into all processes:
213  */
void __init fpu__init_system(void)
{
	/* Give 'current' a valid fpstate before any FPU code runs: */
	fpstate_reset(&current->thread.fpu);
	fpu__init_system_early_generic();

	/*
	 * The FPU has to be operational for some of the
	 * later FPU init activities:
	 */
	fpu__init_cpu();

	fpu__init_system_generic();
	/* Legacy sizes first; fpu__init_system_xstate() refines them: */
	fpu__init_system_xstate_size_legacy();
	fpu__init_system_xstate(fpu_kernel_cfg.max_size);
	/* Must come last: depends on the final fpu_kernel_cfg.default_size. */
	fpu__init_task_struct_size();
}