// SPDX-License-Identifier: GPL-2.0-only
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
#include <xen/xen.h>

#include <asm/fpu/internal.h>
#include <asm/traps.h>
#include <asm/kdebug.h>

typedef bool (*ex_handler_t)(const struct exception_table_entry *,
			     struct pt_regs *, int, unsigned long,
			     unsigned long);
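
/*
 * Entries in the exception table store 32-bit self-relative offsets
 * rather than absolute pointers, which keeps the table small and
 * relocation-free.  As a sketch (assuming the layout used by
 * arch/x86/include/asm/extable.h in this era):
 *
 *	struct exception_table_entry {
 *		int insn, fixup, handler;
 *	};
 *
 * The helpers below recover usable addresses by adding each offset to
 * the address of the field that holds it.
 */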
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}

static inline ex_handler_t
ex_fixup_handler(const struct exception_table_entry *x)
{
	return (ex_handler_t)((unsigned long)&x->handler + x->handler);
}
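
/*
 * Extable entries are normally emitted by inline assembly.  As a rough
 * sketch of what an annotation expands to (modelled on the
 * _ASM_EXTABLE_HANDLE() macro in asm/asm.h; exact details vary by
 * kernel version):
 *
 *	.pushsection "__ex_table", "a"
 *	.balign 4
 *	.long (from) - .	# faulting instruction
 *	.long (to) - .		# fixup target
 *	.long (handler) - .	# one of the ex_handler_* functions below
 *	.popsection
 */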

__visible bool ex_handler_default(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_default);
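
/*
 * The plain _ASM_EXTABLE(from, to) annotation resolves to
 * ex_handler_default, i.e. "on any fault, just jump to the fixup code".
 */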

__visible bool ex_handler_fault(const struct exception_table_entry *fixup,
				struct pt_regs *regs, int trapnr,
				unsigned long error_code,
				unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = trapnr;
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fault);
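
/*
 * _ASM_EXTABLE_FAULT() annotations resolve to ex_handler_fault, which
 * additionally hands the trap number to the fixup code in regs->ax so
 * that it can, for instance, distinguish a #PF from a #MC.
 */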

/*
 * Handler for when we fail to restore a task's FPU state.  We should never get
 * here because the FPU state of a task using the FPU (task->thread.fpu.state)
 * should always be valid.  However, past bugs have allowed userspace to set
 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
 * These caused XRSTOR to fail when switching to the task, leaking the FPU
 * registers of the task previously executing on the CPU.  Mitigate this class
 * of vulnerability by restoring from the initial state (essentially, zeroing
 * out all the FPU registers) if we can't restore from the task's FPU state.
 */
__visible bool ex_handler_fprestore(const struct exception_table_entry *fixup,
				    struct pt_regs *regs, int trapnr,
				    unsigned long error_code,
				    unsigned long fault_addr)
{
	regs->ip = ex_fixup_addr(fixup);

	WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
		  (void *)instruction_pointer(regs));

	__restore_fpregs_from_fpstate(&init_fpstate, xfeatures_mask_fpstate());
	return true;
}
EXPORT_SYMBOL_GPL(ex_handler_fprestore);
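
/*
 * The XRSTOR(S) sites in the FPU core attach this handler explicitly.
 * Roughly (modelled on XSTATE_XRESTORE() in asm/fpu/internal.h; the
 * labels and operands here are illustrative):
 *
 *	asm volatile("1: xrstor %0\n"
 *		     "2:\n"
 *		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)
 *		     : : "m" (*fpstate), "a" (lmask), "d" (hmask));
 */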

__visible bool ex_handler_uaccess(const struct exception_table_entry *fixup,
				  struct pt_regs *regs, int trapnr,
				  unsigned long error_code,
				  unsigned long fault_addr)
{
	WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_uaccess);
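
/*
 * User-copy sites annotate their faulting instructions with
 * _ASM_EXTABLE_UA(from, to), which resolves to this handler.  A rough
 * sketch of such a site (labels and operands illustrative):
 *
 *	asm volatile("1: movq %1, %0\n"
 *		     "2:\n"
 *		     _ASM_EXTABLE_UA(1b, 2b)
 *		     : "=r" (val) : "m" (*uaddr));
 */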

__visible bool ex_handler_copy(const struct exception_table_entry *fixup,
			       struct pt_regs *regs, int trapnr,
			       unsigned long error_code,
			       unsigned long fault_addr)
{
	WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
	regs->ip = ex_fixup_addr(fixup);
	/* Pass the trap number back to the fixup code, as ex_handler_fault does. */
	regs->ax = trapnr;
	return true;
}
EXPORT_SYMBOL(ex_handler_copy);

__visible bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
				       struct pt_regs *regs, int trapnr,
				       unsigned long error_code,
				       unsigned long fault_addr)
{
	if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the read succeeded and returned 0. */
	regs->ip = ex_fixup_addr(fixup);
	regs->ax = 0;
	regs->dx = 0;
	return true;
}
EXPORT_SYMBOL(ex_handler_rdmsr_unsafe);
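
/*
 * This is the handler behind the unchecked MSR accessors.  For
 * illustration, __rdmsr() in asm/msr.h attaches it roughly like so:
 *
 *	asm volatile("1: rdmsr\n"
 *		     "2:\n"
 *		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_rdmsr_unsafe)
 *		     : "=a" (low), "=d" (high) : "c" (msr));
 */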

__visible bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
				       struct pt_regs *regs, int trapnr,
				       unsigned long error_code,
				       unsigned long fault_addr)
{
	if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
			 (unsigned int)regs->cx, (unsigned int)regs->dx,
			 (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
		show_stack_regs(regs);

	/* Pretend that the write succeeded. */
	regs->ip = ex_fixup_addr(fixup);
	return true;
}
EXPORT_SYMBOL(ex_handler_wrmsr_unsafe);

__visible bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
				   struct pt_regs *regs, int trapnr,
				   unsigned long error_code,
				   unsigned long fault_addr)
{
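	/*
	 * On CPUs with the NULL_SEG bug, loading a null selector leaves the
	 * old segment base in place, so load a known-good selector first to
	 * ensure the FS base really ends up at zero.
	 */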
	if (static_cpu_has(X86_BUG_NULL_SEG))
		asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
	asm volatile ("mov %0, %%fs" : : "rm" (0));
	return ex_handler_default(fixup, regs, trapnr, error_code, fault_addr);
}
EXPORT_SYMBOL(ex_handler_clear_fs);

enum handler_type ex_get_fault_handler_type(unsigned long ip)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

	e = search_exception_tables(ip);
	if (!e)
		return EX_HANDLER_NONE;
	handler = ex_fixup_handler(e);
	if (handler == ex_handler_fault)
		return EX_HANDLER_FAULT;
	else if (handler == ex_handler_uaccess || handler == ex_handler_copy)
		return EX_HANDLER_UACCESS;
	else
		return EX_HANDLER_OTHER;
}

int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
		    unsigned long fault_addr)
{
	const struct exception_table_entry *e;
	ex_handler_t handler;

#ifdef CONFIG_PNPBIOS
	if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
		extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
		extern u32 pnp_bios_is_utter_crap;
		pnp_bios_is_utter_crap = 1;
		printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
		__asm__ volatile(
			"movl %0, %%esp\n\t"
			"jmp *%1\n\t"
			: : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
		panic("do_trap: can't hit this");
	}
#endif

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;

	handler = ex_fixup_handler(e);
	return handler(e, regs, trapnr, error_code, fault_addr);
}

extern unsigned int early_recursion_flag;

/* Restricted version used during very early boot */
void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
{
	/* Ignore early NMIs. */
	if (trapnr == X86_TRAP_NMI)
		return;

	if (early_recursion_flag > 2)
		goto halt_loop;

	/*
	 * Old CPUs leave the high bits of CS on the stack
	 * undefined.  I'm not sure which CPUs do this, but at least
	 * the 486 DX works this way.
	 * Xen pv domains are not using the default __KERNEL_CS.
	 */
	if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
		goto fail;

	/*
	 * The full exception fixup machinery is available as soon as
	 * the early IDT is loaded.  This means that it is the
	 * responsibility of extable users to either function correctly
	 * when handlers are invoked early or to simply avoid causing
	 * exceptions before they're ready to handle them.
	 *
	 * This is better than filtering which handlers can be used,
	 * because refusing to call a handler here is guaranteed to
	 * result in a hard-to-debug panic.
	 *
	 * Keep in mind that not all vectors actually get here.  Early
	 * page faults, for example, are special.
	 */
	if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
		return;

	if (trapnr == X86_TRAP_UD) {
		if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
			/* Skip the ud2 instruction. */
			regs->ip += LEN_UD2;
			return;
		}

		/*
		 * If this was a BUG and report_bug returns or if this
		 * was just a normal #UD, we want to continue onward and
		 * crash.
		 */
	}

fail:
	early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
		     (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
		     regs->orig_ax, read_cr2());

	show_regs(regs);

halt_loop:
	while (true)
		halt();
}