// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm-smccc.h>
#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

unsigned long sdei_exit_mode;
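
/*
 * sdei_exit_mode is set in sdei_arch_get_entry_point() and tells the
 * assembly exit path whether to complete the event with an HVC or an SMC,
 * matching the conduit the firmware expects.
 */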

/*
 * VMAP'd stacks checking for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * sdei stack.
 * For now, we allocate stacks when the driver is probed.
 */
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
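
/*
 * The pointers are declared unconditionally so that the stack-walking
 * helpers below compile whatever the config; they are only defined (and the
 * stacks only allocated) when CONFIG_VMAP_STACK is enabled.
 */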

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif
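
/*
 * Note: arch_alloc_vmap_stack() (asm/vmap_stack.h) returns vmalloc'd memory,
 * so each stack gets a guard page. An overflow then faults on the guard page
 * and can be detected by the entry code, instead of silently corrupting
 * whatever allocation sits next to the stack.
 */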

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	if (p) {
		per_cpu(*ptr, cpu) = NULL;
		vfree(p);
	}
}

static void free_sdei_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}

static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	/* All-or-nothing: on any failure, free whatever was allocated */
	if (err)
		free_sdei_stacks();

	return err;
}
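
/*
 * The helpers below are stack-walking glue: they let the arm64 unwinder
 * (asm/stacktrace.h) classify a given sp as sitting on one of the per-cpu
 * SDEI stacks, so a backtrace can cross from an SDEI handler back into the
 * interrupted context.
 */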

static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
}

static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
{
	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	unsigned long high = low + SDEI_STACK_SIZE;

	return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}

bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
{
	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	if (on_sdei_critical_stack(sp, info))
		return true;

	if (on_sdei_normal_stack(sp, info))
		return true;

	return false;
}
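
/*
 * For context (a sketch, not this file's code): the inline wrapper in
 * asm/sdei.h only forwards to _on_sdei_stack() while an NMI-class event is
 * being handled, roughly:
 *
 *	static inline bool on_sdei_stack(unsigned long sp,
 *					 struct stack_info *info)
 *	{
 *		if (in_nmi())
 *			return _on_sdei_stack(sp, info);
 *
 *		return false;
 *	}
 */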

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		if (init_sdei_stacks())
			return 0;
	}

	sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	if (arm64_kernel_unmapped_at_el0()) {
		unsigned long offset;

		/* With KPTI, firmware must enter via the trampoline alias */
		offset = (unsigned long)__sdei_asm_entry_trampoline -
			 (unsigned long)__entry_tramp_text_start;
		return TRAMP_VALIAS + offset;
	}
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

	return (unsigned long)__sdei_asm_handler;
}
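
/*
 * A sketch of the caller's side (not this file's code): the platform driver
 * in drivers/firmware/arm_sdei.c asks the arch for an entry point once at
 * probe time, then hands that address to firmware when registering events,
 * roughly:
 *
 *	sdei_entry_point = sdei_arch_get_entry_point(conduit);
 *	if (!sdei_entry_point)
 *		return -EOPNOTSUPP;	// hypothetical error choice
 */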

/*
 * __sdei_handler() returns one of:
 *
 * SDEI_EV_HANDLED -  success, return to the interrupted context.
 * SDEI_EV_FAILED -   failure, return this error code to firmware.
 * virtual-address -  success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);

	if (arm64_kernel_unmapped_at_el0())
		clobbered_registers++;

	/* Retrieve the missing registers' values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, so set PAN. UAO will be
	 * cleared by sdei_event_handler()'s force_uaccess_begin() call.
	 */
	__uaccess_enable_hw_pan();

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and KVM to invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
	if (mode == kernel_mode)
		return vbar + 0x280;	/* IRQ, current EL with SP_ELx */
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;	/* IRQ, lower EL using AArch32 */
	else
		return vbar + 0x480;	/* IRQ, lower EL using AArch64 */
}

asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;

	arm64_enter_nmi(regs);

	ret = _sdei_handler(regs, arg);

	arm64_exit_nmi(regs);

	return ret;
}
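
/*
 * Why the NMI bracketing above: an SDEI event can fire at almost any point,
 * including with interrupts masked, so arm64_enter_nmi()/arm64_exit_nmi()
 * tell RCU, lockdep and the rest of the kernel to treat the handler as NMI
 * context before any instrumented C code runs.
 */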