// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mman.h>
#include <linux/nospec.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/system_misc.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
        smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
        local_irq_disable();
        smp_send_stop();
        while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
        local_irq_disable();
        smp_send_stop();
        if (pm_power_off)
                pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
        /* Disable interrupts first */
        local_irq_disable();
        smp_send_stop();

        /*
         * UpdateCapsule() depends on the system being reset via
         * ResetSystem().
         */
        if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_reboot(reboot_mode, NULL);

        /* Now call the architecture specific reboot code. */
        do_kernel_restart(cmd);

        /*
         * Whoops - the architecture was unable to reboot.
         */
        printk("Reboot failed -- System halted\n");
        while (1);
}

#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
        bstr(NONE, "--"),
        bstr(  JC, "jc"),
        bstr(   C, "-c"),
        bstr(  J , "j-")
};
#undef bstr
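
/*
 * PSTATE.BTYPE (bits 11:10) records what kind of branch reached the current
 * instruction; BTI uses it to decide whether a landing pad is required. The
 * strings above are just a human-readable rendering of that field.
 */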

static void print_pstate(struct pt_regs *regs)
{
        u64 pstate = regs->pstate;

        if (compat_user_mode(regs)) {
                printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
                       pstate,
                       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
                       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
                       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
                       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
                       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
                       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
                       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
                       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
                       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
                       pstate & PSR_AA32_F_BIT ? 'F' : 'f');
        } else {
                const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
                                               PSR_BTYPE_SHIFT];

                printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
                       pstate,
                       pstate & PSR_N_BIT ? 'N' : 'n',
                       pstate & PSR_Z_BIT ? 'Z' : 'z',
                       pstate & PSR_C_BIT ? 'C' : 'c',
                       pstate & PSR_V_BIT ? 'V' : 'v',
                       pstate & PSR_D_BIT ? 'D' : 'd',
                       pstate & PSR_A_BIT ? 'A' : 'a',
                       pstate & PSR_I_BIT ? 'I' : 'i',
                       pstate & PSR_F_BIT ? 'F' : 'f',
                       pstate & PSR_PAN_BIT ? '+' : '-',
                       pstate & PSR_UAO_BIT ? '+' : '-',
                       pstate & PSR_TCO_BIT ? '+' : '-',
                       btype_str);
        }
}
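
/*
 * Worked example (illustrative): pstate == 0x60000005 has Z and C set
 * (bits 30 and 29), DAIF clear (bits 9:6) and mode EL1h, so the 64-bit
 * branch above prints "pstate: 60000005 (nZCv daif -PAN -UAO -TCO BTYPE=--)".
 */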

void __show_regs(struct pt_regs *regs)
{
        int i, top_reg;
        u64 lr, sp;

        if (compat_user_mode(regs)) {
                lr = regs->compat_lr;
                sp = regs->compat_sp;
                top_reg = 12;
        } else {
                lr = regs->regs[30];
                sp = regs->sp;
                top_reg = 29;
        }

        show_regs_print_info(KERN_DEFAULT);
        print_pstate(regs);

        if (!user_mode(regs)) {
                printk("pc : %pS\n", (void *)regs->pc);
                printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
        } else {
                printk("pc : %016llx\n", regs->pc);
                printk("lr : %016llx\n", lr);
        }

        printk("sp : %016llx\n", sp);

        if (system_uses_irq_prio_masking())
                printk("pmr_save: %08llx\n", regs->pmr_save);

        i = top_reg;

        while (i >= 0) {
                printk("x%-2d: %016llx", i, regs->regs[i]);

                while (i-- % 3)
                        pr_cont(" x%-2d: %016llx", i, regs->regs[i]);

                pr_cont("\n");
        }
}

void show_regs(struct pt_regs *regs)
{
        __show_regs(regs);
        dump_backtrace(regs, NULL, KERN_DEFAULT);
}

static void tls_thread_flush(void)
{
        write_sysreg(0, tpidr_el0);

        if (is_compat_task()) {
                current->thread.uw.tp_value = 0;

                /*
                 * We need to ensure ordering between the shadow state and the
                 * hardware state, so that we don't corrupt the hardware state
                 * with a stale shadow state during context switch.
                 */
                barrier();
                write_sysreg(0, tpidrro_el0);
        }
}
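
/*
 * Note: compat tasks keep their TLS pointer in tpidrro_el0, which AArch32
 * userspace reads as TPIDRURO; that is why both the shadow copy and the
 * sysreg are cleared above.
 */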

static void flush_tagged_addr_state(void)
{
        if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
                clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
        fpsimd_flush_thread();
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
        flush_tagged_addr_state();
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
        fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        if (current->mm)
                fpsimd_preserve_current_state();
        *dst = *src;

        /* We rely on the above assignment to initialize dst's thread_flags: */
        BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

        /*
         * Detach src's sve_state (if any) from dst so that it does not
         * get erroneously used or freed prematurely. dst's sve_state
         * will be allocated on demand later on if dst uses SVE.
         * For consistency, also clear TIF_SVE here: this could be done
         * later in copy_process(), but to avoid tripping up future
         * maintainers it is best not to leave TIF_SVE and sve_state in
         * an inconsistent state, even temporarily.
         */
        dst->thread.sve_state = NULL;
        clear_tsk_thread_flag(dst, TIF_SVE);

        /* clear any pending asynchronous tag fault raised by the parent */
        clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);

        return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

        /*
         * In case p was allocated the same task_struct pointer as some
         * other recently-exited task, make sure p is disassociated from
         * any cpu that may have run that now-exited task recently.
         * Otherwise we could erroneously skip reloading the FPSIMD
         * registers for p.
         */
        fpsimd_flush_task_state(p);

        ptrauth_thread_init_kernel(p);

        if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;

                /*
                 * Read the current TLS pointer from tpidr_el0 as it may be
                 * out-of-sync with the saved value.
                 */
                *task_user_tls(p) = read_sysreg(tpidr_el0);

                if (stack_start) {
                        if (is_compat_thread(task_thread_info(p)))
                                childregs->compat_sp = stack_start;
                        else
                                childregs->sp = stack_start;
                }

                /*
                 * If a TLS pointer was passed to clone, use it for the new
                 * thread.
                 */
                if (clone_flags & CLONE_SETTLS)
                        p->thread.uw.tp_value = tls;
        } else {
                /*
                 * A kthread has no context to ERET to, so ensure any buggy
                 * ERET is treated as an illegal exception return.
                 *
                 * When a user task is created from a kthread, childregs will
                 * be initialized by start_thread() or start_compat_thread().
                 */
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;

                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;
        /*
         * For the benefit of the unwinder, set up childregs->stackframe
         * as the final frame for the new task.
         */
        p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;

        ptrace_hw_copy_thread(p);

        return 0;
}
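
/*
 * For kernel threads, ret_from_fork (entry.S) notices a non-zero x19 in
 * cpu_context and branches to it with x20 as the argument; user tasks fall
 * through to the normal return-to-userspace path.
 */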

void tls_preserve_current_state(void)
{
        *task_user_tls(current) = read_sysreg(tpidr_el0);
}

static void tls_thread_switch(struct task_struct *next)
{
        tls_preserve_current_state();

        if (is_compat_thread(task_thread_info(next)))
                write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
        else if (!arm64_kernel_unmapped_at_el0())
                write_sysreg(0, tpidrro_el0);

        write_sysreg(*task_user_tls(next), tpidr_el0);
}
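
/*
 * When the kernel is unmapped at EL0 (KPTI), tpidrro_el0 is used as a
 * scratch register by the exception entry trampoline and is rewritten on
 * every kernel entry from a native task, so zeroing it here would be
 * redundant; hence the arm64_kernel_unmapped_at_el0() check above.
 */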

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
        /*
         * Nothing to do for kernel threads, but 'regs' may be junk
         * (e.g. idle task) so check the flags and bail early.
         */
        if (unlikely(next->flags & PF_KTHREAD))
                return;

        /*
         * If all CPUs implement the SSBS extension, then we just need to
         * context-switch the PSTATE field.
         */
        if (cpus_have_const_cap(ARM64_SSBS))
                return;

        spectre_v4_enable_task_mitigation(next);
}
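
/*
 * On systems where not every CPU implements SSBS,
 * spectre_v4_enable_task_mitigation() re-applies the incoming task's
 * Spectre-v4 mitigation state (possibly via a firmware call) on the CPU
 * being switched to.
 */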

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
        __this_cpu_write(__entry_task, next);
}

/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Assuming the virtual counter is enabled at the beginning of times:
 *
 * - disable access when switching from a 64bit task to a 32bit task
 * - enable access when switching from a 32bit task to a 64bit task
 */
static void erratum_1418040_thread_switch(struct task_struct *prev,
                                          struct task_struct *next)
{
        bool prev32, next32;
        u64 val;

        if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
                return;

        prev32 = is_compat_thread(task_thread_info(prev));
        next32 = is_compat_thread(task_thread_info(next));

        if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
                return;

        val = read_sysreg(cntkctl_el1);

        if (!next32)
                val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
        else
                val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;

        write_sysreg(val, cntkctl_el1);
}
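
/*
 * With EL0 virtual counter access disabled (CNTKCTL_EL1.EL0VCTEN clear), a
 * 32-bit task's CNTVCT reads trap to EL1 and are emulated there, sidestepping
 * the erratum; 64-bit tasks keep direct counter access for speed.
 */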

static void update_sctlr_el1(u64 sctlr)
{
        /*
         * EnIA must not be cleared while in the kernel as this is necessary for
         * in-kernel PAC. It will be cleared on kernel exit if needed.
         */
        sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);

        /* ISB required for the kernel uaccess routines when setting TCF0. */
        isb();
}

void set_task_sctlr_el1(u64 sctlr)
{
        /*
         * __switch_to() checks current->thread.sctlr_user as an
         * optimisation. Disable preemption so that it does not see
         * the variable update before the SCTLR_EL1 one.
         */
        preempt_disable();
        current->thread.sctlr_user = sctlr;
        update_sctlr_el1(sctlr);
        preempt_enable();
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next)
{
        struct task_struct *last;

        fpsimd_thread_switch(next);
        tls_thread_switch(next);
        hw_breakpoint_thread_switch(next);
        contextidr_thread_switch(next);
        entry_task_switch(next);
        ssbs_thread_switch(next);
        erratum_1418040_thread_switch(prev, next);
        ptrauth_thread_switch_user(next);

        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
         * the thread migrates to a different CPU.
         * This full barrier is also required by the membarrier system
         * call.
         */
        dsb(ish);

        /*
         * MTE thread switching must happen after the DSB above to ensure that
         * any asynchronous tag check faults have been logged in the TFSR*_EL1
         * registers.
         */
        mte_thread_switch(next);
        /* avoid expensive SCTLR_EL1 accesses if no change */
        if (prev->thread.sctlr_user != next->thread.sctlr_user)
                update_sctlr_el1(next->thread.sctlr_user);

        /* the actual thread switch */
        last = cpu_switch_to(prev, next);

        return last;
}
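
/*
 * cpu_switch_to() (entry.S) saves the outgoing task's callee-saved registers
 * (x19-x28), fp, sp and lr into its cpu_context and loads the incoming
 * task's, so only the per-feature state handled above needs explicit
 * attention on a switch.
 */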

unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        unsigned long stack_page, ret = 0;
        int count = 0;

        if (!p || p == current || task_is_running(p))
                return 0;

        stack_page = (unsigned long)try_get_task_stack(p);
        if (!stack_page)
                return 0;

        start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

        do {
                if (unwind_frame(p, &frame))
                        goto out;
                if (!in_sched_functions(frame.pc)) {
                        ret = frame.pc;
                        goto out;
                }
        } while (count++ < 16);

out:
        put_task_stack(p);
        return ret;
}
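
/*
 * The 16-frame bound above keeps the walk cheap and guards against cycles
 * in a corrupted stack of the target task.
 */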

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}
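
/*
 * Example: with 4K pages, get_random_int() & ~PAGE_MASK yields a random
 * offset in [0, 4095], and the final mask rounds sp down to a 16-byte
 * boundary as required by the AAPCS64 stack alignment rules.
 */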

#ifdef CONFIG_COMPAT
int compat_elf_check_arch(const struct elf32_hdr *hdr)
{
        if (!system_supports_32bit_el0())
                return false;

        if ((hdr)->e_machine != EM_ARM)
                return false;

        if (!((hdr)->e_flags & EF_ARM_EABI_MASK))
                return false;

        /*
         * Prevent execve() of a 32-bit program from a deadline task
         * if the restricted affinity mask would be inadmissible on an
         * asymmetric system.
         */
        return !static_branch_unlikely(&arm64_mismatched_32bit_el0) ||
               !dl_task_check_affinity(current, system_32bit_el0_cpumask());
}
#endif

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
        unsigned long mmflags = 0;

        if (is_compat_task()) {
                mmflags = MMCF_AARCH32;

                /*
                 * Restrict the CPU affinity mask for a 32-bit task so that
                 * it contains only 32-bit-capable CPUs.
                 *
                 * From the perspective of the task, this looks similar to
                 * what would happen if the 64-bit-only CPUs were hot-unplugged
                 * at the point of execve(), although we try a bit harder to
                 * honour the cpuset hierarchy.
                 */
                if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
                        force_compatible_cpus_allowed_ptr(current);
        } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) {
                relax_compatible_cpus_allowed_ptr(current);
        }

        current->mm->context.flags = mmflags;
        ptrauth_thread_init_user();
        mte_thread_init_user();

        if (task_spec_ssb_noexec(current)) {
                arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
                                         PR_SPEC_ENABLE);
        }
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
        unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
        struct thread_info *ti = task_thread_info(task);

        if (is_compat_thread(ti))
                return -EINVAL;

        if (system_supports_mte())
                valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;

        if (arg & ~valid_mask)
                return -EINVAL;

        /*
         * Do not allow the enabling of the tagged address ABI if globally
         * disabled via sysctl abi.tagged_addr_disabled.
         */
        if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
                return -EINVAL;

        if (set_mte_ctrl(task, arg) != 0)
                return -EINVAL;

        update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

        return 0;
}
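
/*
 * Userspace opts in to the tagged address ABI via prctl(), e.g.:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * after which the top byte of user pointers passed to most syscalls is
 * ignored (see Documentation/arm64/tagged-address-abi.rst).
 */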

long get_tagged_addr_ctrl(struct task_struct *task)
{
        long ret = 0;
        struct thread_info *ti = task_thread_info(task);

        if (is_compat_thread(ti))
                return -EINVAL;

        if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
                ret = PR_TAGGED_ADDR_ENABLE;

        ret |= get_mte_ctrl(task);

        return ret;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static struct ctl_table tagged_addr_sysctl_table[] = {
        {
                .procname       = "tagged_addr_disabled",
                .mode           = 0644,
                .data           = &tagged_addr_disabled,
                .maxlen         = sizeof(int),
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        { }
};

static int __init tagged_addr_init(void)
{
        if (!register_sysctl("abi", tagged_addr_sysctl_table))
                return -EINVAL;
        return 0;
}

core_initcall(tagged_addr_init);
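
/*
 * Administrators can block new opt-ins system-wide with, e.g.:
 *
 *	echo 1 > /proc/sys/abi/tagged_addr_disabled
 */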
#endif	/* CONFIG_ARM64_TAGGED_ADDR_ABI */

#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
                         bool has_interp, bool is_interp)
{
        /*
         * For dynamically linked executables the interpreter is
         * responsible for setting PROT_BTI on everything except
         * itself.
         */
        if (is_interp != has_interp)
                return prot;

        if (!(state->flags & ARM64_ELF_BTI))
                return prot;

        if (prot & PROT_EXEC)
                prot |= PROT_BTI;

        return prot;
}
#endif