// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */
#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>
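/*
 * Note: without per-task stack canaries, a single global guard value is
 * shared by all kernel threads; the compiler compares against it in each
 * instrumented function's epilogue.
 */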
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
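/*
 * Enter a low-power wait-for-interrupt state. The DSB ensures that any
 * outstanding memory or maintenance operations complete before the core
 * is allowed to sleep.
 */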
static void __cpu_do_idle(void)
{
        dsb(sy);
        wfi();
}

static void __cpu_do_idle_irqprio(void)
{
        unsigned long pmr;
        unsigned long daif_bits;

        daif_bits = read_sysreg(daif);
        write_sysreg(daif_bits | PSR_I_BIT, daif);

        /*
         * Unmask PMR before going idle to make sure interrupts can
         * be raised.
         */
        pmr = gic_read_pmr();
        gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

        __cpu_do_idle();

        gic_write_pmr(pmr);
        write_sysreg(daif_bits, daif);
}
/*
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void cpu_do_idle(void)
{
        if (system_uses_irq_prio_masking())
                __cpu_do_idle_irqprio();
        else
                __cpu_do_idle();
}
/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
        /*
         * This should do all the clock switching and wait for interrupt
         * tricks.
         */
        cpu_do_idle();
        local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        cpu_die();
}
#endif
/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
        smp_shutdown_nonboot_cpus(reboot_cpu);
}
/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
        local_irq_disable();
        smp_send_stop();
        while (1);
}
/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
        local_irq_disable();
        smp_send_stop();
        if (pm_power_off)
                pm_power_off();
}
/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
        /* Disable interrupts first */
        local_irq_disable();
        smp_send_stop();

        /*
         * UpdateCapsule() depends on the system being reset via
         * ResetSystem().
         */
        if (efi_enabled(EFI_RUNTIME_SERVICES))
                efi_reboot(reboot_mode, NULL);

        /* Now call the architecture specific reboot code. */
        if (arm_pm_restart)
                arm_pm_restart(reboot_mode, cmd);
        else
                do_kernel_restart(cmd);

        /*
         * Whoops - the architecture was unable to reboot.
         */
        printk("Reboot failed -- System halted\n");
        while (1);
}
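/*
 * Decode the PSTATE.BTYPE (Branch Target Identification) field into a
 * two-character string for the pstate dump below.
 */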
#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
        bstr(NONE, "--"),
        bstr(  JC, "jc"),
        bstr(   C, "-c"),
        bstr(  J , "j-")
};
#undef bstr
static void print_pstate(struct pt_regs *regs)
{
        u64 pstate = regs->pstate;

        if (compat_user_mode(regs)) {
                printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
                        pstate,
                        pstate & PSR_AA32_N_BIT ? 'N' : 'n',
                        pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
                        pstate & PSR_AA32_C_BIT ? 'C' : 'c',
                        pstate & PSR_AA32_V_BIT ? 'V' : 'v',
                        pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
                        pstate & PSR_AA32_T_BIT ? "T32" : "A32",
                        pstate & PSR_AA32_E_BIT ? "BE" : "LE",
                        pstate & PSR_AA32_A_BIT ? 'A' : 'a',
                        pstate & PSR_AA32_I_BIT ? 'I' : 'i',
                        pstate & PSR_AA32_F_BIT ? 'F' : 'f');
        } else {
                const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
                                               PSR_BTYPE_SHIFT];

                printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO BTYPE=%s)\n",
                        pstate,
                        pstate & PSR_N_BIT ? 'N' : 'n',
                        pstate & PSR_Z_BIT ? 'Z' : 'z',
                        pstate & PSR_C_BIT ? 'C' : 'c',
                        pstate & PSR_V_BIT ? 'V' : 'v',
                        pstate & PSR_D_BIT ? 'D' : 'd',
                        pstate & PSR_A_BIT ? 'A' : 'a',
                        pstate & PSR_I_BIT ? 'I' : 'i',
                        pstate & PSR_F_BIT ? 'F' : 'f',
                        pstate & PSR_PAN_BIT ? '+' : '-',
                        pstate & PSR_UAO_BIT ? '+' : '-',
                        btype_str);
        }
}
void __show_regs(struct pt_regs *regs)
{
        int i, top_reg;
        u64 lr, sp;

        if (compat_user_mode(regs)) {
                lr = regs->compat_lr;
                sp = regs->compat_sp;
                top_reg = 12;
        } else {
                lr = regs->regs[30];
                sp = regs->sp;
                top_reg = 29;
        }

        show_regs_print_info(KERN_DEFAULT);
        print_pstate(regs);

        if (!user_mode(regs)) {
                printk("pc : %pS\n", (void *)regs->pc);
                printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
        } else {
                printk("pc : %016llx\n", regs->pc);
                printk("lr : %016llx\n", lr);
        }

        printk("sp : %016llx\n", sp);

        if (system_uses_irq_prio_masking())
                printk("pmr_save: %08llx\n", regs->pmr_save);

        i = top_reg;

        while (i >= 0) {
                printk("x%-2d: %016llx ", i, regs->regs[i]);
                i--;

                if (i % 2 == 0) {
                        pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
                        i--;
                }

                pr_cont("\n");
        }
}
void show_regs(struct pt_regs *regs)
{
        __show_regs(regs);
        dump_backtrace(regs, NULL, KERN_DEFAULT);
}
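/*
 * Reset the TLS registers, including the shadow tp_value kept for compat
 * tasks.
 */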
static void tls_thread_flush(void)
{
        write_sysreg(0, tpidr_el0);

        if (is_compat_task()) {
                current->thread.uw.tp_value = 0;

                /*
                 * We need to ensure ordering between the shadow state and the
                 * hardware state, so that we don't corrupt the hardware state
                 * with a stale shadow state during context switch.
                 */
                barrier();
                write_sysreg(0, tpidrro_el0);
        }
}
static void flush_tagged_addr_state(void)
{
        if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
                clear_thread_flag(TIF_TAGGED_ADDR);
}
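/* Reset all architecture-specific thread state on exec(). */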
void flush_thread(void)
{
        fpsimd_flush_thread();
        tls_thread_flush();
        flush_ptrace_hw_breakpoint(current);
        flush_tagged_addr_state();
}
void release_thread(struct task_struct *dead_task)
{
}
void arch_release_task_struct(struct task_struct *tsk)
{
        fpsimd_release_task(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        if (current->mm)
                fpsimd_preserve_current_state();
        *dst = *src;

        /* We rely on the above assignment to initialize dst's thread_flags: */
        BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

        /*
         * Detach src's sve_state (if any) from dst so that it does not
         * get erroneously used or freed prematurely. dst's sve_state
         * will be allocated on demand later on if dst uses SVE.
         * For consistency, also clear TIF_SVE here: this could be done
         * later in copy_process(), but to avoid tripping up future
         * maintainers it is best not to leave TIF_SVE and sve_state in
         * an inconsistent state, even temporarily.
         */
        dst->thread.sve_state = NULL;
        clear_tsk_thread_flag(dst, TIF_SVE);

        return 0;
}
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
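/*
 * Set up the register state for a newly forked task. A user thread gets a
 * copy of the parent's pt_regs with x0 zeroed (the child's fork() return
 * value); a kernel thread gets a clean EL1 frame and starts at the
 * function stashed in x19 (argument in x20) via ret_from_fork.
 */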
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

        /*
         * In case p was allocated the same task_struct pointer as some
         * other recently-exited task, make sure p is disassociated from
         * any cpu that may have run that now-exited task recently.
         * Otherwise we could erroneously skip reloading the FPSIMD
         * registers for p.
         */
        fpsimd_flush_task_state(p);

        ptrauth_thread_init_kernel(p);

        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->regs[0] = 0;

                /*
                 * Read the current TLS pointer from tpidr_el0 as it may be
                 * out-of-sync with the saved value.
                 */
                *task_user_tls(p) = read_sysreg(tpidr_el0);

                if (stack_start) {
                        if (is_compat_thread(task_thread_info(p)))
                                childregs->compat_sp = stack_start;
                        else
                                childregs->sp = stack_start;
                }

                /*
                 * If a TLS pointer was passed to clone, use it for the new
                 * thread.
                 */
                if (clone_flags & CLONE_SETTLS)
                        p->thread.uw.tp_value = tls;
        } else {
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->pstate = PSR_MODE_EL1h;
                if (IS_ENABLED(CONFIG_ARM64_UAO) &&
                    cpus_have_const_cap(ARM64_HAS_UAO))
                        childregs->pstate |= PSR_UAO_BIT;

                if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
                        set_ssbs_bit(childregs);

                if (system_uses_irq_prio_masking())
                        childregs->pmr_save = GIC_PRIO_IRQON;

                p->thread.cpu_context.x19 = stack_start;
                p->thread.cpu_context.x20 = stk_sz;
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;

        ptrace_hw_copy_thread(p);

        return 0;
}
void tls_preserve_current_state(void)
{
        *task_user_tls(current) = read_sysreg(tpidr_el0);
}
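/*
 * Save the outgoing task's TLS, then install the incoming task's
 * registers. tpidrro_el0 holds the compat TLS pointer for 32-bit tasks;
 * for native tasks it is only zeroed when the kernel remains mapped at
 * EL0, since the KPTI entry trampoline uses it as a scratch register
 * otherwise.
 */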
static void tls_thread_switch(struct task_struct *next)
{
        tls_preserve_current_state();

        if (is_compat_thread(task_thread_info(next)))
                write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
        else if (!arm64_kernel_unmapped_at_el0())
                write_sysreg(0, tpidrro_el0);

        write_sysreg(*task_user_tls(next), tpidr_el0);
}
/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
        if (IS_ENABLED(CONFIG_ARM64_UAO)) {
                if (task_thread_info(next)->addr_limit == KERNEL_DS)
                        asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
                else
                        asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
        }
}
/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
        struct pt_regs *regs = task_pt_regs(next);

        /*
         * Nothing to do for kernel threads, but 'regs' may be junk
         * (e.g. idle task) so check the flags and bail early.
         */
        if (unlikely(next->flags & PF_KTHREAD))
                return;

        /*
         * If all CPUs implement the SSBS extension, then we just need to
         * context-switch the PSTATE field.
         */
        if (cpu_have_feature(cpu_feature(SSBS)))
                return;

        /* If the mitigation is enabled, then we leave SSBS clear. */
        if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
            test_tsk_thread_flag(next, TIF_SSBD))
                return;

        if (compat_user_mode(regs))
                set_compat_ssbs_bit(regs);
        else if (user_mode(regs))
                set_ssbs_bit(regs);
}
/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
        __this_cpu_write(__entry_task, next);
}
/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Assuming the virtual counter is always enabled:
 *
 * - disable access when switching from a 64bit task to a 32bit task
 * - enable access when switching from a 32bit task to a 64bit task
 */
static void erratum_1418040_thread_switch(struct task_struct *prev,
                                          struct task_struct *next)
{
        bool prev32, next32;
        u64 val;

        if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
              cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
                return;

        prev32 = is_compat_thread(task_thread_info(prev));
        next32 = is_compat_thread(task_thread_info(next));

        if (prev32 == next32)
                return;

        val = read_sysreg(cntkctl_el1);

        if (!next32)
                val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
        else
                val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;

        write_sysreg(val, cntkctl_el1);
}
/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next)
{
        struct task_struct *last;

        fpsimd_thread_switch(next);
        tls_thread_switch(next);
        hw_breakpoint_thread_switch(next);
        contextidr_thread_switch(next);
        entry_task_switch(next);
        uao_thread_switch(next);
        ssbs_thread_switch(next);
        erratum_1418040_thread_switch(prev, next);

        /*
         * Complete any pending TLB or cache maintenance on this CPU in case
         * the thread migrates to a different CPU.
         * This full barrier is also required by the membarrier system
         * call.
         */
        dsb(ish);

        /* the actual thread switch */
        last = cpu_switch_to(prev, next);

        return last;
}
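/*
 * Return the "wait channel": the PC at which a blocked task is sleeping,
 * found by unwinding the task's saved stack until the first frame outside
 * the scheduler.
 */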
unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        unsigned long stack_page, ret = 0;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_page = (unsigned long)try_get_task_stack(p);
        if (!stack_page)
                return 0;

        start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

        do {
                if (unwind_frame(p, &frame))
                        goto out;
                if (!in_sched_functions(frame.pc)) {
                        ret = frame.pc;
                        goto out;
                }
        } while (count++ < 16);

out:
        put_task_stack(p);
        return ret;
}
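/*
 * Randomize the initial stack pointer within a page at exec time, unless
 * the task's personality opts out of address-space randomization.
 */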
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}
/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
        current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

        ptrauth_thread_init_user(current);
}
#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(unsigned long arg)
{
        if (is_compat_task())
                return -EINVAL;
        if (arg & ~PR_TAGGED_ADDR_ENABLE)
                return -EINVAL;

        /*
         * Do not allow the enabling of the tagged address ABI if globally
         * disabled via sysctl abi.tagged_addr_disabled.
         */
        if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
                return -EINVAL;

        update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

        return 0;
}
long get_tagged_addr_ctrl(void)
{
        if (is_compat_task())
                return -EINVAL;

        if (test_thread_flag(TIF_TAGGED_ADDR))
                return PR_TAGGED_ADDR_ENABLE;

        return 0;
}
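/*
 * Illustrative sketch (userspace, not part of the kernel code here): a
 * task opts in to the relaxed tagged-address ABI with
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * and queries it with prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0); both
 * land in the helpers above via the prctl() syscall.
 */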
/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static struct ctl_table tagged_addr_sysctl_table[] = {
        {
                .procname       = "tagged_addr_disabled",
                .mode           = 0644,
                .data           = &tagged_addr_disabled,
                .maxlen         = sizeof(int),
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
        { }
};
static int __init tagged_addr_init(void)
{
        if (!register_sysctl("abi", tagged_addr_sysctl_table))
                return -EINVAL;
        return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
        lockdep_assert_irqs_disabled();

        /*
         * Preempting a task from an IRQ means we leave copies of PSTATE
         * on the stack. cpufeature's enable calls may modify PSTATE, but
         * resuming one of these preempted tasks would undo those changes.
         *
         * Only allow a task to be preempted once cpufeatures have been
         * enabled.
         */
        if (system_capabilities_finalized())
                preempt_schedule_irq();
}
#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
                         bool has_interp, bool is_interp)
{
        /*
         * For dynamically linked executables the interpreter is
         * responsible for setting PROT_BTI on everything except
         * itself.
         */
        if (is_interp != has_interp)
                return prot;

        if (!(state->flags & ARM64_ELF_BTI))
                return prot;

        if (prot & PROT_EXEC)
                prot |= PROT_BTI;

        return prot;
}
#endif