// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995  Linus Torvalds
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/elfcore.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/leds.h>

#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/mach/time.h>
#include <asm/tls.h>
#include <asm/vdso.h>
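/*
 * Global stack protector canary, shared by all tasks unless per-task
 * canaries (CONFIG_STACKPROTECTOR_PER_TASK) are configured.
 */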
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
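/*
 * Human-readable names for the CPSR mode and instruction-set state,
 * indexed by processor_mode() and isa_mode() when dumping registers.
 */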
static const char *processor_modes[] __maybe_unused = {
	"USER_26", "FIQ_26",  "IRQ_26",  "SVC_26",  "UK4_26",  "UK5_26",  "UK6_26",  "UK7_26",
	"UK8_26",  "UK9_26",  "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
	"USER_32", "FIQ_32",  "IRQ_32",  "SVC_32",  "UK4_32",  "UK5_32",  "MON_32",  "ABT_32",
	"UK8_32",  "UK9_32",  "HYP_32",  "UND_32",  "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};
static const char *isa_modes[] __maybe_unused = {
	"ARM", "Thumb", "Jazelle", "ThumbEE"
};
/*
 * This is our default idle handler.
 */

void (*arm_pm_idle)(void);
/*
 * Called from the core idle loop.
 */

void arch_cpu_idle(void)
{
	if (arm_pm_idle)
		arm_pm_idle();
	else
		cpu_do_idle();
	raw_local_irq_enable();
}
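/*
 * Hooks called by the generic idle loop: arch_cpu_idle_prepare() once
 * when a CPU's idle thread starts, and the enter/exit hooks around
 * each idle period.
 */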
void arch_cpu_idle_prepare(void)
{
	local_fiq_enable();
}
void arch_cpu_idle_enter(void)
{
	ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
	wmb();
#endif
}
void arch_cpu_idle_exit(void)
{
	ledtrig_cpu(CPU_LED_IDLE_END);
}
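/*
 * Dump memory-allocator information (via mem_dump_obj()) for whatever
 * the general purpose registers r0-r12 point at, to give register
 * dumps some extra context.
 */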
void __show_regs_alloc_free(struct pt_regs *regs)
{
	int i;

	/* check for r0 - r12 only */
	for (i = 0; i < 13; i++) {
		pr_alert("Register r%d information:", i);
		mem_dump_obj((void *)regs->uregs[i]);
	}
}
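/*
 * Print a full register dump, including the CPSR condition flags, the
 * processor mode and ISA state and, where available, the CP15 control
 * and translation table registers.
 */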
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	char buf[64];
#ifndef CONFIG_CPU_V7M
	unsigned int domain, fs;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Get the domain register for the parent context. In user
	 * mode, we don't save the DACR, so let's use what it should
	 * be. For other modes, we place it after the pt_regs struct.
	 */
	if (user_mode(regs)) {
		domain = DACR_UACCESS_ENABLE;
		fs = get_fs();
	} else {
		domain = to_svc_pt_regs(regs)->dacr;
		fs = to_svc_pt_regs(regs)->addr_limit;
	}
#else
	domain = get_domain();
	fs = get_fs();
#endif
#endif

	show_regs_print_info(KERN_DEFAULT);

	printk("PC is at %pS\n", (void *)instruction_pointer(regs));
	printk("LR is at %pS\n", (void *)regs->ARM_lr);
	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n",
	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
	printk("sp : %08lx  ip : %08lx  fp : %08lx\n",
	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
	       regs->ARM_r10, regs->ARM_r9, regs->ARM_r8);
	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
	       regs->ARM_r7, regs->ARM_r6, regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
	       regs->ARM_r3, regs->ARM_r2, regs->ARM_r1, regs->ARM_r0);

	flags = regs->ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

#ifndef CONFIG_CPU_V7M
	{
		const char *segment;

		if ((domain & domain_mask(DOMAIN_USER)) ==
		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
			segment = "none";
		else if (fs == KERNEL_DS)
			segment = "kernel";
		else
			segment = "user";

		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
		       buf, interrupts_enabled(regs) ? "n" : "ff",
		       fast_interrupts_enabled(regs) ? "n" : "ff",
		       processor_modes[processor_mode(regs)],
		       isa_modes[isa_mode(regs)], segment);
	}
#else
	printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif

#ifdef CONFIG_CPU_CP15
	{
		unsigned int ctrl;

		buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
		{
			unsigned int transbase;

			asm("mrc p15, 0, %0, c2, c0\n\t" : "=r" (transbase));
			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
				 transbase, domain);
		}
#endif
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

		printk("Control: %08x%s\n", ctrl, buf);
	}
#endif
}
void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
	dump_stack();
}
ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);
/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	thread_notify(THREAD_NOTIFY_EXIT, task_thread_info(tsk));
}
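/*
 * Reset the per-thread state (coprocessor usage, debug registers, FP
 * state and TLS) when a task starts executing a new image.
 */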
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	flush_tls();

	thread_notify(THREAD_NOTIFY_FLUSH, thread);
}
void release_thread(struct task_struct *dead_task)
{
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
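/*
 * Set up the saved kernel context and the user pt_regs of a new task
 * so that it starts executing in ret_from_fork.
 */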
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

#ifdef CONFIG_CPU_USE_DOMAINS
	/*
	 * Copy the initial value of the domain access control register
	 * from the current thread: thread->addr_limit will have been
	 * copied from the current thread via setup_thread_stack() in
	 * kernel/fork.c
	 */
	thread->cpu_domain = get_domain();
#endif

	if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
		*childregs = *current_pt_regs();
		childregs->ARM_r0 = 0;
		if (stack_start)
			childregs->ARM_sp = stack_start;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		thread->cpu_context.r4 = stk_sz;
		thread->cpu_context.r5 = stack_start;
		childregs->ARM_cpsr = SVC_MODE;
	}
	thread->cpu_context.pc = (unsigned long)ret_from_fork;
	thread->cpu_context.sp = (unsigned long)childregs;

	clear_ptrace_hw_breakpoint(p);

	if (clone_flags & CLONE_SETTLS)
		thread->tp_value[0] = tls;
	thread->tp_value[1] = get_tpuser();

	thread_notify(THREAD_NOTIFY_COPY, thread);

#ifdef CONFIG_STACKPROTECTOR_PER_TASK
	thread->stack_canary = p->stack_canary;
#endif

	return 0;
}
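/*
 * Walk the saved stack frames of a sleeping task to find the function,
 * outside of the scheduler itself, in which it is blocked.
 */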
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || task_is_running(p))
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.lr = 0;			/* recovered from the stack */
	frame.pc = thread_saved_pc(p);
	stack_page = (unsigned long)task_stack_page(p);
	do {
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(&frame) < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count++ < 16);
	return 0;
}
#ifdef CONFIG_KUSER_HELPERS
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
	vma_init(&gate_vma, NULL);
	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
	gate_vma.vm_start = 0xffff0000;
	gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
	gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
	return 0;
}
arch_initcall(gate_vma_init);
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}

int in_gate_area_no_mm(unsigned long addr)
{
	return in_gate_area(NULL, addr);
}

#define is_gate_vma(vma)	((vma) == &gate_vma)
#else
#define is_gate_vma(vma)	0
#endif
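/* Report the vectors page as "[vectors]" in /proc/<pid>/maps. */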
const char *arch_vma_name(struct vm_area_struct *vma)
{
	return is_gate_vma(vma) ? "[vectors]" : NULL;
}
/* If possible, provide a placement hint at a random offset from the
 * stack for the sigpage and vdso pages.
 */
static unsigned long sigpage_addr(const struct mm_struct *mm,
				  unsigned int npages)
{
	unsigned long offset, first, last, addr;
	unsigned int slots;

	first = PAGE_ALIGN(mm->start_stack);
	last = TASK_SIZE - (npages << PAGE_SHIFT);

	/* No room after stack? */
	if (first > last)
		return 0;

	/* Just enough room? */
	if (first == last)
		return first;

	slots = ((last - first) >> PAGE_SHIFT) + 1;
	offset = get_random_int() % slots;
	addr = first + (offset << PAGE_SHIFT);

	return addr;
}
static struct page *signal_page;
extern struct page *get_signal_page(void);
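/* Keep the cached sigpage address up to date if the mapping is moved. */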
static int sigpage_mremap(const struct vm_special_mapping *sm,
	struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = new_vma->vm_start;
	return 0;
}
static const struct vm_special_mapping sigpage_mapping = {
	.name = "[sigpage]",
	.pages = &signal_page,
	.mremap	= sigpage_mremap,
};
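/*
 * Map the signal return page (and, immediately after it, the vdso)
 * into a new process's address space at exec time.
 */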
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long npages;
	unsigned long addr, hint;
	int ret = 0;

	if (!signal_page)
		signal_page = get_signal_page();
	if (!signal_page)
		return -ENOMEM;

	npages = 1; /* for sigpage */
	npages += vdso_total_pages;

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	hint = sigpage_addr(mm, npages);
	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
		&sigpage_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	mm->context.sigpage = addr;

	/* Unlike the sigpage, failure to install the vdso is unlikely
	 * to be fatal to the process, so no error check needed here.
	 */
	arm_install_vdso(mm, addr + PAGE_SIZE);

up_fail:
	mmap_write_unlock(mm);
	return ret;
}