// SPDX-License-Identifier: GPL-2.0
/*  arch/sparc64/kernel/process.c
 *
 *  Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 *  Copyright (C) 1996       Eddie C. Dost   (ecd@skynet.be)
 *  Copyright (C) 1997, 1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>

#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>

#include "kstack.h"
/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
	if (tlb_type != hypervisor) {
		touch_nmi_watchdog();
		raw_local_irq_enable();
	} else {
		unsigned long pstate;

		raw_local_irq_enable();

		/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
		 * the cpu sleep hypervisor call.
		 */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"andn %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));

		if (!need_resched() && !cpu_is_offline(smp_processor_id())) {
			sun4v_cpu_yield();
			/* If resumed by cpu_poke then we need to explicitly
			 * call scheduler_ipi().
			 */
			scheduler_poke();
		}

		/* Re-enable interrupts. */
		__asm__ __volatile__(
			"rdpr %%pstate, %0\n\t"
			"or %0, %1, %0\n\t"
			"wrpr %0, %%g0, %%pstate"
			: "=&r" (pstate)
			: "i" (PSTATE_IE));
	}
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	sched_preempt_enable_no_resched();
	cpu_play_dead();
}
#endif
#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
	struct reg_window32 __user *rw;
	struct reg_window32 r_w;

	__asm__ __volatile__ ("flushw");
	rw = compat_ptr((unsigned int)regs->u_regs[14]);
	if (copy_from_user(&r_w, rw, sizeof(r_w)))
		return;

	printk("l0: %08x l1: %08x l2: %08x l3: %08x "
	       "l4: %08x l5: %08x l6: %08x l7: %08x\n",
	       r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
	       r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
	printk("i0: %08x i1: %08x i2: %08x i3: %08x "
	       "i4: %08x i5: %08x i6: %08x i7: %08x\n",
	       r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
	       r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)	do { } while (0)
#endif
static void show_regwindow(struct pt_regs *regs)
{
	struct reg_window __user *rw;
	struct reg_window *rwk;
	struct reg_window r_w;

	if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
		__asm__ __volatile__ ("flushw");
		rw = (struct reg_window __user *)
			(regs->u_regs[14] + STACK_BIAS);
		rwk = (struct reg_window *)
			(regs->u_regs[14] + STACK_BIAS);
		if (!(regs->tstate & TSTATE_PRIV)) {
			if (copy_from_user(&r_w, rw, sizeof(r_w)))
				return;
			rwk = &r_w;
		}
	} else {
		show_regwindow32(regs);
		return;
	}
	printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
	       rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
	printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
	       rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
	printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
	       rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
	printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
	       rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
	if (regs->tstate & TSTATE_PRIV)
		printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
	       regs->tpc, regs->tnpc, regs->y, print_tainted());
	printk("TPC: <%pS>\n", (void *) regs->tpc);
	printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
	       regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
	       regs->u_regs[3]);
	printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
	       regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
	       regs->u_regs[7]);
	printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
	       regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
	       regs->u_regs[11]);
	printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
	       regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
	       regs->u_regs[15]);
	printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
	show_regwindow(regs);
	show_stack(current, (unsigned long *)regs->u_regs[UREG_FP], KERN_DEFAULT);
}
union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);

static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
			      int this_cpu)
{
	struct global_reg_snapshot *rp;

	flushw_all();

	rp = &global_cpu_snapshot[this_cpu].reg;

	rp->tstate = regs->tstate;
	rp->tpc = regs->tpc;
	rp->tnpc = regs->tnpc;
	rp->o7 = regs->u_regs[UREG_I7];

	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw;

		rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);
		if (kstack_valid(tp, (unsigned long) rw)) {
			rp->i7 = rw->ins[7];
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
			if (kstack_valid(tp, (unsigned long) rw))
				rp->rpc = rw->ins[7];
		}
	} else {
		rp->i7 = 0;
		rp->rpc = 0;
	}
	rp->thread = tp;
}
/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
	int limit = 0;

	while (!gp->thread && ++limit < 100) {
		barrier();
		udelay(1);
	}
}
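/* Illustrative note: the poll above bounds the wait at roughly
 * 100 iterations * udelay(1), i.e. about 100 microseconds per cpu,
 * after which a wedged cpu's (possibly empty) snapshot is printed
 * as-is rather than hanging the whole dump.
 */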
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	struct thread_info *tp = current_thread_info();
	struct pt_regs *regs = get_irq_regs();
	unsigned long flags;
	int this_cpu, cpu;

	if (!regs)
		regs = tp->kregs;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	this_cpu = raw_smp_processor_id();

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
		__global_reg_self(tp, regs, this_cpu);

	smp_fetch_global_regs();

	for_each_cpu(cpu, mask) {
		struct global_reg_snapshot *gp;

		if (exclude_self && cpu == this_cpu)
			continue;

		gp = &global_cpu_snapshot[cpu].reg;

		__global_reg_poll(gp);

		tp = gp->thread;
		printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       gp->tstate, gp->tpc, gp->tnpc,
		       ((tp && tp->task) ? tp->task->comm : "NULL"),
		       ((tp && tp->task) ? tp->task->pid : -1));

		if (gp->tstate & TSTATE_PRIV) {
			printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
			       (void *) gp->tpc,
			       (void *) gp->o7,
			       (void *) gp->i7,
			       (void *) gp->rpc);
		} else {
			printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
			       gp->tpc, gp->o7, gp->i7, gp->rpc);
		}

		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key)
{
	trigger_all_cpu_backtrace();
}

static const struct sysrq_key_op sparc_globalreg_op = {
	.handler	= sysrq_handle_globreg,
	.help_msg	= "global-regs(y)",
	.action_msg	= "Show Global CPU Regs",
};
static void __global_pmu_self(int this_cpu)
{
	struct global_pmu_snapshot *pp;
	int i, num;

	if (!pcr_ops)
		return;

	pp = &global_cpu_snapshot[this_cpu].pmu;

	num = 1;
	if (tlb_type == hypervisor &&
	    sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
		num = 4;

	for (i = 0; i < num; i++) {
		pp->pcr[i] = pcr_ops->read_pcr(i);
		pp->pic[i] = pcr_ops->read_pic(i);
	}
}
static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
	int limit = 0;

	while (!pp->pcr[0] && ++limit < 100) {
		barrier();
		udelay(1);
	}
}
static void pmu_snapshot_all_cpus(void)
{
	unsigned long flags;
	int this_cpu, cpu;

	spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	this_cpu = raw_smp_processor_id();

	__global_pmu_self(this_cpu);

	smp_fetch_global_pmu();

	for_each_online_cpu(cpu) {
		struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

		__global_pmu_poll(pp);

		printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
		       (cpu == this_cpu ? '*' : ' '), cpu,
		       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
		       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);

		touch_nmi_watchdog();
	}

	memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

	spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}
static void sysrq_handle_globpmu(int key)
{
	pmu_snapshot_all_cpus();
}

static const struct sysrq_key_op sparc_globalpmu_op = {
	.handler	= sysrq_handle_globpmu,
	.help_msg	= "global-pmu(x)",
	.action_msg	= "Show Global PMU Regs",
};
static int __init sparc_sysrq_init(void)
{
	int ret = register_sysrq_key('y', &sparc_globalreg_op);

	if (!ret)
		ret = register_sysrq_key('x', &sparc_globalpmu_op);
	return ret;
}

core_initcall(sparc_sysrq_init);

#endif
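/* Usage sketch, assuming a kernel with CONFIG_MAGIC_SYSRQ and sysrq
 * enabled at runtime: besides the keyboard chord, the handlers above
 * can be invoked from a shell, e.g.:
 *
 *	echo y > /proc/sysrq-trigger	# dump global cpu registers
 *	echo x > /proc/sysrq-trigger	# dump global PMU state
 */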
/* Free current thread data structures etc. */
void exit_thread(struct task_struct *tsk)
{
	struct thread_info *t = task_thread_info(tsk);

	if (t->utraps) {
		/* utraps[0] holds the reference count on the utrap table. */
		if (t->utraps[0] < 2)
			kfree(t->utraps);
		else
			t->utraps[0]--;
	}
}
void flush_thread(void)
{
	struct thread_info *t = current_thread_info();
	struct mm_struct *mm;

	mm = t->task->mm;
	if (mm)
		tsb_context_switch(mm);

	set_thread_wsaved(0);

	/* Clear FPU register state. */
	t->fpsaved[0] = 0;
}
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
	bool stack_64bit = test_thread_64bit_stack(psp);
	unsigned long fp, distance, rval;

	if (stack_64bit) {
		csp += STACK_BIAS;
		psp += STACK_BIAS;
		__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
		fp += STACK_BIAS;
		if (test_thread_flag(TIF_32BIT))
			fp &= 0xffffffff;
	} else
		__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

	/* Now align the stack as this is mandatory in the Sparc ABI
	 * due to how register windows work.  This hides the
	 * restriction from thread libraries etc.
	 */
	csp &= ~15UL;

	distance = fp - psp;
	rval = (csp - distance);
	if (copy_in_user((void __user *)rval, (void __user *)psp, distance))
		rval = 0;
	else if (!stack_64bit) {
		if (put_user(((u32)csp),
			     &(((struct reg_window32 __user *)rval)->ins[6])))
			rval = 0;
	} else {
		if (put_user(((u64)csp - STACK_BIAS),
			     &(((struct reg_window __user *)rval)->ins[6])))
			rval = 0;
		else
			rval = rval - STACK_BIAS;
	}

	return rval;
}
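/* Worked example of the copy above, with hypothetical numbers: if the
 * parent's stack pointer is psp and its saved frame pointer is
 * fp = psp + 0x200, then distance = 0x200 bytes of live stack are
 * replicated at rval = csp - 0x200 on the child's stack, and the
 * copied frame's saved-%fp slot (ins[6]) is rewritten to point at csp,
 * so the child's register window chain terminates on its own stack
 * instead of wandering back onto the parent's.
 */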
/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
				       struct thread_info *t)
{
	int i;

	for (i = first_win; i < last_win; i++) {
		t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
		memcpy(&t->reg_window[i], &t->reg_window[i+1],
		       sizeof(struct reg_window));
	}
}
void synchronize_user_stack(void)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	if ((window = get_thread_wsaved()) != 0) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp;

			sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (!copy_to_user((char __user *)sp, rwin, winsize)) {
				shift_window_buffer(window, get_thread_wsaved() - 1, t);
				set_thread_wsaved(get_thread_wsaved() - 1);
			}
		} while (window--);
	}
}
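/* Background note: register windows that the trap handlers could not
 * spill to the user stack are parked in thread_info (reg_window[] plus
 * the matching rwbuf_stkptrs[] stack pointers) and counted by wsaved.
 * synchronize_user_stack() above drains that buffer opportunistically,
 * skipping windows it cannot write out; fault_in_user_windows() below
 * is the must-succeed variant that signals the task on failure.
 */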
static void stack_unaligned(unsigned long sp)
{
	force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp);
}

static const char uwfault32[] = KERN_INFO \
	"%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
static const char uwfault64[] = KERN_INFO \
	"%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
void fault_in_user_windows(struct pt_regs *regs)
{
	struct thread_info *t = current_thread_info();
	unsigned long window;

	flush_user_windows();
	window = get_thread_wsaved();

	if (likely(window != 0)) {
		window -= 1;
		do {
			struct reg_window *rwin = &t->reg_window[window];
			int winsize = sizeof(struct reg_window);
			unsigned long sp, orig_sp;

			orig_sp = sp = t->rwbuf_stkptrs[window];

			if (test_thread_64bit_stack(sp))
				sp += STACK_BIAS;
			else
				winsize = sizeof(struct reg_window32);

			if (unlikely(sp & 0x7UL))
				stack_unaligned(sp);

			if (unlikely(copy_to_user((char __user *)sp,
						  rwin, winsize))) {
				if (show_unhandled_signals)
					printk_ratelimited(is_compat_task() ?
							   uwfault32 : uwfault64,
							   current->comm, current->pid,
							   sp, orig_sp,
							   regs->tpc,
							   regs->u_regs[UREG_I7]);
				goto barf;
			}
		} while (window--);
	}
	set_thread_wsaved(0);
	return;

barf:
	set_thread_wsaved(window + 1);
	force_sig(SIGSEGV);
}
/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent -->  %o0 == childs  pid, %o1 == 0
 * Child  -->  %o0 == parents pid, %o1 == 1
 */
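/* Illustrative, hypothetical libc-side consumer of that convention:
 * a fork() stub only has to test %o1 to learn which side it is on.
 * A rough sketch (not taken from any real libc):
 *
 *	mov	__NR_fork, %g1
 *	ta	0x6d		! 64-bit Linux syscall trap
 *	brnz,a	%o1, 1f		! %o1 != 0  ->  we are the child
 *	 clr	%o0		! child's fork() returns 0
 *	1: retl			! parent returns the child pid in %o0
 *	 nop
 */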
int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	struct thread_info *t = task_thread_info(p);
	struct pt_regs *regs = current_pt_regs();
	struct sparc_stackf *parent_sf;
	unsigned long child_stack_sz;
	char *child_trap_frame;

	/* Calculate offset to stack_frame & pt_regs */
	child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
	child_trap_frame = (task_stack_page(p) +
			    (THREAD_SIZE - child_stack_sz));

	t->new_child = 1;
	t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
	t->kregs = (struct pt_regs *) (child_trap_frame +
				       sizeof(struct sparc_stackf));
	t->fpsaved[0] = 0;

	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		memset(child_trap_frame, 0, child_stack_sz);
		__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
			(current_pt_regs()->tstate + 1) & TSTATE_CWP;
		t->current_ds = ASI_P;
		t->kregs->u_regs[UREG_G1] = sp; /* function */
		t->kregs->u_regs[UREG_G2] = arg;
		return 0;
	}

	parent_sf = ((struct sparc_stackf *) regs) - 1;
	memcpy(child_trap_frame, parent_sf, child_stack_sz);
	if (t->flags & _TIF_32BIT) {
		sp &= 0x00000000ffffffffUL;
		regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
	}
	t->kregs->u_regs[UREG_FP] = sp;
	__thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
		(regs->tstate + 1) & TSTATE_CWP;
	t->current_ds = ASI_AIUS;
	if (sp != regs->u_regs[UREG_FP]) {
		unsigned long csp;

		csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
		if (!csp)
			return -EFAULT;
		t->kregs->u_regs[UREG_FP] = csp;
	}
	if (t->utraps)
		t->utraps[0]++;

	/* Set the return value for the child. */
	t->kregs->u_regs[UREG_I0] = current->pid;
	t->kregs->u_regs[UREG_I1] = 1;

	/* Set the second return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	if (clone_flags & CLONE_SETTLS)
		t->kregs->u_regs[UREG_G7] = tls;

	return 0;
}
/* TIF_MCDPER in thread info flags for current task is updated lazily upon
 * a context switch. Update this flag in current task's thread flags
 * before dup so the dup'd task will inherit the current TIF_MCDPER flag.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (adi_capable()) {
		register unsigned long tmp_mcdper;

		__asm__ __volatile__(
			".word 0x83438000\n\t"	/* rd %mcdper, %g1 */
			"mov %%g1, %0\n\t"
			: "=r" (tmp_mcdper)
			:
			: "g1");
		if (tmp_mcdper)
			set_thread_flag(TIF_MCDPER);
		else
			clear_thread_flag(TIF_MCDPER);
	}

	*dst = *src;
	return 0;
}
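/* Why the raw .word above (illustrative note): %mcdper is a state
 * register used by ADI (Application Data Integrity) on recent SPARC
 * cpus, and older assemblers do not know it by name, so the
 * "rd %mcdper, %g1" instruction is emitted as its pre-encoded opcode
 * 0x83438000 rather than as mnemonic assembly.
 */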
unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc, fp, bias = 0;
	struct thread_info *tp;
	struct reg_window *rw;
	unsigned long ret = 0;
	int count = 0;

	if (!task || task == current || task_is_running(task))
		goto out;

	tp = task_thread_info(task);
	bias = STACK_BIAS;
	fp = task_thread_info(task)->ksp + bias;

	do {
		if (!kstack_valid(tp, fp))
			break;
		rw = (struct reg_window *) fp;
		pc = rw->ins[7];
		if (!in_sched_functions(pc)) {
			ret = pc;
			goto out;
		}
		fp = rw->ins[6] + bias;
	} while (++count < 16);	/* cap the walk in case of stack corruption */

out:
	return ret;
}