/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013  Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>

#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
        play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

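/*
 * start_thread() is called by the binfmt loaders once a new user image
 * has been set up; a typical call, sketched from fs/binfmt_elf.c, is
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * i.e. the new program counter and the initial user stack pointer.
 */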
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
        unsigned long status;

        /* New thread loses kernel privileges. */
        status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
        status |= KU_USER;
        regs->cp0_status = status;
        lose_fpu(0);
        clear_thread_flag(TIF_MSA_CTX_LIVE);
        clear_used_math();
#ifdef CONFIG_MIPS_FP_SUPPORT
        atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif
        init_dsp();
        regs->cp0_epc = pc;
        regs->regs[29] = sp;
}

void exit_thread(struct task_struct *tsk)
{
        /*
         * User threads may have allocated a delay slot emulation frame.
         * If so, clean up that allocation.
         */
        if (!(current->flags & PF_KTHREAD))
                dsemul_thread_cleanup(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        /*
         * Save any process state which is live in hardware registers to the
         * parent context prior to duplication. This prevents the new child
         * state becoming stale if the parent is preempted before copy_thread()
         * gets a chance to save the parent's live hardware registers to the
         * child context.
         */
        preempt_disable();

        if (is_msa_enabled())
                save_msa(current);
        else if (is_fpu_owner())
                _save_fp(current);

        save_dsp(current);

        preempt_enable();

        *dst = *src;
        return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long kthread_arg, struct task_struct *p,
                unsigned long tls)
{
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        unsigned long childksp;

        childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

        /* Set up the new thread's register state at the top of the kernel stack. */
        childregs = (struct pt_regs *) childksp - 1;
        /* Put the stack after the struct pt_regs. */
        childksp = (unsigned long) childregs;
        p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
        if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
                /* kernel thread */
                unsigned long status = p->thread.cp0_status;
                memset(childregs, 0, sizeof(struct pt_regs));
                p->thread.reg16 = usp; /* fn */
                p->thread.reg17 = kthread_arg;
                p->thread.reg29 = childksp;
                p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
                status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
                         ((status & (ST0_KUC | ST0_IEC)) << 2);
#else
                status |= ST0_EXL;
#endif
                childregs->cp0_status = status;
                return 0;
        }

        /* user thread */
        *childregs = *regs;
        childregs->regs[7] = 0; /* Clear error flag */
        childregs->regs[2] = 0; /* Child gets zero as return value */
        if (usp)
                childregs->regs[29] = usp;

        p->thread.reg29 = (unsigned long) childregs;
        p->thread.reg31 = (unsigned long) ret_from_fork;

        /*
         * New tasks lose permission to use the fpu. This accelerates context
         * switching for most programs since they don't use the fpu.
         */
        childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);

        clear_tsk_thread_flag(p, TIF_USEDFPU);
        clear_tsk_thread_flag(p, TIF_USEDMSA);
        clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);

#ifdef CONFIG_MIPS_MT_FPAFF
        clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */

#ifdef CONFIG_MIPS_FP_SUPPORT
        atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
#endif

        if (clone_flags & CLONE_SETTLS)
                ti->tp_value = tls;

        return 0;
}

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

struct mips_frame_info {
        void            *func;
        unsigned long   func_size;
        int             frame_size;
        int             pc_offset;
};

#define J_TARGET(pc,target)     \
                (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
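
/*
 * Worked example: J_TARGET() resolves the absolute destination of a
 * j/jal by shifting the 26-bit target field left by 2 and keeping the
 * top four bits of the jump's own PC. For a j at pc = 0x80123460 with
 * target = 0x0048d20, that is
 * (0x80123460 & 0xf0000000) | (0x0048d20 << 2) = 0x80123480.
 */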

static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * jr16 ra
         * jr ra
         */
        if (mm_insn_16bit(ip->word >> 16)) {
                if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
                    ip->mm16_r5_format.rt == mm_jr16_op &&
                    ip->mm16_r5_format.imm == 31)
                        return 1;
                return 0;
        }

        if (ip->r_format.opcode == mm_pool32a_op &&
            ip->r_format.func == mm_pool32axf_op &&
            ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
            ip->r_format.rt == 31)
                return 1;
        return 0;
#else
        if (ip->r_format.opcode == spec_op &&
            ip->r_format.func == jr_op &&
            ip->r_format.rs == 31)
                return 1;
        return 0;
#endif
}
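
/*
 * Classic-MIPS example of what the test above accepts: "jr $ra"
 * encodes as 0x03e00008, i.e. opcode 0 (spec_op), rs = 31 ($ra),
 * func = 0x08 (jr_op).
 */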

static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * swsp ra,offset
         * swm16 reglist,offset(sp)
         * swm32 reglist,offset(sp)
         * sw32 ra,offset(sp)
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is way more fun...
         */
        if (mm_insn_16bit(ip->word >> 16)) {
                switch (ip->mm16_r5_format.opcode) {
                case mm_swsp16_op:
                        if (ip->mm16_r5_format.rt != 31)
                                return 0;

                        *poff = ip->mm16_r5_format.imm;
                        *poff = (*poff << 2) / sizeof(ulong);
                        return 1;

                case mm_pool16c_op:
                        switch (ip->mm16_m_format.func) {
                        case mm_swm16_op:
                                *poff = ip->mm16_m_format.imm;
                                *poff += 1 + ip->mm16_m_format.rlist;
                                *poff = (*poff << 2) / sizeof(ulong);
                                return 1;

                        default:
                                return 0;
                        }

                default:
                        return 0;
                }
        }

        switch (ip->i_format.opcode) {
        case mm_sw32_op:
                if (ip->i_format.rs != 29)
                        return 0;
                if (ip->i_format.rt != 31)
                        return 0;

                *poff = ip->i_format.simmediate / sizeof(ulong);
                return 1;

        case mm_pool32b_op:
                switch (ip->mm_m_format.func) {
                case mm_swm32_func:
                        if (ip->mm_m_format.rd < 0x10)
                                return 0;
                        if (ip->mm_m_format.base != 29)
                                return 0;

                        *poff = ip->mm_m_format.simmediate;
                        *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
                        *poff /= sizeof(ulong);
                        return 1;
                default:
                        return 0;
                }

        default:
                return 0;
        }
#else
        /* sw / sd $ra, offset($sp) */
        if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
                ip->i_format.rs == 29 && ip->i_format.rt == 31) {
                *poff = ip->i_format.simmediate / sizeof(ulong);
                return 1;
        }
#ifdef CONFIG_CPU_LOONGSON64
        if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
                      (ip->loongson3_lswc2_format.ls == 1) &&
                      (ip->loongson3_lswc2_format.fr == 0) &&
                      (ip->loongson3_lswc2_format.base == 29)) {
                if (ip->loongson3_lswc2_format.rt == 31) {
                        *poff = ip->loongson3_lswc2_format.offset << 1;
                        return 1;
                }
                if (ip->loongson3_lswc2_format.rq == 31) {
                        *poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
                        return 1;
                }
        }
#endif
        return 0;
#endif
}
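
/*
 * Classic-MIPS example of a matching RA save: "sw $ra, 28($sp)" encodes
 * as 0xafbf001c (opcode sw_op, rs = 29 ($sp), rt = 31 ($ra),
 * simmediate = 28), so *poff becomes 28 / sizeof(ulong), the index of
 * the saved RA when the frame is viewed as an array of longs.
 */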

static inline int is_jump_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
        /*
         * jr16,jrc,jalr16,jalrs16
         * jal
         * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is kind of more fun...
         */
        if (mm_insn_16bit(ip->word >> 16)) {
                if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
                    (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
                        return 1;
                return 0;
        }

        if (ip->j_format.opcode == mm_j32_op)
                return 1;
        if (ip->j_format.opcode == mm_jal32_op)
                return 1;
        if (ip->r_format.opcode != mm_pool32a_op ||
                        ip->r_format.func != mm_pool32axf_op)
                return 0;
        return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
#else
        if (ip->j_format.opcode == j_op)
                return 1;
        if (ip->j_format.opcode == jal_op)
                return 1;
        if (ip->r_format.opcode != spec_op)
                return 0;
        return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
}

static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
{
#ifdef CONFIG_CPU_MICROMIPS
        unsigned short tmp;

        /*
         * addiusp -imm
         * addius5 sp,-imm
         * addiu32 sp,sp,-imm
         * jraddiusp - NOT SUPPORTED
         *
         * microMIPS is not more fun...
         */
        if (mm_insn_16bit(ip->word >> 16)) {
                if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
                    ip->mm16_r3_format.simmediate & mm_addiusp_func) {
                        tmp = ip->mm_b0_format.simmediate >> 1;
                        tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
                        if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
                                tmp ^= 0x100;
                        *frame_size = -(signed short)(tmp << 2);
                        return 1;
                }
                if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
                    ip->mm16_r5_format.rt == 29) {
                        tmp = ip->mm16_r5_format.imm >> 1;
                        *frame_size = -(signed short)(tmp & 0xf);
                        return 1;
                }
                return 0;
        }

        if (ip->mm_i_format.opcode == mm_addiu32_op &&
            ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
                *frame_size = -ip->i_format.simmediate;
                return 1;
        }
#else
        /* addiu/daddiu sp,sp,-imm */
        if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
                return 0;

        if (ip->i_format.opcode == addiu_op ||
            ip->i_format.opcode == daddiu_op) {
                *frame_size = -ip->i_format.simmediate;
                return 1;
        }
#endif
        return 0;
}
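
/*
 * Classic-MIPS example of a matching stack adjustment:
 * "addiu $sp, $sp, -32" encodes as 0x27bdffe0 (opcode addiu_op,
 * rs = rt = 29, simmediate = -32), yielding *frame_size = 32.
 */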

static int get_frame_info(struct mips_frame_info *info)
{
        bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
        union mips_instruction insn, *ip, *ip_end;
        unsigned int last_insn_size = 0;
        bool saw_jump = false;

        info->pc_offset = -1;
        info->frame_size = 0;

        ip = (void *)msk_isa16_mode((ulong)info->func);
        if (!ip)
                goto err;

        ip_end = (void *)ip + (info->func_size ? info->func_size : 512);

        while (ip < ip_end) {
                ip = (void *)ip + last_insn_size;

                if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
                        insn.word = ip->halfword[0] << 16;
                        last_insn_size = 2;
                } else if (is_mmips) {
                        insn.word = ip->halfword[0] << 16 | ip->halfword[1];
                        last_insn_size = 4;
                } else {
                        insn.word = ip->word;
                        last_insn_size = 4;
                }

                if (is_jr_ra_ins(ip)) {
                        break;
                } else if (!info->frame_size) {
                        is_sp_move_ins(&insn, &info->frame_size);
                        continue;
                } else if (!saw_jump && is_jump_ins(ip)) {
                        /*
                         * If we see a jump instruction, we are finished
                         * with the frame save.
                         *
                         * Some functions can have a shortcut return at
                         * the beginning of the function, so don't start
                         * looking for a jump instruction until we see the
                         * frame setup.
                         *
                         * The RA save instruction can get put into the
                         * delay slot of the jump instruction, so look
                         * at the next instruction, too.
                         */
                        saw_jump = true;
                        continue;
                }
                if (info->pc_offset == -1 &&
                    is_ra_save_ins(&insn, &info->pc_offset))
                        break;
                if (saw_jump)
                        break;
        }
        if (info->frame_size && info->pc_offset >= 0) /* nested */
                return 0;
        if (info->pc_offset < 0) /* leaf */
                return 1;
        /* prologue seems bogus... */
err:
        return -1;
}
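
/*
 * A typical (non-microMIPS) prologue that the scan above understands:
 *
 *	addiu	sp, sp, -32	# -> info->frame_size = 32
 *	sw	ra, 28(sp)	# -> info->pc_offset = 28 / sizeof(ulong)
 *
 * Return value: 0 for a nested (non-leaf) function, 1 for a leaf
 * function, -1 if the prologue could not be analyzed.
 */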

static struct mips_frame_info schedule_mfi __read_mostly;

#ifdef CONFIG_KALLSYMS
static unsigned long get___schedule_addr(void)
{
        return kallsyms_lookup_name("__schedule");
}
#else
static unsigned long get___schedule_addr(void)
{
        union mips_instruction *ip = (void *)schedule;
        int max_insns = 8;
        int i;

        for (i = 0; i < max_insns; i++, ip++) {
                if (ip->j_format.opcode == j_op)
                        return J_TARGET(ip, ip->j_format.target);
        }
        return 0;
}
#endif

static int __init frame_info_init(void)
{
        unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long ofs;
#endif
        unsigned long addr;

        addr = get___schedule_addr();
        if (!addr)
                addr = (unsigned long)schedule;

#ifdef CONFIG_KALLSYMS
        kallsyms_lookup_size_offset(addr, &size, &ofs);
#endif
        schedule_mfi.func = (void *)addr;
        schedule_mfi.func_size = size;

        get_frame_info(&schedule_mfi);

        /*
         * Without schedule() frame info, the results given by
         * thread_saved_pc() and get_wchan() are not reliable.
         */
        if (schedule_mfi.pc_offset < 0)
                printk("Can't analyze schedule() prologue at %p\n", schedule);

        return 0;
}

arch_initcall(frame_info_init);

/*
 * Return saved PC of a blocked thread.
 */
static unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;

        /* Newborn processes are a special case */
        if (t->reg31 == (unsigned long) ret_from_fork)
                return t->reg31;
        if (schedule_mfi.pc_offset < 0)
                return 0;
        return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}


#ifdef CONFIG_KALLSYMS
/* generic stack unwinding function */
unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
                                              unsigned long *sp,
                                              unsigned long pc,
                                              unsigned long *ra)
{
        unsigned long low, high, irq_stack_high;
        struct mips_frame_info info;
        unsigned long size, ofs;
        struct pt_regs *regs;
        int leaf;

        if (!stack_page)
                return 0;

        /*
         * IRQ stacks start at IRQ_STACK_START
         * task stacks at THREAD_SIZE - 32
         */
        low = stack_page;
        if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
                high = stack_page + IRQ_STACK_START;
                irq_stack_high = high;
        } else {
                high = stack_page + THREAD_SIZE - 32;
                irq_stack_high = 0;
        }

        /*
         * If we reached the top of the interrupt stack, start unwinding
         * the interrupted task stack.
         */
        if (unlikely(*sp == irq_stack_high)) {
                unsigned long task_sp = *(unsigned long *)*sp;

                /*
                 * Check that the pointer saved in the IRQ stack head points to
                 * something within the stack of the current task
                 */
                if (!object_is_on_stack((void *)task_sp))
                        return 0;

                /*
                 * Follow the pointer to the task's kernel stack frame where
                 * the interrupted state was saved.
                 */
                regs = (struct pt_regs *)task_sp;
                pc = regs->cp0_epc;
                if (!user_mode(regs) && __kernel_text_address(pc)) {
                        *sp = regs->regs[29];
                        *ra = regs->regs[31];
                        return pc;
                }
                return 0;
        }
        if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
                return 0;
        /*
         * Return ra if an exception occurred at the first instruction
         */
        if (unlikely(ofs == 0)) {
                pc = *ra;
                *ra = 0;
                return pc;
        }

        info.func = (void *)(pc - ofs);
        info.func_size = ofs;   /* analyze from start to ofs */
        leaf = get_frame_info(&info);
        if (leaf < 0)
                return 0;

        if (*sp < low || *sp + info.frame_size > high)
                return 0;

        if (leaf)
                /*
                 * In some extreme cases, get_frame_info() can
                 * wrongly consider a nested function to be a leaf
                 * one. In those cases, avoid always returning the
                 * same value.
                 */
                pc = pc != *ra ? *ra : 0;
        else
                pc = ((unsigned long *)(*sp))[info.pc_offset];

        *sp += info.frame_size;
        *ra = 0;
        return __kernel_text_address(pc) ? pc : 0;
}
EXPORT_SYMBOL(unwind_stack_by_address);

/* used by show_backtrace() */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
                           unsigned long pc, unsigned long *ra)
{
        unsigned long stack_page = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                if (on_irq_stack(cpu, *sp)) {
                        stack_page = (unsigned long)irq_stack[cpu];
                        break;
                }
        }

        if (!stack_page)
                stack_page = (unsigned long)task_stack_page(task);

        return unwind_stack_by_address(stack_page, sp, pc, ra);
}
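
/*
 * Typical caller pattern, sketched from show_backtrace() in
 * arch/mips/kernel/traps.c:
 *
 *	pc = regs->cp0_epc;
 *	sp = regs->regs[29];
 *	ra = regs->regs[31];
 *	do {
 *		print_ip_sym(KERN_DEFAULT, pc);
 *		pc = unwind_stack(task, &sp, pc, &ra);
 *	} while (pc);
 */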
#endif

/*
 * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
 */
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
        unsigned long sp;
        unsigned long ra = 0;
#endif

        if (!task || task == current || task_is_running(task))
                goto out;
        if (!task_stack_page(task))
                goto out;

        pc = thread_saved_pc(task);

#ifdef CONFIG_KALLSYMS
        sp = task->thread.reg29 + schedule_mfi.frame_size;

        while (in_sched_functions(pc))
                pc = unwind_stack(task, &sp, pc, &ra);
#endif

out:
        return pc;
}

unsigned long mips_stack_top(void)
{
        unsigned long top = TASK_SIZE & PAGE_MASK;

        if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
                /* One page for branch delay slot "emulation" */
                top -= PAGE_SIZE;
        }

        /* Space for the VDSO, data page & GIC user page */
        top -= PAGE_ALIGN(current->thread.abi->vdso->size);
        top -= PAGE_SIZE;
        top -= mips_gic_present() ? PAGE_SIZE : 0;

        /* Space for cache colour alignment */
        if (cpu_has_dc_aliases)
                top -= shm_align_mask + 1;

        /* Space to randomize the VDSO base */
        if (current->flags & PF_RANDOMIZE)
                top -= VDSO_RANDOMIZE_SIZE;

        return top;
}
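
/*
 * Worked example (assuming 4 KiB pages, CONFIG_MIPS_FP_SUPPORT, no GIC
 * and no cache aliasing): starting from TASK_SIZE & PAGE_MASK, one page
 * is carved out for branch delay slot emulation, PAGE_ALIGN(vdso->size)
 * plus one page for the VDSO and its data page, and, for randomized
 * tasks, VDSO_RANDOMIZE_SIZE more; whatever remains is the highest
 * address available to the stack.
 */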

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the
 * 64-bit ABIs.
 */
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;

        return sp & ALMASK;
}
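
/*
 * get_random_int() & ~PAGE_MASK keeps only the page-offset bits, so the
 * stack base is lowered by at most PAGE_SIZE - 1 bytes before being
 * rounded down to an ALMASK boundary.
 */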

static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
        nmi_cpu_backtrace(get_irq_regs());
        cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
        CSD_INIT(handle_backtrace, NULL);

static void raise_backtrace(cpumask_t *mask)
{
        call_single_data_t *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                /*
                 * If we previously sent an IPI to the target CPU & it hasn't
                 * cleared its bit in the busy cpumask then it didn't handle
                 * our previous IPI & it's not safe for us to reuse the
                 * call_single_data_t.
                 */
                if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
                        pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
                                cpu);
                        continue;
                }

                csd = &per_cpu(backtrace_csd, cpu);
                smp_call_function_single_async(cpu, csd);
        }
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
        nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

int mips_get_process_fp_mode(struct task_struct *task)
{
        int value = 0;

        if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
                value |= PR_FP_MODE_FR;
        if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
                value |= PR_FP_MODE_FRE;

        return value;
}
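
/*
 * This is the backend of prctl(PR_GET_FP_MODE). A userland sketch of a
 * query against the current process:
 *
 *	int mode = prctl(PR_GET_FP_MODE);
 *	int fr  = !!(mode & PR_FP_MODE_FR);
 *	int fre = !!(mode & PR_FP_MODE_FRE);
 */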

static long prepare_for_fp_mode_switch(void *unused)
{
        /*
         * This is icky, but we use this to simply ensure that all CPUs have
         * context switched, regardless of whether they were previously running
         * kernel or user code. This ensures that no CPU that a mode-switching
         * program may execute on keeps its FPU enabled (& in the old mode)
         * throughout the mode switch.
         */
        return 0;
}

int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
        const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
        struct task_struct *t;
        struct cpumask process_cpus;
        int cpu;

        /* If nothing to change, return right away, successfully.  */
        if (value == mips_get_process_fp_mode(task))
                return 0;

        /* Only accept a mode change if 64-bit FP enabled for o32.  */
        if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
                return -EOPNOTSUPP;

        /* And only for o32 tasks.  */
        if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
                return -EOPNOTSUPP;

        /* Check the value is valid */
        if (value & ~known_bits)
                return -EOPNOTSUPP;

        /* Setting FRE without FR is not supported.  */
        if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
                return -EOPNOTSUPP;

        /* Avoid inadvertently triggering emulation */
        if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
            !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
                return -EOPNOTSUPP;
        if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
                return -EOPNOTSUPP;

        /* FR = 0 not supported in MIPS R6 */
        if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
                return -EOPNOTSUPP;

        /* Indicate the new FP mode in each thread */
        for_each_thread(task, t) {
                /* Update desired FP register width */
                if (value & PR_FP_MODE_FR) {
                        clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
                } else {
                        set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
                        clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
                }

                /* Update desired FP single layout */
                if (value & PR_FP_MODE_FRE)
                        set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
                else
                        clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
        }

        /*
         * We need to ensure that all threads in the process have switched mode
         * before returning, in order to allow userland to not worry about
         * races. We can do this by forcing all CPUs that any thread in the
         * process may be running on to schedule something else - in this case
         * prepare_for_fp_mode_switch().
         *
         * We begin by generating a mask of all CPUs that any thread in the
         * process may be running on.
         */
        cpumask_clear(&process_cpus);
        for_each_thread(task, t)
                cpumask_set_cpu(task_cpu(t), &process_cpus);

        /*
         * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
         *
         * The CPUs may have rescheduled already since we switched mode or
         * generated the cpumask, but that doesn't matter. If a task in this
         * process is scheduled out then our scheduling of
         * prepare_for_fp_mode_switch() is simply redundant; if it's scheduled
         * back in then it will already have picked up the new FP mode while
         * doing so.
         */
        cpus_read_lock();
        for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
                work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
        cpus_read_unlock();

        return 0;
}
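
/*
 * Likewise the backend of prctl(PR_SET_FP_MODE). Userland sketch:
 *
 *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR) != 0)
 *		perror("PR_SET_FP_MODE");
 */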
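
/*
 * mips_dump_regs32()/mips_dump_regs64() below flatten a struct pt_regs
 * into the flat register array layout used by the ptrace regsets and
 * ELF core dumps (see arch/mips/kernel/ptrace.c). k0/k1 are dumped as
 * zero since they are scratch registers with no stable user-visible
 * content.
 */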
#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
{
        unsigned int i;

        for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
                /* k0/k1 are copied as zero. */
                if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
                        uregs[i] = 0;
                else
                        uregs[i] = regs->regs[i - MIPS32_EF_R0];
        }

        uregs[MIPS32_EF_LO] = regs->lo;
        uregs[MIPS32_EF_HI] = regs->hi;
        uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
        uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
        uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */

#ifdef CONFIG_64BIT
void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
        unsigned int i;

        for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
                /* k0/k1 are copied as zero. */
                if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
                        uregs[i] = 0;
                else
                        uregs[i] = regs->regs[i - MIPS64_EF_R0];
        }

        uregs[MIPS64_EF_LO] = regs->lo;
        uregs[MIPS64_EF_HI] = regs->hi;
        uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
        uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
        uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
        uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
}
#endif /* CONFIG_64BIT */