Merge branches 'for-next/52-bit-kva', 'for-next/cpu-topology', 'for-next/error-inject...
[linux-2.6-microblaze.git] / arch / arm64 / kernel / ptrace.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Based on arch/arm/kernel/ptrace.c
4  *
5  * By Ross Biro 1/23/92
6  * edited by Linus Torvalds
7  * ARM modifications Copyright (C) 2000 Russell King
8  * Copyright (C) 2012 ARM Ltd.
9  */
10
11 #include <linux/audit.h>
12 #include <linux/compat.h>
13 #include <linux/kernel.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sched/task_stack.h>
16 #include <linux/mm.h>
17 #include <linux/nospec.h>
18 #include <linux/smp.h>
19 #include <linux/ptrace.h>
20 #include <linux/user.h>
21 #include <linux/seccomp.h>
22 #include <linux/security.h>
23 #include <linux/init.h>
24 #include <linux/signal.h>
25 #include <linux/string.h>
26 #include <linux/uaccess.h>
27 #include <linux/perf_event.h>
28 #include <linux/hw_breakpoint.h>
29 #include <linux/regset.h>
30 #include <linux/tracehook.h>
31 #include <linux/elf.h>
32
33 #include <asm/compat.h>
34 #include <asm/cpufeature.h>
35 #include <asm/debug-monitors.h>
36 #include <asm/fpsimd.h>
37 #include <asm/pgtable.h>
38 #include <asm/pointer_auth.h>
39 #include <asm/stacktrace.h>
40 #include <asm/syscall.h>
41 #include <asm/traps.h>
42 #include <asm/system_misc.h>
43
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/syscalls.h>
46
47 struct pt_regs_offset {
48         const char *name;
49         int offset;
50 };
51
52 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
53 #define REG_OFFSET_END {.name = NULL, .offset = 0}
54 #define GPR_OFFSET_NAME(r) \
55         {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
56
57 static const struct pt_regs_offset regoffset_table[] = {
58         GPR_OFFSET_NAME(0),
59         GPR_OFFSET_NAME(1),
60         GPR_OFFSET_NAME(2),
61         GPR_OFFSET_NAME(3),
62         GPR_OFFSET_NAME(4),
63         GPR_OFFSET_NAME(5),
64         GPR_OFFSET_NAME(6),
65         GPR_OFFSET_NAME(7),
66         GPR_OFFSET_NAME(8),
67         GPR_OFFSET_NAME(9),
68         GPR_OFFSET_NAME(10),
69         GPR_OFFSET_NAME(11),
70         GPR_OFFSET_NAME(12),
71         GPR_OFFSET_NAME(13),
72         GPR_OFFSET_NAME(14),
73         GPR_OFFSET_NAME(15),
74         GPR_OFFSET_NAME(16),
75         GPR_OFFSET_NAME(17),
76         GPR_OFFSET_NAME(18),
77         GPR_OFFSET_NAME(19),
78         GPR_OFFSET_NAME(20),
79         GPR_OFFSET_NAME(21),
80         GPR_OFFSET_NAME(22),
81         GPR_OFFSET_NAME(23),
82         GPR_OFFSET_NAME(24),
83         GPR_OFFSET_NAME(25),
84         GPR_OFFSET_NAME(26),
85         GPR_OFFSET_NAME(27),
86         GPR_OFFSET_NAME(28),
87         GPR_OFFSET_NAME(29),
88         GPR_OFFSET_NAME(30),
89         {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
90         REG_OFFSET_NAME(sp),
91         REG_OFFSET_NAME(pc),
92         REG_OFFSET_NAME(pstate),
93         REG_OFFSET_END,
94 };
95
96 /**
97  * regs_query_register_offset() - query register offset from its name
98  * @name:       the name of a register
99  *
100  * regs_query_register_offset() returns the offset of a register in struct
101  * pt_regs from its name. If the name is invalid, this returns -EINVAL;
102  */
103 int regs_query_register_offset(const char *name)
104 {
105         const struct pt_regs_offset *roff;
106
107         for (roff = regoffset_table; roff->name != NULL; roff++)
108                 if (!strcmp(roff->name, name))
109                         return roff->offset;
110         return -EINVAL;
111 }
112
113 /**
114  * regs_within_kernel_stack() - check the address in the stack
115  * @regs:      pt_regs which contains kernel stack pointer.
116  * @addr:      address which is checked.
117  *
118  * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
119  * If @addr is within the kernel stack, it returns true. If not, returns false.
120  */
121 static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
122 {
123         return ((addr & ~(THREAD_SIZE - 1))  ==
124                 (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
125                 on_irq_stack(addr, NULL);
126 }
127
128 /**
129  * regs_get_kernel_stack_nth() - get Nth entry of the stack
130  * @regs:       pt_regs which contains kernel stack pointer.
131  * @n:          stack entry number.
132  *
133  * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
134  * is specified by @regs. If the @n th entry is NOT in the kernel stack,
135  * this returns 0.
136  */
137 unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
138 {
139         unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
140
141         addr += n;
142         if (regs_within_kernel_stack(regs, (unsigned long)addr))
143                 return *addr;
144         else
145                 return 0;
146 }
147
148 /*
149  * TODO: does not yet catch signals sent when the child dies.
150  * in exit.c or in signal.c.
151  */
152
153 /*
154  * Called by kernel/ptrace.c when detaching..
155  */
156 void ptrace_disable(struct task_struct *child)
157 {
158         /*
159          * This would be better off in core code, but PTRACE_DETACH has
160          * grown its fair share of arch-specific worts and changing it
161          * is likely to cause regressions on obscure architectures.
162          */
163         user_disable_single_step(child);
164 }
165
166 #ifdef CONFIG_HAVE_HW_BREAKPOINT
167 /*
168  * Handle hitting a HW-breakpoint.
169  */
170 static void ptrace_hbptriggered(struct perf_event *bp,
171                                 struct perf_sample_data *data,
172                                 struct pt_regs *regs)
173 {
174         struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
175         const char *desc = "Hardware breakpoint trap (ptrace)";
176
177 #ifdef CONFIG_COMPAT
178         if (is_compat_task()) {
179                 int si_errno = 0;
180                 int i;
181
182                 for (i = 0; i < ARM_MAX_BRP; ++i) {
183                         if (current->thread.debug.hbp_break[i] == bp) {
184                                 si_errno = (i << 1) + 1;
185                                 break;
186                         }
187                 }
188
189                 for (i = 0; i < ARM_MAX_WRP; ++i) {
190                         if (current->thread.debug.hbp_watch[i] == bp) {
191                                 si_errno = -((i << 1) + 1);
192                                 break;
193                         }
194                 }
195                 arm64_force_sig_ptrace_errno_trap(si_errno,
196                                                   (void __user *)bkpt->trigger,
197                                                   desc);
198         }
199 #endif
200         arm64_force_sig_fault(SIGTRAP, TRAP_HWBKPT,
201                               (void __user *)(bkpt->trigger),
202                               desc);
203 }
204
205 /*
206  * Unregister breakpoints from this task and reset the pointers in
207  * the thread_struct.
208  */
209 void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
210 {
211         int i;
212         struct thread_struct *t = &tsk->thread;
213
214         for (i = 0; i < ARM_MAX_BRP; i++) {
215                 if (t->debug.hbp_break[i]) {
216                         unregister_hw_breakpoint(t->debug.hbp_break[i]);
217                         t->debug.hbp_break[i] = NULL;
218                 }
219         }
220
221         for (i = 0; i < ARM_MAX_WRP; i++) {
222                 if (t->debug.hbp_watch[i]) {
223                         unregister_hw_breakpoint(t->debug.hbp_watch[i]);
224                         t->debug.hbp_watch[i] = NULL;
225                 }
226         }
227 }
228
229 void ptrace_hw_copy_thread(struct task_struct *tsk)
230 {
231         memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
232 }
233
234 static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
235                                                struct task_struct *tsk,
236                                                unsigned long idx)
237 {
238         struct perf_event *bp = ERR_PTR(-EINVAL);
239
240         switch (note_type) {
241         case NT_ARM_HW_BREAK:
242                 if (idx >= ARM_MAX_BRP)
243                         goto out;
244                 idx = array_index_nospec(idx, ARM_MAX_BRP);
245                 bp = tsk->thread.debug.hbp_break[idx];
246                 break;
247         case NT_ARM_HW_WATCH:
248                 if (idx >= ARM_MAX_WRP)
249                         goto out;
250                 idx = array_index_nospec(idx, ARM_MAX_WRP);
251                 bp = tsk->thread.debug.hbp_watch[idx];
252                 break;
253         }
254
255 out:
256         return bp;
257 }
258
259 static int ptrace_hbp_set_event(unsigned int note_type,
260                                 struct task_struct *tsk,
261                                 unsigned long idx,
262                                 struct perf_event *bp)
263 {
264         int err = -EINVAL;
265
266         switch (note_type) {
267         case NT_ARM_HW_BREAK:
268                 if (idx >= ARM_MAX_BRP)
269                         goto out;
270                 idx = array_index_nospec(idx, ARM_MAX_BRP);
271                 tsk->thread.debug.hbp_break[idx] = bp;
272                 err = 0;
273                 break;
274         case NT_ARM_HW_WATCH:
275                 if (idx >= ARM_MAX_WRP)
276                         goto out;
277                 idx = array_index_nospec(idx, ARM_MAX_WRP);
278                 tsk->thread.debug.hbp_watch[idx] = bp;
279                 err = 0;
280                 break;
281         }
282
283 out:
284         return err;
285 }
286
287 static struct perf_event *ptrace_hbp_create(unsigned int note_type,
288                                             struct task_struct *tsk,
289                                             unsigned long idx)
290 {
291         struct perf_event *bp;
292         struct perf_event_attr attr;
293         int err, type;
294
295         switch (note_type) {
296         case NT_ARM_HW_BREAK:
297                 type = HW_BREAKPOINT_X;
298                 break;
299         case NT_ARM_HW_WATCH:
300                 type = HW_BREAKPOINT_RW;
301                 break;
302         default:
303                 return ERR_PTR(-EINVAL);
304         }
305
306         ptrace_breakpoint_init(&attr);
307
308         /*
309          * Initialise fields to sane defaults
310          * (i.e. values that will pass validation).
311          */
312         attr.bp_addr    = 0;
313         attr.bp_len     = HW_BREAKPOINT_LEN_4;
314         attr.bp_type    = type;
315         attr.disabled   = 1;
316
317         bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
318         if (IS_ERR(bp))
319                 return bp;
320
321         err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
322         if (err)
323                 return ERR_PTR(err);
324
325         return bp;
326 }
327
328 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
329                                      struct arch_hw_breakpoint_ctrl ctrl,
330                                      struct perf_event_attr *attr)
331 {
332         int err, len, type, offset, disabled = !ctrl.enabled;
333
334         attr->disabled = disabled;
335         if (disabled)
336                 return 0;
337
338         err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
339         if (err)
340                 return err;
341
342         switch (note_type) {
343         case NT_ARM_HW_BREAK:
344                 if ((type & HW_BREAKPOINT_X) != type)
345                         return -EINVAL;
346                 break;
347         case NT_ARM_HW_WATCH:
348                 if ((type & HW_BREAKPOINT_RW) != type)
349                         return -EINVAL;
350                 break;
351         default:
352                 return -EINVAL;
353         }
354
355         attr->bp_len    = len;
356         attr->bp_type   = type;
357         attr->bp_addr   += offset;
358
359         return 0;
360 }
361
362 static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info)
363 {
364         u8 num;
365         u32 reg = 0;
366
367         switch (note_type) {
368         case NT_ARM_HW_BREAK:
369                 num = hw_breakpoint_slots(TYPE_INST);
370                 break;
371         case NT_ARM_HW_WATCH:
372                 num = hw_breakpoint_slots(TYPE_DATA);
373                 break;
374         default:
375                 return -EINVAL;
376         }
377
378         reg |= debug_monitors_arch();
379         reg <<= 8;
380         reg |= num;
381
382         *info = reg;
383         return 0;
384 }
385
386 static int ptrace_hbp_get_ctrl(unsigned int note_type,
387                                struct task_struct *tsk,
388                                unsigned long idx,
389                                u32 *ctrl)
390 {
391         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
392
393         if (IS_ERR(bp))
394                 return PTR_ERR(bp);
395
396         *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
397         return 0;
398 }
399
400 static int ptrace_hbp_get_addr(unsigned int note_type,
401                                struct task_struct *tsk,
402                                unsigned long idx,
403                                u64 *addr)
404 {
405         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
406
407         if (IS_ERR(bp))
408                 return PTR_ERR(bp);
409
410         *addr = bp ? counter_arch_bp(bp)->address : 0;
411         return 0;
412 }
413
414 static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
415                                                         struct task_struct *tsk,
416                                                         unsigned long idx)
417 {
418         struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
419
420         if (!bp)
421                 bp = ptrace_hbp_create(note_type, tsk, idx);
422
423         return bp;
424 }
425
426 static int ptrace_hbp_set_ctrl(unsigned int note_type,
427                                struct task_struct *tsk,
428                                unsigned long idx,
429                                u32 uctrl)
430 {
431         int err;
432         struct perf_event *bp;
433         struct perf_event_attr attr;
434         struct arch_hw_breakpoint_ctrl ctrl;
435
436         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
437         if (IS_ERR(bp)) {
438                 err = PTR_ERR(bp);
439                 return err;
440         }
441
442         attr = bp->attr;
443         decode_ctrl_reg(uctrl, &ctrl);
444         err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
445         if (err)
446                 return err;
447
448         return modify_user_hw_breakpoint(bp, &attr);
449 }
450
451 static int ptrace_hbp_set_addr(unsigned int note_type,
452                                struct task_struct *tsk,
453                                unsigned long idx,
454                                u64 addr)
455 {
456         int err;
457         struct perf_event *bp;
458         struct perf_event_attr attr;
459
460         bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
461         if (IS_ERR(bp)) {
462                 err = PTR_ERR(bp);
463                 return err;
464         }
465
466         attr = bp->attr;
467         attr.bp_addr = addr;
468         err = modify_user_hw_breakpoint(bp, &attr);
469         return err;
470 }
471
472 #define PTRACE_HBP_ADDR_SZ      sizeof(u64)
473 #define PTRACE_HBP_CTRL_SZ      sizeof(u32)
474 #define PTRACE_HBP_PAD_SZ       sizeof(u32)
475
476 static int hw_break_get(struct task_struct *target,
477                         const struct user_regset *regset,
478                         unsigned int pos, unsigned int count,
479                         void *kbuf, void __user *ubuf)
480 {
481         unsigned int note_type = regset->core_note_type;
482         int ret, idx = 0, offset, limit;
483         u32 info, ctrl;
484         u64 addr;
485
486         /* Resource info */
487         ret = ptrace_hbp_get_resource_info(note_type, &info);
488         if (ret)
489                 return ret;
490
491         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
492                                   sizeof(info));
493         if (ret)
494                 return ret;
495
496         /* Pad */
497         offset = offsetof(struct user_hwdebug_state, pad);
498         ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
499                                        offset + PTRACE_HBP_PAD_SZ);
500         if (ret)
501                 return ret;
502
503         /* (address, ctrl) registers */
504         offset = offsetof(struct user_hwdebug_state, dbg_regs);
505         limit = regset->n * regset->size;
506         while (count && offset < limit) {
507                 ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
508                 if (ret)
509                         return ret;
510                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
511                                           offset, offset + PTRACE_HBP_ADDR_SZ);
512                 if (ret)
513                         return ret;
514                 offset += PTRACE_HBP_ADDR_SZ;
515
516                 ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
517                 if (ret)
518                         return ret;
519                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
520                                           offset, offset + PTRACE_HBP_CTRL_SZ);
521                 if (ret)
522                         return ret;
523                 offset += PTRACE_HBP_CTRL_SZ;
524
525                 ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
526                                                offset,
527                                                offset + PTRACE_HBP_PAD_SZ);
528                 if (ret)
529                         return ret;
530                 offset += PTRACE_HBP_PAD_SZ;
531                 idx++;
532         }
533
534         return 0;
535 }
536
537 static int hw_break_set(struct task_struct *target,
538                         const struct user_regset *regset,
539                         unsigned int pos, unsigned int count,
540                         const void *kbuf, const void __user *ubuf)
541 {
542         unsigned int note_type = regset->core_note_type;
543         int ret, idx = 0, offset, limit;
544         u32 ctrl;
545         u64 addr;
546
547         /* Resource info and pad */
548         offset = offsetof(struct user_hwdebug_state, dbg_regs);
549         ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
550         if (ret)
551                 return ret;
552
553         /* (address, ctrl) registers */
554         limit = regset->n * regset->size;
555         while (count && offset < limit) {
556                 if (count < PTRACE_HBP_ADDR_SZ)
557                         return -EINVAL;
558                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
559                                          offset, offset + PTRACE_HBP_ADDR_SZ);
560                 if (ret)
561                         return ret;
562                 ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
563                 if (ret)
564                         return ret;
565                 offset += PTRACE_HBP_ADDR_SZ;
566
567                 if (!count)
568                         break;
569                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
570                                          offset, offset + PTRACE_HBP_CTRL_SZ);
571                 if (ret)
572                         return ret;
573                 ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
574                 if (ret)
575                         return ret;
576                 offset += PTRACE_HBP_CTRL_SZ;
577
578                 ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
579                                                 offset,
580                                                 offset + PTRACE_HBP_PAD_SZ);
581                 if (ret)
582                         return ret;
583                 offset += PTRACE_HBP_PAD_SZ;
584                 idx++;
585         }
586
587         return 0;
588 }
589 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
590
591 static int gpr_get(struct task_struct *target,
592                    const struct user_regset *regset,
593                    unsigned int pos, unsigned int count,
594                    void *kbuf, void __user *ubuf)
595 {
596         struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
597         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
598 }
599
600 static int gpr_set(struct task_struct *target, const struct user_regset *regset,
601                    unsigned int pos, unsigned int count,
602                    const void *kbuf, const void __user *ubuf)
603 {
604         int ret;
605         struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
606
607         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
608         if (ret)
609                 return ret;
610
611         if (!valid_user_regs(&newregs, target))
612                 return -EINVAL;
613
614         task_pt_regs(target)->user_regs = newregs;
615         return 0;
616 }
617
618 /*
619  * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
620  */
621 static int __fpr_get(struct task_struct *target,
622                      const struct user_regset *regset,
623                      unsigned int pos, unsigned int count,
624                      void *kbuf, void __user *ubuf, unsigned int start_pos)
625 {
626         struct user_fpsimd_state *uregs;
627
628         sve_sync_to_fpsimd(target);
629
630         uregs = &target->thread.uw.fpsimd_state;
631
632         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
633                                    start_pos, start_pos + sizeof(*uregs));
634 }
635
636 static int fpr_get(struct task_struct *target, const struct user_regset *regset,
637                    unsigned int pos, unsigned int count,
638                    void *kbuf, void __user *ubuf)
639 {
640         if (target == current)
641                 fpsimd_preserve_current_state();
642
643         return __fpr_get(target, regset, pos, count, kbuf, ubuf, 0);
644 }
645
646 static int __fpr_set(struct task_struct *target,
647                      const struct user_regset *regset,
648                      unsigned int pos, unsigned int count,
649                      const void *kbuf, const void __user *ubuf,
650                      unsigned int start_pos)
651 {
652         int ret;
653         struct user_fpsimd_state newstate;
654
655         /*
656          * Ensure target->thread.uw.fpsimd_state is up to date, so that a
657          * short copyin can't resurrect stale data.
658          */
659         sve_sync_to_fpsimd(target);
660
661         newstate = target->thread.uw.fpsimd_state;
662
663         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate,
664                                  start_pos, start_pos + sizeof(newstate));
665         if (ret)
666                 return ret;
667
668         target->thread.uw.fpsimd_state = newstate;
669
670         return ret;
671 }
672
673 static int fpr_set(struct task_struct *target, const struct user_regset *regset,
674                    unsigned int pos, unsigned int count,
675                    const void *kbuf, const void __user *ubuf)
676 {
677         int ret;
678
679         ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
680         if (ret)
681                 return ret;
682
683         sve_sync_from_fpsimd_zeropad(target);
684         fpsimd_flush_task_state(target);
685
686         return ret;
687 }
688
689 static int tls_get(struct task_struct *target, const struct user_regset *regset,
690                    unsigned int pos, unsigned int count,
691                    void *kbuf, void __user *ubuf)
692 {
693         unsigned long *tls = &target->thread.uw.tp_value;
694
695         if (target == current)
696                 tls_preserve_current_state();
697
698         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1);
699 }
700
701 static int tls_set(struct task_struct *target, const struct user_regset *regset,
702                    unsigned int pos, unsigned int count,
703                    const void *kbuf, const void __user *ubuf)
704 {
705         int ret;
706         unsigned long tls = target->thread.uw.tp_value;
707
708         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
709         if (ret)
710                 return ret;
711
712         target->thread.uw.tp_value = tls;
713         return ret;
714 }
715
716 static int system_call_get(struct task_struct *target,
717                            const struct user_regset *regset,
718                            unsigned int pos, unsigned int count,
719                            void *kbuf, void __user *ubuf)
720 {
721         int syscallno = task_pt_regs(target)->syscallno;
722
723         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
724                                    &syscallno, 0, -1);
725 }
726
727 static int system_call_set(struct task_struct *target,
728                            const struct user_regset *regset,
729                            unsigned int pos, unsigned int count,
730                            const void *kbuf, const void __user *ubuf)
731 {
732         int syscallno = task_pt_regs(target)->syscallno;
733         int ret;
734
735         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
736         if (ret)
737                 return ret;
738
739         task_pt_regs(target)->syscallno = syscallno;
740         return ret;
741 }
742
743 #ifdef CONFIG_ARM64_SVE
744
745 static void sve_init_header_from_task(struct user_sve_header *header,
746                                       struct task_struct *target)
747 {
748         unsigned int vq;
749
750         memset(header, 0, sizeof(*header));
751
752         header->flags = test_tsk_thread_flag(target, TIF_SVE) ?
753                 SVE_PT_REGS_SVE : SVE_PT_REGS_FPSIMD;
754         if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
755                 header->flags |= SVE_PT_VL_INHERIT;
756
757         header->vl = target->thread.sve_vl;
758         vq = sve_vq_from_vl(header->vl);
759
760         header->max_vl = sve_max_vl;
761         header->size = SVE_PT_SIZE(vq, header->flags);
762         header->max_size = SVE_PT_SIZE(sve_vq_from_vl(header->max_vl),
763                                       SVE_PT_REGS_SVE);
764 }
765
766 static unsigned int sve_size_from_header(struct user_sve_header const *header)
767 {
768         return ALIGN(header->size, SVE_VQ_BYTES);
769 }
770
771 static unsigned int sve_get_size(struct task_struct *target,
772                                  const struct user_regset *regset)
773 {
774         struct user_sve_header header;
775
776         if (!system_supports_sve())
777                 return 0;
778
779         sve_init_header_from_task(&header, target);
780         return sve_size_from_header(&header);
781 }
782
783 static int sve_get(struct task_struct *target,
784                    const struct user_regset *regset,
785                    unsigned int pos, unsigned int count,
786                    void *kbuf, void __user *ubuf)
787 {
788         int ret;
789         struct user_sve_header header;
790         unsigned int vq;
791         unsigned long start, end;
792
793         if (!system_supports_sve())
794                 return -EINVAL;
795
796         /* Header */
797         sve_init_header_from_task(&header, target);
798         vq = sve_vq_from_vl(header.vl);
799
800         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &header,
801                                   0, sizeof(header));
802         if (ret)
803                 return ret;
804
805         if (target == current)
806                 fpsimd_preserve_current_state();
807
808         /* Registers: FPSIMD-only case */
809
810         BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
811         if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD)
812                 return __fpr_get(target, regset, pos, count, kbuf, ubuf,
813                                  SVE_PT_FPSIMD_OFFSET);
814
815         /* Otherwise: full SVE case */
816
817         BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
818         start = SVE_PT_SVE_OFFSET;
819         end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
820         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
821                                   target->thread.sve_state,
822                                   start, end);
823         if (ret)
824                 return ret;
825
826         start = end;
827         end = SVE_PT_SVE_FPSR_OFFSET(vq);
828         ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
829                                        start, end);
830         if (ret)
831                 return ret;
832
833         /*
834          * Copy fpsr, and fpcr which must follow contiguously in
835          * struct fpsimd_state:
836          */
837         start = end;
838         end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
839         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
840                                   &target->thread.uw.fpsimd_state.fpsr,
841                                   start, end);
842         if (ret)
843                 return ret;
844
845         start = end;
846         end = sve_size_from_header(&header);
847         return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
848                                         start, end);
849 }
850
851 static int sve_set(struct task_struct *target,
852                    const struct user_regset *regset,
853                    unsigned int pos, unsigned int count,
854                    const void *kbuf, const void __user *ubuf)
855 {
856         int ret;
857         struct user_sve_header header;
858         unsigned int vq;
859         unsigned long start, end;
860
861         if (!system_supports_sve())
862                 return -EINVAL;
863
864         /* Header */
865         if (count < sizeof(header))
866                 return -EINVAL;
867         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &header,
868                                  0, sizeof(header));
869         if (ret)
870                 goto out;
871
872         /*
873          * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
874          * sve_set_vector_length(), which will also validate them for us:
875          */
876         ret = sve_set_vector_length(target, header.vl,
877                 ((unsigned long)header.flags & ~SVE_PT_REGS_MASK) << 16);
878         if (ret)
879                 goto out;
880
881         /* Actual VL set may be less than the user asked for: */
882         vq = sve_vq_from_vl(target->thread.sve_vl);
883
884         /* Registers: FPSIMD-only case */
885
886         BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET != sizeof(header));
887         if ((header.flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD) {
888                 ret = __fpr_set(target, regset, pos, count, kbuf, ubuf,
889                                 SVE_PT_FPSIMD_OFFSET);
890                 clear_tsk_thread_flag(target, TIF_SVE);
891                 goto out;
892         }
893
894         /* Otherwise: full SVE case */
895
896         /*
897          * If setting a different VL from the requested VL and there is
898          * register data, the data layout will be wrong: don't even
899          * try to set the registers in this case.
900          */
901         if (count && vq != sve_vq_from_vl(header.vl)) {
902                 ret = -EIO;
903                 goto out;
904         }
905
906         sve_alloc(target);
907
908         /*
909          * Ensure target->thread.sve_state is up to date with target's
910          * FPSIMD regs, so that a short copyin leaves trailing registers
911          * unmodified.
912          */
913         fpsimd_sync_to_sve(target);
914         set_tsk_thread_flag(target, TIF_SVE);
915
916         BUILD_BUG_ON(SVE_PT_SVE_OFFSET != sizeof(header));
917         start = SVE_PT_SVE_OFFSET;
918         end = SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq);
919         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
920                                  target->thread.sve_state,
921                                  start, end);
922         if (ret)
923                 goto out;
924
925         start = end;
926         end = SVE_PT_SVE_FPSR_OFFSET(vq);
927         ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
928                                         start, end);
929         if (ret)
930                 goto out;
931
932         /*
933          * Copy fpsr, and fpcr which must follow contiguously in
934          * struct fpsimd_state:
935          */
936         start = end;
937         end = SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE;
938         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
939                                  &target->thread.uw.fpsimd_state.fpsr,
940                                  start, end);
941
942 out:
943         fpsimd_flush_task_state(target);
944         return ret;
945 }
946
947 #endif /* CONFIG_ARM64_SVE */
948
949 #ifdef CONFIG_ARM64_PTR_AUTH
950 static int pac_mask_get(struct task_struct *target,
951                         const struct user_regset *regset,
952                         unsigned int pos, unsigned int count,
953                         void *kbuf, void __user *ubuf)
954 {
955         /*
956          * The PAC bits can differ across data and instruction pointers
957          * depending on TCR_EL1.TBID*, which we may make use of in future, so
958          * we expose separate masks.
959          */
960         unsigned long mask = ptrauth_user_pac_mask();
961         struct user_pac_mask uregs = {
962                 .data_mask = mask,
963                 .insn_mask = mask,
964         };
965
966         if (!system_supports_address_auth())
967                 return -EINVAL;
968
969         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &uregs, 0, -1);
970 }
971
972 #ifdef CONFIG_CHECKPOINT_RESTORE
973 static __uint128_t pac_key_to_user(const struct ptrauth_key *key)
974 {
975         return (__uint128_t)key->hi << 64 | key->lo;
976 }
977
978 static struct ptrauth_key pac_key_from_user(__uint128_t ukey)
979 {
980         struct ptrauth_key key = {
981                 .lo = (unsigned long)ukey,
982                 .hi = (unsigned long)(ukey >> 64),
983         };
984
985         return key;
986 }
987
988 static void pac_address_keys_to_user(struct user_pac_address_keys *ukeys,
989                                      const struct ptrauth_keys *keys)
990 {
991         ukeys->apiakey = pac_key_to_user(&keys->apia);
992         ukeys->apibkey = pac_key_to_user(&keys->apib);
993         ukeys->apdakey = pac_key_to_user(&keys->apda);
994         ukeys->apdbkey = pac_key_to_user(&keys->apdb);
995 }
996
997 static void pac_address_keys_from_user(struct ptrauth_keys *keys,
998                                        const struct user_pac_address_keys *ukeys)
999 {
1000         keys->apia = pac_key_from_user(ukeys->apiakey);
1001         keys->apib = pac_key_from_user(ukeys->apibkey);
1002         keys->apda = pac_key_from_user(ukeys->apdakey);
1003         keys->apdb = pac_key_from_user(ukeys->apdbkey);
1004 }
1005
1006 static int pac_address_keys_get(struct task_struct *target,
1007                                 const struct user_regset *regset,
1008                                 unsigned int pos, unsigned int count,
1009                                 void *kbuf, void __user *ubuf)
1010 {
1011         struct ptrauth_keys *keys = &target->thread.keys_user;
1012         struct user_pac_address_keys user_keys;
1013
1014         if (!system_supports_address_auth())
1015                 return -EINVAL;
1016
1017         pac_address_keys_to_user(&user_keys, keys);
1018
1019         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1020                                    &user_keys, 0, -1);
1021 }
1022
1023 static int pac_address_keys_set(struct task_struct *target,
1024                                 const struct user_regset *regset,
1025                                 unsigned int pos, unsigned int count,
1026                                 const void *kbuf, const void __user *ubuf)
1027 {
1028         struct ptrauth_keys *keys = &target->thread.keys_user;
1029         struct user_pac_address_keys user_keys;
1030         int ret;
1031
1032         if (!system_supports_address_auth())
1033                 return -EINVAL;
1034
1035         pac_address_keys_to_user(&user_keys, keys);
1036         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1037                                  &user_keys, 0, -1);
1038         if (ret)
1039                 return ret;
1040         pac_address_keys_from_user(keys, &user_keys);
1041
1042         return 0;
1043 }
1044
1045 static void pac_generic_keys_to_user(struct user_pac_generic_keys *ukeys,
1046                                      const struct ptrauth_keys *keys)
1047 {
1048         ukeys->apgakey = pac_key_to_user(&keys->apga);
1049 }
1050
1051 static void pac_generic_keys_from_user(struct ptrauth_keys *keys,
1052                                        const struct user_pac_generic_keys *ukeys)
1053 {
1054         keys->apga = pac_key_from_user(ukeys->apgakey);
1055 }
1056
1057 static int pac_generic_keys_get(struct task_struct *target,
1058                                 const struct user_regset *regset,
1059                                 unsigned int pos, unsigned int count,
1060                                 void *kbuf, void __user *ubuf)
1061 {
1062         struct ptrauth_keys *keys = &target->thread.keys_user;
1063         struct user_pac_generic_keys user_keys;
1064
1065         if (!system_supports_generic_auth())
1066                 return -EINVAL;
1067
1068         pac_generic_keys_to_user(&user_keys, keys);
1069
1070         return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
1071                                    &user_keys, 0, -1);
1072 }
1073
1074 static int pac_generic_keys_set(struct task_struct *target,
1075                                 const struct user_regset *regset,
1076                                 unsigned int pos, unsigned int count,
1077                                 const void *kbuf, const void __user *ubuf)
1078 {
1079         struct ptrauth_keys *keys = &target->thread.keys_user;
1080         struct user_pac_generic_keys user_keys;
1081         int ret;
1082
1083         if (!system_supports_generic_auth())
1084                 return -EINVAL;
1085
1086         pac_generic_keys_to_user(&user_keys, keys);
1087         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
1088                                  &user_keys, 0, -1);
1089         if (ret)
1090                 return ret;
1091         pac_generic_keys_from_user(keys, &user_keys);
1092
1093         return 0;
1094 }
1095 #endif /* CONFIG_CHECKPOINT_RESTORE */
1096 #endif /* CONFIG_ARM64_PTR_AUTH */
1097
1098 enum aarch64_regset {
1099         REGSET_GPR,
1100         REGSET_FPR,
1101         REGSET_TLS,
1102 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1103         REGSET_HW_BREAK,
1104         REGSET_HW_WATCH,
1105 #endif
1106         REGSET_SYSTEM_CALL,
1107 #ifdef CONFIG_ARM64_SVE
1108         REGSET_SVE,
1109 #endif
1110 #ifdef CONFIG_ARM64_PTR_AUTH
1111         REGSET_PAC_MASK,
1112 #ifdef CONFIG_CHECKPOINT_RESTORE
1113         REGSET_PACA_KEYS,
1114         REGSET_PACG_KEYS,
1115 #endif
1116 #endif
1117 };
1118
1119 static const struct user_regset aarch64_regsets[] = {
1120         [REGSET_GPR] = {
1121                 .core_note_type = NT_PRSTATUS,
1122                 .n = sizeof(struct user_pt_regs) / sizeof(u64),
1123                 .size = sizeof(u64),
1124                 .align = sizeof(u64),
1125                 .get = gpr_get,
1126                 .set = gpr_set
1127         },
1128         [REGSET_FPR] = {
1129                 .core_note_type = NT_PRFPREG,
1130                 .n = sizeof(struct user_fpsimd_state) / sizeof(u32),
1131                 /*
1132                  * We pretend we have 32-bit registers because the fpsr and
1133                  * fpcr are 32-bits wide.
1134                  */
1135                 .size = sizeof(u32),
1136                 .align = sizeof(u32),
1137                 .get = fpr_get,
1138                 .set = fpr_set
1139         },
1140         [REGSET_TLS] = {
1141                 .core_note_type = NT_ARM_TLS,
1142                 .n = 1,
1143                 .size = sizeof(void *),
1144                 .align = sizeof(void *),
1145                 .get = tls_get,
1146                 .set = tls_set,
1147         },
1148 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1149         [REGSET_HW_BREAK] = {
1150                 .core_note_type = NT_ARM_HW_BREAK,
1151                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1152                 .size = sizeof(u32),
1153                 .align = sizeof(u32),
1154                 .get = hw_break_get,
1155                 .set = hw_break_set,
1156         },
1157         [REGSET_HW_WATCH] = {
1158                 .core_note_type = NT_ARM_HW_WATCH,
1159                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1160                 .size = sizeof(u32),
1161                 .align = sizeof(u32),
1162                 .get = hw_break_get,
1163                 .set = hw_break_set,
1164         },
1165 #endif
1166         [REGSET_SYSTEM_CALL] = {
1167                 .core_note_type = NT_ARM_SYSTEM_CALL,
1168                 .n = 1,
1169                 .size = sizeof(int),
1170                 .align = sizeof(int),
1171                 .get = system_call_get,
1172                 .set = system_call_set,
1173         },
1174 #ifdef CONFIG_ARM64_SVE
1175         [REGSET_SVE] = { /* Scalable Vector Extension */
1176                 .core_note_type = NT_ARM_SVE,
1177                 .n = DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX, SVE_PT_REGS_SVE),
1178                                   SVE_VQ_BYTES),
1179                 .size = SVE_VQ_BYTES,
1180                 .align = SVE_VQ_BYTES,
1181                 .get = sve_get,
1182                 .set = sve_set,
1183                 .get_size = sve_get_size,
1184         },
1185 #endif
1186 #ifdef CONFIG_ARM64_PTR_AUTH
1187         [REGSET_PAC_MASK] = {
1188                 .core_note_type = NT_ARM_PAC_MASK,
1189                 .n = sizeof(struct user_pac_mask) / sizeof(u64),
1190                 .size = sizeof(u64),
1191                 .align = sizeof(u64),
1192                 .get = pac_mask_get,
1193                 /* this cannot be set dynamically */
1194         },
1195 #ifdef CONFIG_CHECKPOINT_RESTORE
1196         [REGSET_PACA_KEYS] = {
1197                 .core_note_type = NT_ARM_PACA_KEYS,
1198                 .n = sizeof(struct user_pac_address_keys) / sizeof(__uint128_t),
1199                 .size = sizeof(__uint128_t),
1200                 .align = sizeof(__uint128_t),
1201                 .get = pac_address_keys_get,
1202                 .set = pac_address_keys_set,
1203         },
1204         [REGSET_PACG_KEYS] = {
1205                 .core_note_type = NT_ARM_PACG_KEYS,
1206                 .n = sizeof(struct user_pac_generic_keys) / sizeof(__uint128_t),
1207                 .size = sizeof(__uint128_t),
1208                 .align = sizeof(__uint128_t),
1209                 .get = pac_generic_keys_get,
1210                 .set = pac_generic_keys_set,
1211         },
1212 #endif
1213 #endif
1214 };
1215
1216 static const struct user_regset_view user_aarch64_view = {
1217         .name = "aarch64", .e_machine = EM_AARCH64,
1218         .regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
1219 };
1220
1221 #ifdef CONFIG_COMPAT
1222 enum compat_regset {
1223         REGSET_COMPAT_GPR,
1224         REGSET_COMPAT_VFP,
1225 };
1226
1227 static int compat_gpr_get(struct task_struct *target,
1228                           const struct user_regset *regset,
1229                           unsigned int pos, unsigned int count,
1230                           void *kbuf, void __user *ubuf)
1231 {
1232         int ret = 0;
1233         unsigned int i, start, num_regs;
1234
1235         /* Calculate the number of AArch32 registers contained in count */
1236         num_regs = count / regset->size;
1237
1238         /* Convert pos into an register number */
1239         start = pos / regset->size;
1240
1241         if (start + num_regs > regset->n)
1242                 return -EIO;
1243
1244         for (i = 0; i < num_regs; ++i) {
1245                 unsigned int idx = start + i;
1246                 compat_ulong_t reg;
1247
1248                 switch (idx) {
1249                 case 15:
1250                         reg = task_pt_regs(target)->pc;
1251                         break;
1252                 case 16:
1253                         reg = task_pt_regs(target)->pstate;
1254                         reg = pstate_to_compat_psr(reg);
1255                         break;
1256                 case 17:
1257                         reg = task_pt_regs(target)->orig_x0;
1258                         break;
1259                 default:
1260                         reg = task_pt_regs(target)->regs[idx];
1261                 }
1262
1263                 if (kbuf) {
1264                         memcpy(kbuf, &reg, sizeof(reg));
1265                         kbuf += sizeof(reg);
1266                 } else {
1267                         ret = copy_to_user(ubuf, &reg, sizeof(reg));
1268                         if (ret) {
1269                                 ret = -EFAULT;
1270                                 break;
1271                         }
1272
1273                         ubuf += sizeof(reg);
1274                 }
1275         }
1276
1277         return ret;
1278 }
1279
1280 static int compat_gpr_set(struct task_struct *target,
1281                           const struct user_regset *regset,
1282                           unsigned int pos, unsigned int count,
1283                           const void *kbuf, const void __user *ubuf)
1284 {
1285         struct pt_regs newregs;
1286         int ret = 0;
1287         unsigned int i, start, num_regs;
1288
1289         /* Calculate the number of AArch32 registers contained in count */
1290         num_regs = count / regset->size;
1291
1292         /* Convert pos into an register number */
1293         start = pos / regset->size;
1294
1295         if (start + num_regs > regset->n)
1296                 return -EIO;
1297
1298         newregs = *task_pt_regs(target);
1299
1300         for (i = 0; i < num_regs; ++i) {
1301                 unsigned int idx = start + i;
1302                 compat_ulong_t reg;
1303
1304                 if (kbuf) {
1305                         memcpy(&reg, kbuf, sizeof(reg));
1306                         kbuf += sizeof(reg);
1307                 } else {
1308                         ret = copy_from_user(&reg, ubuf, sizeof(reg));
1309                         if (ret) {
1310                                 ret = -EFAULT;
1311                                 break;
1312                         }
1313
1314                         ubuf += sizeof(reg);
1315                 }
1316
1317                 switch (idx) {
1318                 case 15:
1319                         newregs.pc = reg;
1320                         break;
1321                 case 16:
1322                         reg = compat_psr_to_pstate(reg);
1323                         newregs.pstate = reg;
1324                         break;
1325                 case 17:
1326                         newregs.orig_x0 = reg;
1327                         break;
1328                 default:
1329                         newregs.regs[idx] = reg;
1330                 }
1331
1332         }
1333
1334         if (valid_user_regs(&newregs.user_regs, target))
1335                 *task_pt_regs(target) = newregs;
1336         else
1337                 ret = -EINVAL;
1338
1339         return ret;
1340 }
1341
1342 static int compat_vfp_get(struct task_struct *target,
1343                           const struct user_regset *regset,
1344                           unsigned int pos, unsigned int count,
1345                           void *kbuf, void __user *ubuf)
1346 {
1347         struct user_fpsimd_state *uregs;
1348         compat_ulong_t fpscr;
1349         int ret, vregs_end_pos;
1350
1351         uregs = &target->thread.uw.fpsimd_state;
1352
1353         if (target == current)
1354                 fpsimd_preserve_current_state();
1355
1356         /*
1357          * The VFP registers are packed into the fpsimd_state, so they all sit
1358          * nicely together for us. We just need to create the fpscr separately.
1359          */
1360         vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1361         ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs,
1362                                   0, vregs_end_pos);
1363
1364         if (count && !ret) {
1365                 fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) |
1366                         (uregs->fpcr & VFP_FPSCR_CTRL_MASK);
1367
1368                 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &fpscr,
1369                                           vregs_end_pos, VFP_STATE_SIZE);
1370         }
1371
1372         return ret;
1373 }
1374
1375 static int compat_vfp_set(struct task_struct *target,
1376                           const struct user_regset *regset,
1377                           unsigned int pos, unsigned int count,
1378                           const void *kbuf, const void __user *ubuf)
1379 {
1380         struct user_fpsimd_state *uregs;
1381         compat_ulong_t fpscr;
1382         int ret, vregs_end_pos;
1383
1384         uregs = &target->thread.uw.fpsimd_state;
1385
1386         vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
1387         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
1388                                  vregs_end_pos);
1389
1390         if (count && !ret) {
1391                 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpscr,
1392                                          vregs_end_pos, VFP_STATE_SIZE);
1393                 if (!ret) {
1394                         uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK;
1395                         uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
1396                 }
1397         }
1398
1399         fpsimd_flush_task_state(target);
1400         return ret;
1401 }
1402
1403 static int compat_tls_get(struct task_struct *target,
1404                           const struct user_regset *regset, unsigned int pos,
1405                           unsigned int count, void *kbuf, void __user *ubuf)
1406 {
1407         compat_ulong_t tls = (compat_ulong_t)target->thread.uw.tp_value;
1408         return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1409 }
1410
1411 static int compat_tls_set(struct task_struct *target,
1412                           const struct user_regset *regset, unsigned int pos,
1413                           unsigned int count, const void *kbuf,
1414                           const void __user *ubuf)
1415 {
1416         int ret;
1417         compat_ulong_t tls = target->thread.uw.tp_value;
1418
1419         ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
1420         if (ret)
1421                 return ret;
1422
1423         target->thread.uw.tp_value = tls;
1424         return ret;
1425 }
1426
1427 static const struct user_regset aarch32_regsets[] = {
1428         [REGSET_COMPAT_GPR] = {
1429                 .core_note_type = NT_PRSTATUS,
1430                 .n = COMPAT_ELF_NGREG,
1431                 .size = sizeof(compat_elf_greg_t),
1432                 .align = sizeof(compat_elf_greg_t),
1433                 .get = compat_gpr_get,
1434                 .set = compat_gpr_set
1435         },
1436         [REGSET_COMPAT_VFP] = {
1437                 .core_note_type = NT_ARM_VFP,
1438                 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1439                 .size = sizeof(compat_ulong_t),
1440                 .align = sizeof(compat_ulong_t),
1441                 .get = compat_vfp_get,
1442                 .set = compat_vfp_set
1443         },
1444 };
1445
1446 static const struct user_regset_view user_aarch32_view = {
1447         .name = "aarch32", .e_machine = EM_ARM,
1448         .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
1449 };
1450
1451 static const struct user_regset aarch32_ptrace_regsets[] = {
1452         [REGSET_GPR] = {
1453                 .core_note_type = NT_PRSTATUS,
1454                 .n = COMPAT_ELF_NGREG,
1455                 .size = sizeof(compat_elf_greg_t),
1456                 .align = sizeof(compat_elf_greg_t),
1457                 .get = compat_gpr_get,
1458                 .set = compat_gpr_set
1459         },
1460         [REGSET_FPR] = {
1461                 .core_note_type = NT_ARM_VFP,
1462                 .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
1463                 .size = sizeof(compat_ulong_t),
1464                 .align = sizeof(compat_ulong_t),
1465                 .get = compat_vfp_get,
1466                 .set = compat_vfp_set
1467         },
1468         [REGSET_TLS] = {
1469                 .core_note_type = NT_ARM_TLS,
1470                 .n = 1,
1471                 .size = sizeof(compat_ulong_t),
1472                 .align = sizeof(compat_ulong_t),
1473                 .get = compat_tls_get,
1474                 .set = compat_tls_set,
1475         },
1476 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1477         [REGSET_HW_BREAK] = {
1478                 .core_note_type = NT_ARM_HW_BREAK,
1479                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1480                 .size = sizeof(u32),
1481                 .align = sizeof(u32),
1482                 .get = hw_break_get,
1483                 .set = hw_break_set,
1484         },
1485         [REGSET_HW_WATCH] = {
1486                 .core_note_type = NT_ARM_HW_WATCH,
1487                 .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
1488                 .size = sizeof(u32),
1489                 .align = sizeof(u32),
1490                 .get = hw_break_get,
1491                 .set = hw_break_set,
1492         },
1493 #endif
1494         [REGSET_SYSTEM_CALL] = {
1495                 .core_note_type = NT_ARM_SYSTEM_CALL,
1496                 .n = 1,
1497                 .size = sizeof(int),
1498                 .align = sizeof(int),
1499                 .get = system_call_get,
1500                 .set = system_call_set,
1501         },
1502 };
1503
1504 static const struct user_regset_view user_aarch32_ptrace_view = {
1505         .name = "aarch32", .e_machine = EM_ARM,
1506         .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
1507 };
1508
1509 static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
1510                                    compat_ulong_t __user *ret)
1511 {
1512         compat_ulong_t tmp;
1513
1514         if (off & 3)
1515                 return -EIO;
1516
1517         if (off == COMPAT_PT_TEXT_ADDR)
1518                 tmp = tsk->mm->start_code;
1519         else if (off == COMPAT_PT_DATA_ADDR)
1520                 tmp = tsk->mm->start_data;
1521         else if (off == COMPAT_PT_TEXT_END_ADDR)
1522                 tmp = tsk->mm->end_code;
1523         else if (off < sizeof(compat_elf_gregset_t))
1524                 return copy_regset_to_user(tsk, &user_aarch32_view,
1525                                            REGSET_COMPAT_GPR, off,
1526                                            sizeof(compat_ulong_t), ret);
1527         else if (off >= COMPAT_USER_SZ)
1528                 return -EIO;
1529         else
1530                 tmp = 0;
1531
1532         return put_user(tmp, ret);
1533 }
1534
1535 static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
1536                                     compat_ulong_t val)
1537 {
1538         int ret;
1539         mm_segment_t old_fs = get_fs();
1540
1541         if (off & 3 || off >= COMPAT_USER_SZ)
1542                 return -EIO;
1543
1544         if (off >= sizeof(compat_elf_gregset_t))
1545                 return 0;
1546
1547         set_fs(KERNEL_DS);
1548         ret = copy_regset_from_user(tsk, &user_aarch32_view,
1549                                     REGSET_COMPAT_GPR, off,
1550                                     sizeof(compat_ulong_t),
1551                                     &val);
1552         set_fs(old_fs);
1553
1554         return ret;
1555 }
1556
1557 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1558
1559 /*
1560  * Convert a virtual register number into an index for a thread_info
1561  * breakpoint array. Breakpoints are identified using positive numbers
1562  * whilst watchpoints are negative. The registers are laid out as pairs
1563  * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1564  * Register 0 is reserved for describing resource information.
1565  */
1566 static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
1567 {
1568         return (abs(num) - 1) >> 1;
1569 }
1570
1571 static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
1572 {
1573         u8 num_brps, num_wrps, debug_arch, wp_len;
1574         u32 reg = 0;
1575
1576         num_brps        = hw_breakpoint_slots(TYPE_INST);
1577         num_wrps        = hw_breakpoint_slots(TYPE_DATA);
1578
1579         debug_arch      = debug_monitors_arch();
1580         wp_len          = 8;
1581         reg             |= debug_arch;
1582         reg             <<= 8;
1583         reg             |= wp_len;
1584         reg             <<= 8;
1585         reg             |= num_wrps;
1586         reg             <<= 8;
1587         reg             |= num_brps;
1588
1589         *kdata = reg;
1590         return 0;
1591 }
1592
1593 static int compat_ptrace_hbp_get(unsigned int note_type,
1594                                  struct task_struct *tsk,
1595                                  compat_long_t num,
1596                                  u32 *kdata)
1597 {
1598         u64 addr = 0;
1599         u32 ctrl = 0;
1600
1601         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1602
1603         if (num & 1) {
1604                 err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
1605                 *kdata = (u32)addr;
1606         } else {
1607                 err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
1608                 *kdata = ctrl;
1609         }
1610
1611         return err;
1612 }
1613
1614 static int compat_ptrace_hbp_set(unsigned int note_type,
1615                                  struct task_struct *tsk,
1616                                  compat_long_t num,
1617                                  u32 *kdata)
1618 {
1619         u64 addr;
1620         u32 ctrl;
1621
1622         int err, idx = compat_ptrace_hbp_num_to_idx(num);
1623
1624         if (num & 1) {
1625                 addr = *kdata;
1626                 err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
1627         } else {
1628                 ctrl = *kdata;
1629                 err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
1630         }
1631
1632         return err;
1633 }
1634
1635 static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
1636                                     compat_ulong_t __user *data)
1637 {
1638         int ret;
1639         u32 kdata;
1640
1641         /* Watchpoint */
1642         if (num < 0) {
1643                 ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
1644         /* Resource info */
1645         } else if (num == 0) {
1646                 ret = compat_ptrace_hbp_get_resource_info(&kdata);
1647         /* Breakpoint */
1648         } else {
1649                 ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
1650         }
1651
1652         if (!ret)
1653                 ret = put_user(kdata, data);
1654
1655         return ret;
1656 }
1657
1658 static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
1659                                     compat_ulong_t __user *data)
1660 {
1661         int ret;
1662         u32 kdata = 0;
1663
1664         if (num == 0)
1665                 return 0;
1666
1667         ret = get_user(kdata, data);
1668         if (ret)
1669                 return ret;
1670
1671         if (num < 0)
1672                 ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
1673         else
1674                 ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
1675
1676         return ret;
1677 }
1678 #endif  /* CONFIG_HAVE_HW_BREAKPOINT */
1679
1680 long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1681                         compat_ulong_t caddr, compat_ulong_t cdata)
1682 {
1683         unsigned long addr = caddr;
1684         unsigned long data = cdata;
1685         void __user *datap = compat_ptr(data);
1686         int ret;
1687
1688         switch (request) {
1689                 case PTRACE_PEEKUSR:
1690                         ret = compat_ptrace_read_user(child, addr, datap);
1691                         break;
1692
1693                 case PTRACE_POKEUSR:
1694                         ret = compat_ptrace_write_user(child, addr, data);
1695                         break;
1696
1697                 case COMPAT_PTRACE_GETREGS:
1698                         ret = copy_regset_to_user(child,
1699                                                   &user_aarch32_view,
1700                                                   REGSET_COMPAT_GPR,
1701                                                   0, sizeof(compat_elf_gregset_t),
1702                                                   datap);
1703                         break;
1704
1705                 case COMPAT_PTRACE_SETREGS:
1706                         ret = copy_regset_from_user(child,
1707                                                     &user_aarch32_view,
1708                                                     REGSET_COMPAT_GPR,
1709                                                     0, sizeof(compat_elf_gregset_t),
1710                                                     datap);
1711                         break;
1712
1713                 case COMPAT_PTRACE_GET_THREAD_AREA:
1714                         ret = put_user((compat_ulong_t)child->thread.uw.tp_value,
1715                                        (compat_ulong_t __user *)datap);
1716                         break;
1717
1718                 case COMPAT_PTRACE_SET_SYSCALL:
1719                         task_pt_regs(child)->syscallno = data;
1720                         ret = 0;
1721                         break;
1722
1723                 case COMPAT_PTRACE_GETVFPREGS:
1724                         ret = copy_regset_to_user(child,
1725                                                   &user_aarch32_view,
1726                                                   REGSET_COMPAT_VFP,
1727                                                   0, VFP_STATE_SIZE,
1728                                                   datap);
1729                         break;
1730
1731                 case COMPAT_PTRACE_SETVFPREGS:
1732                         ret = copy_regset_from_user(child,
1733                                                     &user_aarch32_view,
1734                                                     REGSET_COMPAT_VFP,
1735                                                     0, VFP_STATE_SIZE,
1736                                                     datap);
1737                         break;
1738
1739 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1740                 case COMPAT_PTRACE_GETHBPREGS:
1741                         ret = compat_ptrace_gethbpregs(child, addr, datap);
1742                         break;
1743
1744                 case COMPAT_PTRACE_SETHBPREGS:
1745                         ret = compat_ptrace_sethbpregs(child, addr, datap);
1746                         break;
1747 #endif
1748
1749                 default:
1750                         ret = compat_ptrace_request(child, request, addr,
1751                                                     data);
1752                         break;
1753         }
1754
1755         return ret;
1756 }
1757 #endif /* CONFIG_COMPAT */
1758
1759 const struct user_regset_view *task_user_regset_view(struct task_struct *task)
1760 {
1761 #ifdef CONFIG_COMPAT
1762         /*
1763          * Core dumping of 32-bit tasks or compat ptrace requests must use the
1764          * user_aarch32_view compatible with arm32. Native ptrace requests on
1765          * 32-bit children use an extended user_aarch32_ptrace_view to allow
1766          * access to the TLS register.
1767          */
1768         if (is_compat_task())
1769                 return &user_aarch32_view;
1770         else if (is_compat_thread(task_thread_info(task)))
1771                 return &user_aarch32_ptrace_view;
1772 #endif
1773         return &user_aarch64_view;
1774 }
1775
1776 long arch_ptrace(struct task_struct *child, long request,
1777                  unsigned long addr, unsigned long data)
1778 {
1779         return ptrace_request(child, request, addr, data);
1780 }
1781
1782 enum ptrace_syscall_dir {
1783         PTRACE_SYSCALL_ENTER = 0,
1784         PTRACE_SYSCALL_EXIT,
1785 };
1786
1787 static void tracehook_report_syscall(struct pt_regs *regs,
1788                                      enum ptrace_syscall_dir dir)
1789 {
1790         int regno;
1791         unsigned long saved_reg;
1792
1793         /*
1794          * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
1795          * used to denote syscall entry/exit:
1796          */
1797         regno = (is_compat_task() ? 12 : 7);
1798         saved_reg = regs->regs[regno];
1799         regs->regs[regno] = dir;
1800
1801         if (dir == PTRACE_SYSCALL_EXIT)
1802                 tracehook_report_syscall_exit(regs, 0);
1803         else if (tracehook_report_syscall_entry(regs))
1804                 forget_syscall(regs);
1805
1806         regs->regs[regno] = saved_reg;
1807 }
1808
1809 int syscall_trace_enter(struct pt_regs *regs)
1810 {
1811         if (test_thread_flag(TIF_SYSCALL_TRACE) ||
1812                 test_thread_flag(TIF_SYSCALL_EMU)) {
1813                 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
1814                 if (!in_syscall(regs) || test_thread_flag(TIF_SYSCALL_EMU))
1815                         return -1;
1816         }
1817
1818         /* Do the secure computing after ptrace; failures should be fast. */
1819         if (secure_computing(NULL) == -1)
1820                 return -1;
1821
1822         if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1823                 trace_sys_enter(regs, regs->syscallno);
1824
1825         audit_syscall_entry(regs->syscallno, regs->orig_x0, regs->regs[1],
1826                             regs->regs[2], regs->regs[3]);
1827
1828         return regs->syscallno;
1829 }
1830
1831 void syscall_trace_exit(struct pt_regs *regs)
1832 {
1833         audit_syscall_exit(regs);
1834
1835         if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
1836                 trace_sys_exit(regs, regs_return_value(regs));
1837
1838         if (test_thread_flag(TIF_SYSCALL_TRACE))
1839                 tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
1840
1841         rseq_syscall(regs);
1842 }
1843
1844 /*
1845  * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
1846  * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
1847  * not described in ARM DDI 0487D.a.
1848  * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
1849  * be allocated an EL0 meaning in future.
1850  * Userspace cannot use these until they have an architectural meaning.
1851  * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
1852  * We also reserve IL for the kernel; SS is handled dynamically.
1853  */
1854 #define SPSR_EL1_AARCH64_RES0_BITS \
1855         (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \
1856          GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5))
1857 #define SPSR_EL1_AARCH32_RES0_BITS \
1858         (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
1859
1860 static int valid_compat_regs(struct user_pt_regs *regs)
1861 {
1862         regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
1863
1864         if (!system_supports_mixed_endian_el0()) {
1865                 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1866                         regs->pstate |= PSR_AA32_E_BIT;
1867                 else
1868                         regs->pstate &= ~PSR_AA32_E_BIT;
1869         }
1870
1871         if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
1872             (regs->pstate & PSR_AA32_A_BIT) == 0 &&
1873             (regs->pstate & PSR_AA32_I_BIT) == 0 &&
1874             (regs->pstate & PSR_AA32_F_BIT) == 0) {
1875                 return 1;
1876         }
1877
1878         /*
1879          * Force PSR to a valid 32-bit EL0t, preserving the same bits as
1880          * arch/arm.
1881          */
1882         regs->pstate &= PSR_AA32_N_BIT | PSR_AA32_Z_BIT |
1883                         PSR_AA32_C_BIT | PSR_AA32_V_BIT |
1884                         PSR_AA32_Q_BIT | PSR_AA32_IT_MASK |
1885                         PSR_AA32_GE_MASK | PSR_AA32_E_BIT |
1886                         PSR_AA32_T_BIT;
1887         regs->pstate |= PSR_MODE32_BIT;
1888
1889         return 0;
1890 }
1891
1892 static int valid_native_regs(struct user_pt_regs *regs)
1893 {
1894         regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
1895
1896         if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
1897             (regs->pstate & PSR_D_BIT) == 0 &&
1898             (regs->pstate & PSR_A_BIT) == 0 &&
1899             (regs->pstate & PSR_I_BIT) == 0 &&
1900             (regs->pstate & PSR_F_BIT) == 0) {
1901                 return 1;
1902         }
1903
1904         /* Force PSR to a valid 64-bit EL0t */
1905         regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
1906
1907         return 0;
1908 }
1909
1910 /*
1911  * Are the current registers suitable for user mode? (used to maintain
1912  * security in signal handlers)
1913  */
1914 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
1915 {
1916         if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
1917                 regs->pstate &= ~DBG_SPSR_SS;
1918
1919         if (is_compat_thread(task_thread_info(task)))
1920                 return valid_compat_regs(regs);
1921         else
1922                 return valid_native_regs(regs);
1923 }